From 9e2d0ac50b22c5220f2df092ef3ab9e18e579df3 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 27 Oct 2016 03:30:52 -0700 Subject: [PATCH] Move to URL based syntax formatting. (#3092)
For command line arguments we are currently following - <ip>:/path ... <ip>:/path This patch changes this to - http://<ip>/path ... http://<ip>/path
--- cmd/benchmark-utils_test.go | 2 +- cmd/checkport.go | 11 +- cmd/checkport_test.go | 9 +- cmd/control-mains_test.go | 2 +- cmd/control-router.go | 14 +- cmd/control-router_test.go | 40 ++++-- cmd/erasure-readfile_test.go | 2 +- cmd/event-notifier_test.go | 19 ++- cmd/format-config-v1_test.go | 38 +++--- cmd/fs-v1-metadata_test.go | 2 +- cmd/fs-v1_test.go | 8 +- cmd/globals.go | 2 +- cmd/lock-rpc-server.go | 11 +- cmd/lock-rpc-server_test.go | 56 +++++--- cmd/namespace-lock.go | 10 +- cmd/object-api-listobjects_test.go | 2 +- cmd/object-common.go | 53 ++++++-- cmd/object-common_test.go | 61 ++++++++- cmd/posix-utils_windows_test.go | 56 ++------ cmd/posix.go | 28 ++-- cmd/posix_test.go | 8 +- cmd/prepare-storage.go | 36 +++-- cmd/s3-peer-client.go | 12 +- cmd/s3-peer-client_test.go | 13 +- cmd/server-main.go | 205 ++++++++++------------------- cmd/server-main_test.go | 19 ++- cmd/server-mux_test.go | 5 +- cmd/server_utils_test.go | 4 +- cmd/storage-rpc-client.go | 30 +++-- cmd/storage-rpc-client_test.go | 22 +++- cmd/storage-rpc-server.go | 15 ++- cmd/test-utils_test.go | 62 +++++---- cmd/tree-walk_test.go | 41 +++--- cmd/utils.go | 24 +++- cmd/utils_test.go | 87 ++++++++---- cmd/xl-v1_test.go | 21 +-- 36 files changed, 558 insertions(+), 472 deletions(-)
diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go index 574820e0c..7da3600c0 100644 --- a/cmd/benchmark-utils_test.go +++ b/cmd/benchmark-utils_test.go @@ -35,7 +35,7 @@ func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) if err != nil { return nil, nil, err } - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { return nil, nil, err }
diff --git a/cmd/checkport.go b/cmd/checkport.go index c1fb9531c..489adfba3 100644 --- a/cmd/checkport.go +++ b/cmd/checkport.go @@ -17,7 +17,6 @@ package cmd import ( - "fmt" "net" "os" "syscall" @@ -32,10 +31,16 @@ import ( // This causes confusion on Mac OSX that minio server is not reachable // on 127.0.0.1 even though minio server is running. So before we start // the minio server we make sure that the port is free on each tcp network. -func checkPortAvailability(port int) error { +// +// Port is string on purpose here. +// https://github.com/golang/go/issues/16142#issuecomment-245912773 +// +// "Keep in mind that ports in Go are strings: https://play.golang.org/p/zk2WEri_E9" +// - @bradfitz +func checkPortAvailability(portStr string) error { network := [3]string{"tcp", "tcp4", "tcp6"} for _, n := range network { - l, err := net.Listen(n, fmt.Sprintf(":%d", port)) + l, err := net.Listen(n, net.JoinHostPort("", portStr)) if err != nil { if isAddrInUse(err) { // Return error if another process is listening on the
diff --git a/cmd/checkport_test.go b/cmd/checkport_test.go index edd0d8925..9b3c4c087 100644 --- a/cmd/checkport_test.go +++ b/cmd/checkport_test.go @@ -17,7 +17,6 @@ package cmd import ( - "fmt" "net" "runtime" "testing" @@ -26,7 +25,7 @@ import ( // Tests for port availability logic written for server startup sequence.
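A note on the port type above: keeping the port as a string lets it flow from net.SplitHostPort straight into net.JoinHostPort and net.Listen with no strconv round-trips. The sketch below is illustrative only (checkPort is a hypothetical name; the real checkPortAvailability also walks tcp4/tcp6 and only treats "address already in use" as fatal).

package main

import (
	"fmt"
	"net"
)

// checkPort reports whether a TCP port, given as a string, can be bound.
// Illustrative sketch, not the server's implementation.
func checkPort(portStr string) error {
	// Builds a ":9000"-style listen address without any integer conversion.
	l, err := net.Listen("tcp", net.JoinHostPort("", portStr))
	if err != nil {
		return fmt.Errorf("port %s is not available: %v", portStr, err)
	}
	return l.Close()
}

func main() {
	fmt.Println(checkPort("9000")) // <nil> if the port is free
}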
func TestCheckPortAvailability(t *testing.T) { tests := []struct { - port int + port string }{ {getFreePort()}, {getFreePort()}, @@ -35,11 +34,11 @@ func TestCheckPortAvailability(t *testing.T) { // This test should pass if the ports are available err := checkPortAvailability(test.port) if err != nil { - t.Fatalf("checkPortAvailability test failed for port: %d. Error: %v", test.port, err) + t.Fatalf("checkPortAvailability test failed for port: %s. Error: %v", test.port, err) } // Now use the ports and check again - ln, err := net.Listen("tcp", fmt.Sprintf(":%d", test.port)) + ln, err := net.Listen("tcp", net.JoinHostPort("", test.port)) if err != nil { t.Fail() } @@ -49,7 +48,7 @@ func TestCheckPortAvailability(t *testing.T) { // Skip if the os is windows due to https://github.com/golang/go/issues/7598 if err == nil && runtime.GOOS != "windows" { - t.Fatalf("checkPortAvailability should fail for port: %d. Error: %v", test.port, err) + t.Fatalf("checkPortAvailability should fail for port: %s. Error: %v", test.port, err) } } } diff --git a/cmd/control-mains_test.go b/cmd/control-mains_test.go index 346fd4a8f..87b191049 100644 --- a/cmd/control-mains_test.go +++ b/cmd/control-mains_test.go @@ -73,7 +73,7 @@ func TestControlHealMain(t *testing.T) { } // Remove the object - to simulate the case where the disk was down when the object was created. - err = os.RemoveAll(path.Join(testServer.Disks[0].path, bucket, object)) + err = os.RemoveAll(path.Join(testServer.Disks[0].Path, bucket, object)) if err != nil { t.Fatal(err) } diff --git a/cmd/control-router.go b/cmd/control-router.go index 76b057799..e7730b323 100644 --- a/cmd/control-router.go +++ b/cmd/control-router.go @@ -17,7 +17,6 @@ package cmd import ( - "fmt" "net/rpc" "path" @@ -36,24 +35,21 @@ func initRemoteControlClients(srvCmdConfig serverCmdConfig) []*AuthRPCClient { } // Initialize auth rpc clients. var remoteControlClnts []*AuthRPCClient - localMap := make(map[storageEndPoint]int) - for _, ep := range srvCmdConfig.endPoints { - // Set path to "" so that it is not used for filtering the - // unique entries. - ep.path = "" + localMap := make(map[string]int) + for _, ep := range srvCmdConfig.endpoints { // Validates if remote disk is local. if isLocalStorage(ep) { continue } - if localMap[ep] == 1 { + if localMap[ep.Host] == 1 { continue } - localMap[ep]++ + localMap[ep.Host]++ remoteControlClnts = append(remoteControlClnts, newAuthClient(&authConfig{ accessKey: serverConfig.GetCredential().AccessKeyID, secretKey: serverConfig.GetCredential().SecretAccessKey, secureConn: isSSL(), - address: fmt.Sprintf("%s:%d", ep.host, ep.port), + address: ep.Host, path: path.Join(reservedBucket, controlPath), loginMethod: "Control.LoginHandler", })) diff --git a/cmd/control-router_test.go b/cmd/control-router_test.go index c7282d69d..2d46b8f77 100644 --- a/cmd/control-router_test.go +++ b/cmd/control-router_test.go @@ -16,7 +16,10 @@ package cmd -import "testing" +import ( + "net/url" + "testing" +) // Tests initialization of remote controller clients. 
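The control-router change above keys its de-duplication map on ep.Host rather than on the whole endpoint, which is what collapses several exported paths on one server into a single RPC client. A minimal sketch of that idea follows, with the locality test reduced to a localHost parameter (the real code uses isLocalStorage, and uniqueRemoteHosts is a hypothetical name).

package main

import (
	"fmt"
	"net/url"
)

// uniqueRemoteHosts returns each remote host:port exactly once, no matter how
// many disk paths it exports. localHost stands in for the isLocalStorage check.
func uniqueRemoteHosts(endpoints []*url.URL, localHost string) []string {
	seen := make(map[string]bool)
	var hosts []string
	for _, ep := range endpoints {
		if ep == nil || ep.Host == "" || ep.Host == localHost {
			continue // skip local or malformed endpoints
		}
		if !seen[ep.Host] {
			seen[ep.Host] = true
			hosts = append(hosts, ep.Host)
		}
	}
	return hosts
}

func main() {
	eps := []*url.URL{
		{Scheme: "http", Host: "10.1.10.1:9000", Path: "/mnt/disk1"},
		{Scheme: "http", Host: "10.1.10.1:9000", Path: "/mnt/disk2"},
		{Scheme: "http", Host: "10.1.10.2:9000", Path: "/mnt/disk1"},
	}
	fmt.Println(uniqueRemoteHosts(eps, "10.1.10.2:9000")) // [10.1.10.1:9000]
}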
func TestInitRemoteControlClients(t *testing.T) { @@ -41,11 +44,19 @@ func TestInitRemoteControlClients(t *testing.T) { { srvCmdConfig: serverCmdConfig{ isDistXL: true, - endPoints: []storageEndPoint{ - {"10.1.10.1", 9000, "/mnt/disk1"}, - {"10.1.10.1", 9000, "/mnt/disk2"}, - {"10.1.10.2", 9000, "/mnt/disk1"}, - {"10.1.10.2", 9000, "/mnt/disk2"}, + endpoints: []*url.URL{{ + Scheme: "http", + Host: "10.1.10.1:9000", + Path: "/mnt/disk1", + }, { + Scheme: "http", + Host: "10.1.10.1:9000", Path: "/mnt/disk2", + }, { + Scheme: "http", + Host: "10.1.10.2:9000", Path: "/mnt/disk1", + }, { + Scheme: "http", + Host: "10.1.10.2:9000", Path: "/mnt/disk2"}, }, }, totalClients: 2, @@ -54,11 +65,18 @@ func TestInitRemoteControlClients(t *testing.T) { { srvCmdConfig: serverCmdConfig{ isDistXL: true, - endPoints: []storageEndPoint{ - {"10.1.10.1", 9000, "/mnt/disk1"}, - {"10.1.10.2", 9000, "/mnt/disk2"}, - {"10.1.10.3", 9000, "/mnt/disk3"}, - {"10.1.10.4", 9000, "/mnt/disk4"}, + endpoints: []*url.URL{{ + Scheme: "http", + Host: "10.1.10.1:9000", Path: "/mnt/disk1", + }, { + Scheme: "http", + Host: "10.1.10.2:9000", Path: "/mnt/disk2", + }, { + Scheme: "http", + Host: "10.1.10.3:9000", Path: "/mnt/disk1", + }, { + Scheme: "http", + Host: "10.1.10.4:9000", Path: "/mnt/disk2"}, }, }, totalClients: 4, diff --git a/cmd/erasure-readfile_test.go b/cmd/erasure-readfile_test.go index 6bfd3d9d2..8a2e4e3cd 100644 --- a/cmd/erasure-readfile_test.go +++ b/cmd/erasure-readfile_test.go @@ -222,7 +222,7 @@ func TestErasureReadUtils(t *testing.T) { if err != nil { t.Fatal(err) } - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatal(err) } diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go index e15561cf2..420e77164 100644 --- a/cmd/event-notifier_test.go +++ b/cmd/event-notifier_test.go @@ -20,7 +20,6 @@ import ( "fmt" "net" "reflect" - "strconv" "testing" "time" ) @@ -40,7 +39,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) { t.Fatal("Unable to create directories for FS backend. ", err) } defer removeAll(disks[0]) - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatal(err) } @@ -94,7 +93,7 @@ func TestInitEventNotifierWithAMQP(t *testing.T) { if err != nil { t.Fatal("Unable to create directories for FS backend. ", err) } - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatal(err) } @@ -125,7 +124,7 @@ func TestInitEventNotifierWithElasticSearch(t *testing.T) { if err != nil { t.Fatal("Unable to create directories for FS backend. ", err) } - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatal(err) } @@ -156,7 +155,7 @@ func TestInitEventNotifierWithRedis(t *testing.T) { if err != nil { t.Fatal("Unable to create directories for FS backend. 
", err) } - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatal(err) } @@ -180,14 +179,12 @@ func (s *TestPeerRPCServerData) Setup(t *testing.T) { s.testServer = StartTestPeersRPCServer(t, s.serverType) // setup port and minio addr - _, portStr, err := net.SplitHostPort(s.testServer.Server.Listener.Addr().String()) - if err != nil { - t.Fatalf("Initialisation error: %v", err) - } - globalMinioPort, err = strconv.Atoi(portStr) + host, port, err := net.SplitHostPort(s.testServer.Server.Listener.Addr().String()) if err != nil { t.Fatalf("Initialisation error: %v", err) } + globalMinioHost = host + globalMinioPort = port globalMinioAddr = getLocalAddress( s.testServer.SrvCmdCfg, ) @@ -200,7 +197,7 @@ func (s *TestPeerRPCServerData) TearDown() { s.testServer.Stop() _ = removeAll(s.testServer.Root) for _, d := range s.testServer.Disks { - _ = removeAll(d.path) + _ = removeAll(d.Path) } } diff --git a/cmd/format-config-v1_test.go b/cmd/format-config-v1_test.go index dd0b4183c..c5689354c 100644 --- a/cmd/format-config-v1_test.go +++ b/cmd/format-config-v1_test.go @@ -275,7 +275,7 @@ func TestFormatXLHealFreshDisks(t *testing.T) { if err != nil { t.Fatal(err) } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -311,7 +311,7 @@ func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) { if err != nil { t.Fatal(err) } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -600,7 +600,7 @@ func TestInitFormatXLErrors(t *testing.T) { t.Fatal(err) } defer removeRoots(fsDirs) - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -706,7 +706,7 @@ func TestLoadFormatXLErrs(t *testing.T) { } defer removeRoots(fsDirs) - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -737,7 +737,7 @@ func TestLoadFormatXLErrs(t *testing.T) { } defer removeRoots(fsDirs) - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -765,7 +765,7 @@ func TestLoadFormatXLErrs(t *testing.T) { } defer removeRoots(fsDirs) - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -791,7 +791,7 @@ func TestLoadFormatXLErrs(t *testing.T) { } defer removeRoots(fsDirs) - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -818,7 +818,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -840,7 +840,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -864,7 +864,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -890,7 +890,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = 
parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -912,7 +912,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -938,7 +938,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -968,7 +968,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -989,7 +989,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -1013,7 +1013,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -1039,7 +1039,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -1061,7 +1061,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } @@ -1087,7 +1087,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) { t.Fatal(err) } - endpoints, err = parseStorageEndPoints(fsDirs, 0) + endpoints, err = parseStorageEndpoints(fsDirs) if err != nil { t.Fatal(err) } diff --git a/cmd/fs-v1-metadata_test.go b/cmd/fs-v1-metadata_test.go index bc9cf726c..df404746a 100644 --- a/cmd/fs-v1-metadata_test.go +++ b/cmd/fs-v1-metadata_test.go @@ -68,7 +68,7 @@ func TestHasExtendedHeader(t *testing.T) { } func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) { - endpoints, err := parseStorageEndPoints([]string{disk}, 0) + endpoints, err := parseStorageEndpoints([]string{disk}) if err != nil { t.Fatal(err) } diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index a03e7e45d..490ec4d2f 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -40,7 +40,7 @@ func TestNewFS(t *testing.T) { disks = append(disks, xlDisk) } - endpoints, err := parseStorageEndPoints([]string{disk}, 0) + endpoints, err := parseStorageEndpoints([]string{disk}) if err != nil { t.Fatal("Uexpected error: ", err) } @@ -50,7 +50,7 @@ func TestNewFS(t *testing.T) { t.Fatal("Uexpected error: ", err) } - endpoints, err = parseStorageEndPoints(disks, 0) + endpoints, err = parseStorageEndpoints(disks) if err != nil { t.Fatal("Uexpected error: ", err) } @@ -61,7 +61,7 @@ func TestNewFS(t *testing.T) { } // Initializes all disks with XL - err = waitForFormatDisks(true, "", xlStorageDisks) + err = waitForFormatDisks(true, endpoints[0], xlStorageDisks) if err != nil { t.Fatalf("Unable to format XL %s", err) } @@ -79,7 +79,7 @@ func TestNewFS(t *testing.T) { } for _, testCase := range testCases { - if err = waitForFormatDisks(true, "", []StorageAPI{testCase.disk}); err != testCase.expectedErr { + if err = waitForFormatDisks(true, endpoints[0], []StorageAPI{testCase.disk}); err != testCase.expectedErr { t.Errorf("expected: %s, got :%s", 
testCase.expectedErr, err) } } diff --git a/cmd/globals.go b/cmd/globals.go index cc5ecf026..75a7c430f 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -56,7 +56,7 @@ var ( // Minio local server address (in `host:port` format) globalMinioAddr = "" // Minio default port, can be changed through command line. - globalMinioPort = 9000 + globalMinioPort = "9000" // Holds the host that was passed using --address globalMinioHost = "" // Peer communication struct diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index a1aacdedf..df25d0f6d 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -81,10 +81,10 @@ func registerDistNSLockRouter(mux *router.Router, serverConfig serverCmdConfig) } // Create one lock server for every local storage rpc server. -func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) { - for _, ep := range serverConfig.endPoints { - if ep.presentIn(serverConfig.ignoredEndPoints) { - // Skip initializing ignored end point. +func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) { + for _, ep := range srvConfig.endpoints { + if containsEndpoint(srvConfig.ignoredEndpoints, ep) { + // Skip initializing ignored endpoint. continue } @@ -92,9 +92,10 @@ func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) { if !isLocalStorage(ep) { continue } + // Create handler for lock RPCs locker := &lockServer{ - rpcPath: ep.path, + rpcPath: getPath(ep), mutex: sync.Mutex{}, lockMap: make(map[string][]lockRequesterInfo), } diff --git a/cmd/lock-rpc-server_test.go b/cmd/lock-rpc-server_test.go index 0aab1acbd..2109fac6f 100644 --- a/cmd/lock-rpc-server_test.go +++ b/cmd/lock-rpc-server_test.go @@ -17,6 +17,7 @@ package cmd import ( + "net/url" "runtime" "sync" "testing" @@ -444,6 +445,7 @@ func TestLockServers(t *testing.T) { if runtime.GOOS == "windows" { return } + globalMinioHost = "" testCases := []struct { srvCmdConfig serverCmdConfig totalLockServers int @@ -452,12 +454,23 @@ func TestLockServers(t *testing.T) { { srvCmdConfig: serverCmdConfig{ isDistXL: true, - endPoints: []storageEndPoint{ - {"localhost", 9000, "/mnt/disk1"}, - {"1.1.1.2", 9000, "/mnt/disk2"}, - {"1.1.2.1", 9000, "/mnt/disk3"}, - {"1.1.2.2", 9000, "/mnt/disk4"}, - }, + endpoints: []*url.URL{{ + Scheme: "http", + Host: "localhost:9000", + Path: "/mnt/disk1", + }, { + Scheme: "http", + Host: "1.1.1.2:9000", + Path: "/mnt/disk2", + }, { + Scheme: "http", + Host: "1.1.2.1:9000", + Path: "/mnt/disk3", + }, { + Scheme: "http", + Host: "1.1.2.2:9000", + Path: "/mnt/disk4", + }}, }, totalLockServers: 1, }, @@ -465,15 +478,28 @@ func TestLockServers(t *testing.T) { { srvCmdConfig: serverCmdConfig{ isDistXL: true, - endPoints: []storageEndPoint{ - {"localhost", 9000, "/mnt/disk1"}, - {"localhost", 9000, "/mnt/disk2"}, - {"1.1.2.1", 9000, "/mnt/disk3"}, - {"1.1.2.2", 9000, "/mnt/disk4"}, - }, - ignoredEndPoints: []storageEndPoint{ - {"localhost", 9000, "/mnt/disk2"}, - }, + endpoints: []*url.URL{{ + Scheme: "http", + Host: "localhost:9000", + Path: "/mnt/disk1", + }, { + Scheme: "http", + Host: "localhost:9000", + Path: "/mnt/disk2", + }, { + Scheme: "http", + Host: "1.1.2.1:9000", + Path: "/mnt/disk3", + }, { + Scheme: "http", + Host: "1.1.2.2:9000", + Path: "/mnt/disk4", + }}, + ignoredEndpoints: []*url.URL{{ + Scheme: "http", + Host: "localhost:9000", + Path: "/mnt/disk2", + }}, }, totalLockServers: 1, }, diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index 50f7e5e1d..386ac5104 100644 --- a/cmd/namespace-lock.go +++ 
b/cmd/namespace-lock.go @@ -18,9 +18,9 @@ package cmd import ( "errors" + "net/url" pathutil "path" "runtime" - "strconv" "sync" "github.com/minio/dsync" @@ -31,13 +31,13 @@ var nsMutex *nsLockMap // Initialize distributed locking only in case of distributed setup. // Returns if the setup is distributed or not on success. -func initDsyncNodes(eps []storageEndPoint) error { +func initDsyncNodes(eps []*url.URL) error { cred := serverConfig.GetCredential() // Initialize rpc lock client information only if this instance is a distributed setup. var clnts []dsync.RPC myNode := -1 for _, ep := range eps { - if ep.host == "" || ep.port == 0 || ep.path == "" { + if ep == nil { return errInvalidArgument } clnts = append(clnts, newAuthClient(&authConfig{ @@ -45,9 +45,9 @@ func initDsyncNodes(eps []storageEndPoint) error { secretKey: cred.SecretAccessKey, // Construct a new dsync server addr. secureConn: isSSL(), - address: ep.host + ":" + strconv.Itoa(ep.port), + address: ep.Host, // Construct a new rpc path for the endpoint. - path: pathutil.Join(lockRPCPath, ep.path), + path: pathutil.Join(lockRPCPath, getPath(ep)), loginMethod: "Dsync.LoginHandler", })) if isLocalStorage(ep) && myNode == -1 { diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index e186b1b66..fc4fbfa4e 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -565,7 +565,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) { } func initFSObjectsB(disk string, t *testing.B) (obj ObjectLayer) { - endPoints, err := parseStorageEndPoints([]string{disk}, 0) + endPoints, err := parseStorageEndpoints([]string{disk}) if err != nil { t.Fatal("Unexpected err: ", err) } diff --git a/cmd/object-common.go b/cmd/object-common.go index f32900b5b..99ecf7633 100644 --- a/cmd/object-common.go +++ b/cmd/object-common.go @@ -18,6 +18,8 @@ package cmd import ( "net" + "net/url" + "runtime" "strings" "sync" ) @@ -104,21 +106,27 @@ func houseKeeping(storageDisks []StorageAPI) error { } // Check if a network path is local to this node. -func isLocalStorage(ep storageEndPoint) bool { - if ep.host == "" { +func isLocalStorage(ep *url.URL) bool { + if ep.Host == "" { return true } - if globalMinioHost != "" { - // if --address host:port was specified for distXL we short circuit only the endPoint - // that matches host:port - if globalMinioHost == ep.host && globalMinioPort == ep.port { + if globalMinioHost != "" && globalMinioPort != "" { + // if --address host:port was specified for distXL we short + // circuit only the endPoint that matches host:port + if net.JoinHostPort(globalMinioHost, globalMinioPort) == ep.Host { return true } return false } + // Split host to extract host information. + host, _, err := net.SplitHostPort(ep.Host) + if err != nil { + errorIf(err, "Cannot split host port") + return false + } // Resolve host to address to check if the IP is loopback. // If address resolution fails, assume it's a non-local host. - addrs, err := net.LookupHost(ep.host) + addrs, err := net.LookupHost(host) if err != nil { errorIf(err, "Failed to lookup host") return false @@ -149,12 +157,37 @@ func isLocalStorage(ep storageEndPoint) bool { return false } +// Fetch the path component from *url.URL*. +func getPath(ep *url.URL) string { + if ep == nil { + return "" + } + var diskPath string + // For windows ep.Path is usually empty + if runtime.GOOS == "windows" { + // For full URLs windows drive is part of URL path. 
+ // Eg: http://ip:port/C:\mydrive + if ep.Scheme == "http" || ep.Scheme == "https" { + // For windows trim off the preceding "/". + diskPath = ep.Path[1:] + } else { + // For the rest url splits drive letter into + // Scheme contruct the disk path back. + diskPath = ep.Scheme + ":" + ep.Opaque + } + } else { + // For other operating systems ep.Path is non empty. + diskPath = ep.Path + } + return diskPath +} + // Depending on the disk type network or local, initialize storage API. -func newStorageAPI(ep storageEndPoint) (storage StorageAPI, err error) { +func newStorageAPI(ep *url.URL) (storage StorageAPI, err error) { if isLocalStorage(ep) { - return newPosix(ep.path) + return newPosix(getPath(ep)) } - return newRPCClient(ep) + return newStorageRPC(ep) } // Initializes meta volume on all input storage disks. diff --git a/cmd/object-common_test.go b/cmd/object-common_test.go index f39379afe..f48b89f5a 100644 --- a/cmd/object-common_test.go +++ b/cmd/object-common_test.go @@ -17,6 +17,8 @@ package cmd import ( + "net/url" + "runtime" "sync" "testing" ) @@ -35,8 +37,9 @@ func TestHouseKeeping(t *testing.T) { defer removeRoots(noSpaceDirs) properStorage := []StorageAPI{} - for _, fs := range fsDirs { - sd, err := newPosix(fs) + for _, fsDir := range fsDirs { + var sd StorageAPI + sd, err = newPosix(fsDir) if err != nil { t.Fatalf("Failed to create a local disk-based storage layer %v", err) } @@ -44,8 +47,8 @@ func TestHouseKeeping(t *testing.T) { } noSpaceBackend := []StorageAPI{} - for _, noSpaceFS := range noSpaceDirs { - sd, err := newPosix(noSpaceFS) + for _, noSpaceDir := range noSpaceDirs { + sd, err := newPosix(noSpaceDir) if err != nil { t.Fatalf("Failed to create a local disk-based storage layer %v", err) } @@ -68,7 +71,6 @@ func TestHouseKeeping(t *testing.T) { if errs[index] != nil { return } - errs[index] = store.AppendFile(pathJoin(minioMetaBucket, tmpMetaPrefix), "hello.txt", []byte("hello")) }(i, store) } @@ -97,3 +99,52 @@ func TestHouseKeeping(t *testing.T) { } } } + +// Test constructing the final path. +func TestGetPath(t *testing.T) { + var testCases []struct { + ep *url.URL + path string + } + if runtime.GOOS != "windows" { + testCases = []struct { + ep *url.URL + path string + }{ + { + ep: nil, + path: "", + }, + { + ep: &url.URL{Path: "/test1"}, + path: "/test1", + }, + } + } else { + testCases = []struct { + ep *url.URL + path string + }{ + { + ep: nil, + path: "", + }, + { + ep: &url.URL{Opaque: "\\test1", Scheme: "C"}, + path: "C:\\test1", + }, + { + ep: &url.URL{Scheme: "http", Path: "/C:\\test1"}, + path: "C:\\test1", + }, + } + } + + // Validate all the test cases. 
+ for i, testCase := range testCases { + path := getPath(testCase.ep) + if path != testCase.path { + t.Fatalf("Test: %d Expected path %s, got %s", i+1, testCase.path, path) + } + } +} diff --git a/cmd/posix-utils_windows_test.go b/cmd/posix-utils_windows_test.go index 9cc7a21d6..7e608a612 100644 --- a/cmd/posix-utils_windows_test.go +++ b/cmd/posix-utils_windows_test.go @@ -48,7 +48,7 @@ func TestUNCPaths(t *testing.T) { defer os.RemoveAll("c:\\testdisk") var fs StorageAPI - fs, err = newPosix("c:\\testdisk") + fs, err = newPosix(`c:\testdisk`) if err != nil { t.Fatal(err) } @@ -66,7 +66,6 @@ func TestUNCPaths(t *testing.T) { } else if err == nil && !test.pass { t.Error(err) } - fs.DeleteFile("voldir", test.objName) } } @@ -81,8 +80,9 @@ func TestUNCPathENOTDIR(t *testing.T) { } // Cleanup on exit of test defer os.RemoveAll("c:\\testdisk") + var fs StorageAPI - fs, err = newPosix("c:\\testdisk") + fs, err = newPosix(`c:\testdisk`) if err != nil { t.Fatal(err) } @@ -106,58 +106,30 @@ func TestUNCPathENOTDIR(t *testing.T) { } } -// Test to validate that path name in UNC form works -func TestUNCPathDiskName(t *testing.T) { - var err error - // Instantiate posix object to manage a disk - longPathDisk := `\\?\c:\testdisk` - err = mkdirAll(longPathDisk, 0777) - if err != nil { - t.Fatal(err) - } - // Cleanup on exit of test - defer removeAll(longPathDisk) - var fs StorageAPI - fs, err = newPosix(longPathDisk) - if err != nil { - t.Fatal(err) - } - - // Create volume to use in conjunction with other StorageAPI's file API(s) - err = fs.MakeVol("voldir") - if err != nil { - t.Fatal(err) - } -} - // Test to validate 32k path works on windows platform func Test32kUNCPath(t *testing.T) { var err error - // Instantiate posix object to manage a disk - longDiskName := `\\?\c:` + // The following calculation was derived empirically. It is not exactly MAX_PATH - len(longDiskName) + // possibly due to expansion rules as mentioned here - + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath + var longPathName string for { compt := strings.Repeat("a", 255) - if len(compt)+len(longDiskName)+1 > 32767 { + if len(compt)+len(longPathName)+1 > 32767 { break } - longDiskName = longDiskName + `\` + compt + longPathName = longPathName + `\` + compt } - - if len(longDiskName) < 32767 { - // The following calculation was derived empirically. It is not exactly MAX_PATH - len(longDiskName) - // possibly due to expansion rules as mentioned here - - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath - remaining := 32767 - 25 - len(longDiskName) - 10 - longDiskName = longDiskName + `\` + strings.Repeat("a", remaining) - } - err = mkdirAll(longDiskName, 0777) + longPathName = "C:" + longPathName + err = mkdirAll(longPathName, 0777) if err != nil { t.Fatal(err) } // Cleanup on exit of test - defer removeAll(longDiskName) - _, err = newPosix(longDiskName) + defer removeAll(longPathName) + + _, err = newPosix(longPathName) if err != nil { t.Fatal(err) } diff --git a/cmd/posix.go b/cmd/posix.go index e26375bdc..9e8424a35 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -41,12 +41,11 @@ const ( // posix - implements StorageAPI interface. 
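The getPath cases above can be hard to visualize, so here is a condensed, illustrative restatement (diskPath is a hypothetical name and the URL in main is only an example): on Windows a full URL keeps the drive letter inside the URL path, while a bare drive path such as C:\dir ends up split by url.Parse into Scheme (the drive letter) and Opaque (the rest).

package main

import (
	"fmt"
	"net/url"
	"runtime"
)

// diskPath condenses the patch's getPath() helper: recover the on-disk path
// from a parsed endpoint URL. Sketch only, not a drop-in replacement.
func diskPath(ep *url.URL) string {
	if ep == nil {
		return ""
	}
	if runtime.GOOS == "windows" {
		if ep.Scheme == "http" || ep.Scheme == "https" {
			return ep.Path[1:] // strip the "/" preceding the drive letter
		}
		return ep.Scheme + ":" + ep.Opaque // rebuild "C:\dir" from Scheme+Opaque
	}
	return ep.Path
}

func main() {
	u, _ := url.Parse("http://192.168.1.11:9000/mnt/export1")
	fmt.Println(u.Host, diskPath(u)) // on Linux: 192.168.1.11:9000 /mnt/export1
}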
type posix struct { - ioErrCount int32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG - diskPath string - suppliedDiskPath string - minFreeSpace int64 - minFreeInodes int64 - pool sync.Pool + ioErrCount int32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG + diskPath string + minFreeSpace int64 + minFreeInodes int64 + pool sync.Pool } var errFaultyDisk = errors.New("Faulty disk") @@ -100,22 +99,19 @@ func isDirEmpty(dirname string) bool { } // Initialize a new storage disk. -func newPosix(diskPath string) (StorageAPI, error) { - if diskPath == "" { +func newPosix(path string) (StorageAPI, error) { + if path == "" { return nil, errInvalidArgument } - suppliedDiskPath := diskPath - var err error // Disallow relative paths, figure out absolute paths. - diskPath, err = filepath.Abs(diskPath) + diskPath, err := filepath.Abs(path) if err != nil { return nil, err } fs := &posix{ - suppliedDiskPath: suppliedDiskPath, - diskPath: diskPath, - minFreeSpace: fsMinFreeSpace, - minFreeInodes: fsMinFreeInodesPercent, + diskPath: diskPath, + minFreeSpace: fsMinFreeSpace, + minFreeInodes: fsMinFreeInodesPercent, // 1MiB buffer pool for posix internal operations. pool: sync.Pool{ New: func() interface{} { @@ -182,7 +178,7 @@ func (s *posix) checkDiskFree() (err error) { // Implements stringer compatible interface. func (s *posix) String() string { - return s.suppliedDiskPath + return s.diskPath } // DiskInfo provides current information about disk space usage, diff --git a/cmd/posix_test.go b/cmd/posix_test.go index 200ee8674..470473e86 100644 --- a/cmd/posix_test.go +++ b/cmd/posix_test.go @@ -35,6 +35,7 @@ func newPosixTestSetup() (StorageAPI, string, error) { if err != nil { return nil, "", err } + // Initialize a new posix layer. posixStorage, err := newPosix(diskPath) if err != nil { return nil, "", err @@ -179,8 +180,8 @@ func TestNewPosix(t *testing.T) { // List of all tests for posix initialization. testCases := []struct { - diskPath string - err error + name string + err error }{ // Validates input argument cannot be empty. { @@ -203,7 +204,8 @@ func TestNewPosix(t *testing.T) { // Validate all test cases. for i, testCase := range testCases { - _, err := newPosix(testCase.diskPath) + // Initialize a new posix layer. + _, err := newPosix(testCase.name) if err != testCase.err { t.Fatalf("Test %d failed wanted: %s, got: %s", i+1, err, testCase.err) } diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index 293d229f7..fb742ec9f 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -17,6 +17,7 @@ package cmd import ( + "net/url" "time" "github.com/minio/mc/pkg/console" @@ -184,7 +185,10 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions { // Implements a jitter backoff loop for formatting all disks during // initialization of the server. -func retryFormattingDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) error { +func retryFormattingDisks(firstDisk bool, firstEndpoint *url.URL, storageDisks []StorageAPI) error { + if firstEndpoint == nil { + return errInvalidArgument + } if storageDisks == nil { return errInvalidArgument } @@ -227,7 +231,7 @@ func retryFormattingDisks(firstDisk bool, firstEndpoint string, storageDisks []S // Validate formats load before proceeding forward. 
err := genericFormatCheck(formatConfigs, sErrs) if err == nil { - printHealMsg(firstEndpoint, storageDisks, printOnceFn()) + printHealMsg(firstEndpoint.String(), storageDisks, printOnceFn()) } return err case WaitForQuorum: @@ -256,23 +260,28 @@ func retryFormattingDisks(firstDisk bool, firstEndpoint string, storageDisks []S } // Initialize storage disks based on input arguments. -func initStorageDisks(endPoints, ignoredEndPoints []storageEndPoint) ([]StorageAPI, error) { +func initStorageDisks(endpoints, ignoredEndpoints []*url.URL) ([]StorageAPI, error) { // Single disk means we will use FS backend. - if len(endPoints) == 1 { - storage, err := newStorageAPI(endPoints[0]) + if len(endpoints) == 1 { + if endpoints[0] == nil { + return nil, errInvalidArgument + } + storage, err := newStorageAPI(endpoints[0]) if err != nil && err != errDiskNotFound { return nil, err } return []StorageAPI{storage}, nil } - // Otherwise proceed with XL setup. - // Bootstrap disks. - storageDisks := make([]StorageAPI, len(endPoints)) - for index, ep := range endPoints { + // Otherwise proceed with XL setup. Bootstrap disks. + storageDisks := make([]StorageAPI, len(endpoints)) + for index, ep := range endpoints { + if ep == nil { + return nil, errInvalidArgument + } // Check if disk is ignored. ignored := false - for _, iep := range ignoredEndPoints { - if ep == iep { + for _, iep := range ignoredEndpoints { + if *ep == *iep { ignored = true break } @@ -294,7 +303,10 @@ func initStorageDisks(endPoints, ignoredEndPoints []storageEndPoint) ([]StorageA } // Format disks before initialization object layer. -func waitForFormatDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) (err error) { +func waitForFormatDisks(firstDisk bool, firstEndpoint *url.URL, storageDisks []StorageAPI) (err error) { + if firstEndpoint == nil { + return errInvalidArgument + } if storageDisks == nil { return errInvalidArgument } diff --git a/cmd/s3-peer-client.go b/cmd/s3-peer-client.go index 002bab97f..719bbe239 100644 --- a/cmd/s3-peer-client.go +++ b/cmd/s3-peer-client.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "net/rpc" + "net/url" "path" "sync" "time" @@ -39,7 +40,7 @@ type s3Peers struct { peers []string } -func initGlobalS3Peers(eps []storageEndPoint) { +func initGlobalS3Peers(eps []*url.URL) { // Get list of de-duplicated peers. peers := getAllPeers(eps) @@ -111,16 +112,17 @@ func (s3p *s3Peers) Close() error { } // Returns the network addresses of all Minio servers in the cluster in `host:port` format. -func getAllPeers(eps []storageEndPoint) (peers []string) { +func getAllPeers(eps []*url.URL) (peers []string) { if eps == nil { return nil } peers = []string{globalMinioAddr} // Starts with a default peer. for _, ep := range eps { - // Rest of the peers configured. - if ep.host != "" { - peers = append(peers, fmt.Sprintf("%s:%d", ep.host, ep.port)) + if ep == nil { + return nil } + // Rest of the peers configured. + peers = append(peers, ep.Host) } return peers } diff --git a/cmd/s3-peer-client_test.go b/cmd/s3-peer-client_test.go index 07c14f03d..31f471c5e 100644 --- a/cmd/s3-peer-client_test.go +++ b/cmd/s3-peer-client_test.go @@ -17,6 +17,7 @@ package cmd import ( + "net/url" "reflect" "testing" ) @@ -24,16 +25,14 @@ import ( // Validates getAllPeers, fetches all peers based on list of storage endpoints. 
func TestGetAllPeers(t *testing.T) { testCases := []struct { - eps []storageEndPoint + eps []*url.URL peers []string }{ {nil, nil}, - {[]storageEndPoint{{path: "/mnt/disk1"}}, []string{globalMinioAddr}}, - {[]storageEndPoint{{ - host: "localhost", - port: 9001, - }}, []string{ - globalMinioAddr, "localhost:9001", + {[]*url.URL{nil}, nil}, + {[]*url.URL{{Path: "/mnt/disk1"}}, []string{globalMinioAddr, ""}}, + {[]*url.URL{{Host: "localhost:9001"}}, []string{globalMinioAddr, + "localhost:9001", }}, } diff --git a/cmd/server-main.go b/cmd/server-main.go index 75a7046b6..7926c1cbb 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -17,18 +17,15 @@ package cmd import ( - "errors" "fmt" "net" "net/http" + "net/url" "os" - "runtime" "strconv" "strings" "time" - "regexp" - "github.com/minio/cli" ) @@ -90,126 +87,44 @@ EXAMPLES: /mnt/export3/ /mnt/export4/ /mnt/export5/ /mnt/export6/ /mnt/export7/ \ /mnt/export8/ /mnt/export9/ /mnt/export10/ /mnt/export11/ /mnt/export12/ - 6. Start minio server on a 4 node distributed setup. Type the following command on all the 4 nodes. + 6. Start minio server for a 4 node distributed setup. Type the following command on all the 4 nodes exactly. $ export MINIO_ACCESS_KEY=minio $ export MINIO_SECRET_KEY=miniostorage - $ minio {{.Name}} 192.168.1.11:/mnt/export/ 192.168.1.12:/mnt/export/ \ - 192.168.1.13:/mnt/export/ 192.168.1.14:/mnt/export/ + $ minio {{.Name}} http://192.168.1.11/mnt/export/ http://192.168.1.12/mnt/export/ \ + http://192.168.1.13/mnt/export/ http://192.168.1.14/mnt/export/ + + 7. Start minio server on a 4 node distributed setup. Type the following command on all the 4 nodes exactly. + $ minio {{.Name}} http://minio:miniostorage@192.168.1.11/mnt/export/ \ + http://minio:miniostorage@192.168.1.12/mnt/export/ \ + http://minio:miniostorage@192.168.1.13/mnt/export/ \ + http://minio:miniostorage@192.168.1.14/mnt/export/ `, } type serverCmdConfig struct { serverAddr string - endPoints []storageEndPoint - ignoredEndPoints []storageEndPoint + endpoints []*url.URL + ignoredEndpoints []*url.URL isDistXL bool // True only if its distributed XL. storageDisks []StorageAPI } -// End point is specified in the command line as host:port:path or host:path or path -// host:port:path or host:path - for distributed XL. Default port is 9000. -// just path - for single node XL or FS. -type storageEndPoint struct { - host string // Will be empty for single node XL and FS - port int // Will be valid for distributed XL - path string // Will be valid for all configs -} - -// Returns string form. -func (ep storageEndPoint) String() string { - var str []string - if ep.host != "" { - str = append(str, ep.host) - } - if ep.port != 0 { - str = append(str, strconv.Itoa(ep.port)) - } - if ep.path != "" { - str = append(str, ep.path) - } - return strings.Join(str, ":") -} - -// Returns if ep is present in the eps list. -func (ep storageEndPoint) presentIn(eps []storageEndPoint) bool { - for _, entry := range eps { - if entry == ep { - return true - } - } - return false -} - -// Parse end-point (of the form host:port:path or host:path or path) -func parseStorageEndPoint(ep string, defaultPort int) (storageEndPoint, error) { - if runtime.GOOS == "windows" { - // Try to match path, ex. C:\export or export - matched, err := regexp.MatchString(`^([a-zA-Z]:\\[^:]+|[^:]+)$`, ep) - if err != nil { - return storageEndPoint{}, err - } - if matched { - return storageEndPoint{path: ep}, nil - } - - // Try to match host:path ex. 
127.0.0.1:C:\export - re, err := regexp.Compile(`^([^:]+):([a-zA-Z]:\\[^:]+)$`) - if err != nil { - return storageEndPoint{}, err - } - result := re.FindStringSubmatch(ep) - if len(result) != 0 { - return storageEndPoint{host: result[1], port: defaultPort, path: result[2]}, nil - } - - // Try to match host:port:path ex. 127.0.0.1:443:C:\export - re, err = regexp.Compile(`^([^:]+):([0-9]+):([a-zA-Z]:\\[^:]+)$`) - if err != nil { - return storageEndPoint{}, err - } - result = re.FindStringSubmatch(ep) - if len(result) != 0 { - portInt, err := strconv.Atoi(result[2]) - if err != nil { - return storageEndPoint{}, err - } - return storageEndPoint{host: result[1], port: portInt, path: result[3]}, nil - } - return storageEndPoint{}, errors.New("Unable to parse endpoint " + ep) - } - // For *nix OSes - parts := strings.Split(ep, ":") - var parsedep storageEndPoint - switch len(parts) { - case 1: - parsedep = storageEndPoint{path: parts[0]} - case 2: - parsedep = storageEndPoint{host: parts[0], port: defaultPort, path: parts[1]} - case 3: - port, err := strconv.Atoi(parts[1]) - if err != nil { - return storageEndPoint{}, err - } - parsedep = storageEndPoint{host: parts[0], port: port, path: parts[2]} - default: - return storageEndPoint{}, errors.New("Unable to parse " + ep) - } - return parsedep, nil -} - -// Parse an array of end-points (passed on the command line) -func parseStorageEndPoints(eps []string, defaultPort int) (endpoints []storageEndPoint, err error) { +// Parse an array of end-points (from the command line) +func parseStorageEndpoints(eps []string) (endpoints []*url.URL, err error) { for _, ep := range eps { if ep == "" { - continue + return nil, errInvalidArgument } - var endpoint storageEndPoint - endpoint, err = parseStorageEndPoint(ep, defaultPort) + var u *url.URL + u, err = url.Parse(ep) if err != nil { return nil, err } - endpoints = append(endpoints, endpoint) + if u.Host != "" && globalMinioHost == "" { + u.Host = net.JoinHostPort(u.Host, globalMinioPort) + } + endpoints = append(endpoints, u) } return endpoints, nil } @@ -305,7 +220,7 @@ func initServerConfig(c *cli.Context) { } // Validate if input disks are sufficient for initializing XL. -func checkSufficientDisks(eps []storageEndPoint) error { +func checkSufficientDisks(eps []*url.URL) error { // Verify total number of disks. total := len(eps) if total > maxErasureBlocks { @@ -331,30 +246,31 @@ func checkSufficientDisks(eps []storageEndPoint) error { } // Validate input disks. -func validateDisks(endPoints []storageEndPoint, ignoredEndPoints []storageEndPoint) []StorageAPI { - isXL := len(endPoints) > 1 +func validateDisks(endpoints []*url.URL, ignoredEndpoints []*url.URL) []StorageAPI { + isXL := len(endpoints) > 1 if isXL { // Validate if input disks have duplicates in them. - err := checkDuplicateEndPoints(endPoints) + err := checkDuplicateEndpoints(endpoints) fatalIf(err, "Invalid disk arguments for server.") // Validate if input disks are sufficient for erasure coded setup. - err = checkSufficientDisks(endPoints) - fatalIf(err, "Invalid disk arguments for server.") + err = checkSufficientDisks(endpoints) + fatalIf(err, "Invalid disk arguments for server. %#v", endpoints) } - storageDisks, err := initStorageDisks(endPoints, ignoredEndPoints) + storageDisks, err := initStorageDisks(endpoints, ignoredEndpoints) fatalIf(err, "Unable to initialize storage disks.") return storageDisks } // Returns if slice of disks is a distributed setup. 
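To make the new argument syntax concrete, the snippet below shows what url.Parse yields for an endpoint like the ones in the EXAMPLES section and how the default port ("9000", per globals.go) gets attached. It is a simplified sketch that assumes the endpoint carries no explicit port and ignores the --address special case handled by parseStorageEndpoints.

package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	// New-style endpoint: scheme://host/path, no explicit port.
	u, err := url.Parse("http://192.168.1.11/mnt/export")
	if err != nil {
		panic(err)
	}
	// Attach the default server port when the endpoint names a host.
	if u.Host != "" {
		u.Host = net.JoinHostPort(u.Host, "9000")
	}
	fmt.Println(u.Scheme) // http
	fmt.Println(u.Host)   // 192.168.1.11:9000
	fmt.Println(u.Path)   // /mnt/export

	// Old-style local path arguments still parse, just with an empty host.
	local, _ := url.Parse("/mnt/export")
	fmt.Println(local.Host == "", local.Path) // true /mnt/export
}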
-func isDistributedSetup(eps []storageEndPoint) (isDist bool) { - // Port to connect to for the lock servers in a distributed setup. +func isDistributedSetup(eps []*url.URL) (isDist bool) { + // Validate if one the disks is not local. for _, ep := range eps { if !isLocalStorage(ep) { // One or more disks supplied as arguments are not // attached to the local node. isDist = true + break } } return isDist @@ -372,29 +288,28 @@ func serverMain(c *cli.Context) { // Check if requested port is available. host, portStr, err := net.SplitHostPort(serverAddr) fatalIf(err, "Unable to parse %s.", serverAddr) - - portInt, err := strconv.Atoi(portStr) - fatalIf(err, "Invalid port number.") - - fatalIf(checkPortAvailability(portInt), "Port unavailable %d", portInt) - - // Saves host and port in a globally accessible value. - globalMinioPort = portInt globalMinioHost = host + // Check if requested port is available. + fatalIf(checkPortAvailability(portStr), "Port unavailable %s", portStr) + globalMinioPort = portStr + // Disks to be ignored in server init, to skip format healing. - ignoredDisks, err := parseStorageEndPoints(strings.Split(c.String("ignore-disks"), ","), portInt) - fatalIf(err, "Unable to parse storage endpoints %s", strings.Split(c.String("ignore-disks"), ",")) + var ignoredEndpoints []*url.URL + if len(c.String("ignore-disks")) > 0 { + ignoredEndpoints, err = parseStorageEndpoints(strings.Split(c.String("ignore-disks"), ",")) + fatalIf(err, "Unable to parse storage endpoints %s", strings.Split(c.String("ignore-disks"), ",")) + } // Disks to be used in server init. - disks, err := parseStorageEndPoints(c.Args(), portInt) + endpoints, err := parseStorageEndpoints(c.Args()) fatalIf(err, "Unable to parse storage endpoints %s", c.Args()) - // Initialize server config. - initServerConfig(c) - // Check 'server' cli arguments. - storageDisks := validateDisks(disks, ignoredDisks) + storageDisks := validateDisks(endpoints, ignoredEndpoints) + + // Check if endpoints are part of distributed setup. + isDistXL := isDistributedSetup(endpoints) // Cleanup objects that weren't successfully written into the namespace. fatalIf(houseKeeping(storageDisks), "Unable to purge temporary files.") @@ -402,16 +317,28 @@ func serverMain(c *cli.Context) { // If https. tls := isSSL() + // Fail if SSL is not configured and ssl endpoints are provided for distributed setup. + if !tls && isDistXL { + for _, ep := range endpoints { + if ep.Host != "" && ep.Scheme == "https" { + fatalIf(errInvalidArgument, "Cannot use secure endpoints when SSL is not configured %s", ep) + } + } + } + + // Initialize server config. + initServerConfig(c) + // First disk argument check if it is local. - firstDisk := isLocalStorage(disks[0]) + firstDisk := isLocalStorage(endpoints[0]) // Configure server. srvConfig := serverCmdConfig{ serverAddr: serverAddr, - endPoints: disks, - ignoredEndPoints: ignoredDisks, + endpoints: endpoints, + ignoredEndpoints: ignoredEndpoints, storageDisks: storageDisks, - isDistXL: isDistributedSetup(disks), + isDistXL: isDistXL, } // Configure server. @@ -419,12 +346,12 @@ func serverMain(c *cli.Context) { fatalIf(err, "Unable to configure one of server's RPC services.") // Set nodes for dsync for distributed setup. - if srvConfig.isDistXL { - fatalIf(initDsyncNodes(disks), "Unable to initialize distributed locking") + if isDistXL { + fatalIf(initDsyncNodes(endpoints), "Unable to initialize distributed locking") } // Initialize name space lock. 
- initNSLock(srvConfig.isDistXL) + initNSLock(isDistXL) // Initialize a new HTTP server. apiServer := NewServerMux(serverAddr, handler) @@ -436,7 +363,7 @@ func serverMain(c *cli.Context) { globalMinioAddr = getLocalAddress(srvConfig) // Initialize S3 Peers inter-node communication - initGlobalS3Peers(disks) + initGlobalS3Peers(endpoints) // Start server, automatically configures TLS if certs are available. go func(tls bool) { @@ -451,7 +378,7 @@ func serverMain(c *cli.Context) { }(tls) // Wait for formatting of disks. - err = waitForFormatDisks(firstDisk, endPoints[0], storageDisks) + err = waitForFormatDisks(firstDisk, endpoints[0], storageDisks) fatalIf(err, "formatting storage disks failed") // Once formatted, initialize object layer. diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 89e64575b..336d51d90 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -167,7 +167,7 @@ func TestCheckSufficientDisks(t *testing.T) { // Validates different variations of input disks. for i, testCase := range testCases { - endpoints, err := parseStorageEndPoints(testCase.disks, 0) + endpoints, err := parseStorageEndpoints(testCase.disks) if err != nil { t.Fatalf("Unexpected error %s", err) } @@ -201,7 +201,7 @@ func TestCheckServerSyntax(t *testing.T) { t.Errorf("Test %d failed to parse arguments %s", i+1, disks) } defer removeRoots(disks) - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatalf("Unexpected error %s", err) } @@ -219,26 +219,25 @@ func TestIsDistributedSetup(t *testing.T) { disks []string result bool }{ - {[]string{`4.4.4.4:c:\mnt\disk1`, `4.4.4.4:c:\mnt\disk2`}, true}, - {[]string{`4.4.4.4:c:\mnt\disk1`, `localhost:c:\mnt\disk2`}, true}, - {[]string{`localhost:c:\mnt\disk1`, `localhost:c:\mnt\disk2`}, false}, + {[]string{`http://4.4.4.4:80/c:\mnt\disk1`, `http://4.4.4.4:80/c:\mnt\disk2`}, true}, + {[]string{`http://4.4.4.4:9000/c:\mnt\disk1`, `http://127.0.0.1:9000/c:\mnt\disk2`}, true}, + {[]string{`http://127.0.0.1:9000/c:\mnt\disk1`, `http://127.0.0.1:9001/c:\mnt\disk2`}, true}, {[]string{`c:\mnt\disk1`, `c:\mnt\disk2`}, false}, } - } else { testCases = []struct { disks []string result bool }{ - {[]string{"4.4.4.4:/mnt/disk1", "4.4.4.4:/mnt/disk2"}, true}, - {[]string{"4.4.4.4:/mnt/disk1", "localhost:/mnt/disk2"}, true}, - {[]string{"localhost:/mnt/disk1", "localhost:/mnt/disk2"}, false}, + {[]string{"http://4.4.4.4:9000/mnt/disk1", "http://4.4.4.4:9000/mnt/disk2"}, true}, + {[]string{"http://4.4.4.4:9000/mnt/disk1", "http://127.0.0.1:9000/mnt/disk2"}, true}, + {[]string{"http://127.0.0.1:9000/mnt/disk1", "http://127.0.0.1:9000/mnt/disk2"}, true}, {[]string{"/mnt/disk1", "/mnt/disk2"}, false}, } } for i, test := range testCases { - endpoints, err := parseStorageEndPoints(test.disks, 0) + endpoints, err := parseStorageEndpoints(test.disks) if err != nil { t.Fatalf("Unexpected error %s", err) } diff --git a/cmd/server-mux_test.go b/cmd/server-mux_test.go index d209ce587..a8f80216b 100644 --- a/cmd/server-mux_test.go +++ b/cmd/server-mux_test.go @@ -31,7 +31,6 @@ import ( "net/http" "net/http/httptest" "os" - "strconv" "sync" "testing" "time" @@ -153,7 +152,7 @@ func TestServerCloseBlocking(t *testing.T) { func TestListenAndServePlain(t *testing.T) { wait := make(chan struct{}) - addr := "127.0.0.1:" + strconv.Itoa(getFreePort()) + addr := net.JoinHostPort("127.0.0.1", getFreePort()) errc := make(chan error) once := &sync.Once{} @@ -203,7 +202,7 @@ func TestListenAndServePlain(t 
*testing.T) { func TestListenAndServeTLS(t *testing.T) { wait := make(chan struct{}) - addr := "127.0.0.1:" + strconv.Itoa(getFreePort()) + addr := net.JoinHostPort("127.0.0.1", getFreePort()) errc := make(chan error) once := &sync.Once{} diff --git a/cmd/server_utils_test.go b/cmd/server_utils_test.go index d0b42fef3..7036cfd4d 100644 --- a/cmd/server_utils_test.go +++ b/cmd/server_utils_test.go @@ -101,7 +101,7 @@ func calculateStreamContentLength(dataLen, chunkSize int64) int64 { } // Ask the kernel for a free open port. -func getFreePort() int { +func getFreePort() string { addr, err := net.ResolveTCPAddr("tcp", "localhost:0") if err != nil { panic(err) @@ -112,7 +112,7 @@ func getFreePort() int { panic(err) } defer l.Close() - return l.Addr().(*net.TCPAddr).Port + return fmt.Sprintf("%d", l.Addr().(*net.TCPAddr).Port) } func verifyError(c *C, response *http.Response, code, description string, statusCode int) { diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index eddbbbaa4..0e75a1f87 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -17,10 +17,10 @@ package cmd import ( - "fmt" "io" "net" "net/rpc" + "net/url" "path" "github.com/minio/minio/pkg/disk" @@ -92,22 +92,28 @@ func toStorageErr(err error) error { return err } -// Initialize new rpc client. -func newRPCClient(ep storageEndPoint) (StorageAPI, error) { - // Input validation. - if ep.host == "" || ep.port == 0 || ep.path == "" { +// Initialize new storage rpc client. +func newStorageRPC(ep *url.URL) (StorageAPI, error) { + if ep == nil { return nil, errInvalidArgument } // Dial minio rpc storage http path. - rpcPath := path.Join(storageRPCPath, ep.path) - rpcAddr := fmt.Sprintf("%s:%d", ep.host, ep.port) + rpcPath := path.Join(storageRPCPath, getPath(ep)) + rpcAddr := ep.Host // Initialize rpc client with network address and rpc path. - cred := serverConfig.GetCredential() + accessKeyID := serverConfig.GetCredential().AccessKeyID + secretAccessKey := serverConfig.GetCredential().SecretAccessKey + if ep.User != nil { + accessKeyID = ep.User.Username() + if key, set := ep.User.Password(); set { + secretAccessKey = key + } + } rpcClient := newAuthClient(&authConfig{ - accessKey: cred.AccessKeyID, - secretKey: cred.SecretAccessKey, + accessKey: accessKeyID, + secretKey: secretAccessKey, secureConn: isSSL(), address: rpcAddr, path: rpcPath, @@ -116,8 +122,8 @@ func newRPCClient(ep storageEndPoint) (StorageAPI, error) { // Initialize network storage. 
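Because endpoints are now full URLs, per-node credentials can ride in the userinfo part (http://minio:miniostorage@host/path, as in example 7 of the help text) and fall back to the server's configured credential when absent. A small sketch of that extraction follows; credentialsFrom and the default values are illustrative.

package main

import (
	"fmt"
	"net/url"
)

// credentialsFrom pulls an access/secret key pair out of an endpoint URL,
// keeping the supplied defaults when the URL carries no userinfo.
func credentialsFrom(ep *url.URL, accessKey, secretKey string) (string, string) {
	if ep.User != nil {
		accessKey = ep.User.Username()
		if secret, ok := ep.User.Password(); ok {
			secretKey = secret
		}
	}
	return accessKey, secretKey
}

func main() {
	u, _ := url.Parse("http://minio:miniostorage@192.168.1.11/mnt/export")
	ak, sk := credentialsFrom(u, "defaultAccess", "defaultSecret")
	fmt.Println(ak, sk) // minio miniostorage
}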
ndisk := &networkStorage{ - netAddr: ep.host, - netPath: ep.path, + netAddr: ep.Host, + netPath: getPath(ep), rpcClient: rpcClient, } diff --git a/cmd/storage-rpc-client_test.go b/cmd/storage-rpc-client_test.go index 9da47f41c..7a4c61b9d 100644 --- a/cmd/storage-rpc-client_test.go +++ b/cmd/storage-rpc-client_test.go @@ -23,6 +23,7 @@ import ( "io" "net" "net/rpc" + "net/url" "runtime" "testing" ) @@ -144,17 +145,26 @@ func (s *TestRPCStorageSuite) SetUpSuite(c *testing.T) { s.testServer = StartTestStorageRPCServer(c, s.serverType, 1) listenAddress := s.testServer.Server.Listener.Addr().String() - for _, disk := range s.testServer.Disks { - remoteEndPoint, err := parseStorageEndPoint(listenAddress+":"+disk.path, 0) - if err != nil { - c.Fatalf("Unexpected error %s", err) - } - storageDisk, err := newRPCClient(remoteEndPoint) + for _, ep := range s.testServer.Disks { + ep.Host = listenAddress + storageDisk, err := newStorageRPC(ep) if err != nil { c.Fatal("Unable to initialize RPC client", err) } s.remoteDisks = append(s.remoteDisks, storageDisk) } + _, err := newStorageRPC(nil) + if err != errInvalidArgument { + c.Fatalf("Unexpected error %s, expecting %s", err, errInvalidArgument) + } + u, err := url.Parse("http://abcd:abcd123@localhost/mnt/disk") + if err != nil { + c.Fatal("Unexpected error", err) + } + _, err = newStorageRPC(u) + if err != nil { + c.Fatal("Unexpected error", err) + } } // No longer used with gocheck, but used in explicit teardown code in diff --git a/cmd/storage-rpc-server.go b/cmd/storage-rpc-server.go index 0e00840b0..a8d563de5 100644 --- a/cmd/storage-rpc-server.go +++ b/cmd/storage-rpc-server.go @@ -217,21 +217,24 @@ func (s *storageServer) TryInitHandler(args *GenericArgs, reply *GenericReply) e } // Initialize new storage rpc. -func newRPCServer(serverConfig serverCmdConfig) (servers []*storageServer, err error) { - for _, ep := range serverConfig.endPoints { - if ep.presentIn(serverConfig.ignoredEndPoints) { - // Do not init ignored end point. +func newRPCServer(srvConfig serverCmdConfig) (servers []*storageServer, err error) { + for _, ep := range srvConfig.endpoints { + if containsEndpoint(srvConfig.ignoredEndpoints, ep) { + // Do not init disk RPC for ignored end point. continue } // e.g server:/mnt/disk1 if isLocalStorage(ep) { - storage, err := newPosix(ep.path) + // Get the posix path. 
+ path := getPath(ep) + var storage StorageAPI + storage, err = newPosix(path) if err != nil && err != errDiskNotFound { return nil, err } servers = append(servers, &storageServer{ storage: storage, - path: ep.path, + path: path, }) } } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 2950d1216..e306f82b6 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -62,7 +62,7 @@ func prepareFS() (ObjectLayer, string, error) { if err != nil { return nil, "", err } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { return nil, "", err } @@ -80,7 +80,7 @@ func prepareXL() (ObjectLayer, []string, error) { if err != nil { return nil, nil, err } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { return nil, nil, err } @@ -154,7 +154,7 @@ func isSameType(obj1, obj2 interface{}) bool { // defer s.Stop() type TestServer struct { Root string - Disks []storageEndPoint + Disks []*url.URL AccessKey string SecretKey string Server *httptest.Server @@ -182,7 +182,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { credentials := serverConfig.GetCredential() testServer.Root = root - testServer.Disks, err = parseStorageEndPoints(disks, 0) + testServer.Disks, err = parseStorageEndpoints(disks) if err != nil { t.Fatalf("Unexpected error %s", err) } @@ -195,7 +195,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { } srvCmdCfg := serverCmdConfig{ - endPoints: testServer.Disks, + endpoints: testServer.Disks, storageDisks: storageDisks, } httpHandler, err := configureServerHandler( @@ -216,16 +216,14 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { globalObjLayerMutex.Unlock() // initialize peer rpc - _, portStr, err := net.SplitHostPort(srvCmdCfg.serverAddr) - if err != nil { - t.Fatal("Early setup error:", err) - } - globalMinioPort, err = strconv.Atoi(portStr) + host, port, err := net.SplitHostPort(srvCmdCfg.serverAddr) if err != nil { t.Fatal("Early setup error:", err) } + globalMinioHost = host + globalMinioPort = port globalMinioAddr = getLocalAddress(srvCmdCfg) - endpoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatal("Early setup error:", err) } @@ -331,7 +329,7 @@ func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int) if err != nil { t.Fatal("Failed to create disks for the backend") } - endPoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatalf("%s", err) } @@ -347,13 +345,13 @@ func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int) credentials := serverConfig.GetCredential() testRPCServer.Root = root - testRPCServer.Disks = endPoints + testRPCServer.Disks = endpoints testRPCServer.AccessKey = credentials.AccessKeyID testRPCServer.SecretKey = credentials.SecretAccessKey // Run TestServer. 
testRPCServer.Server = httptest.NewServer(initTestStorageRPCEndPoint(serverCmdConfig{ - endPoints: endPoints, + endpoints: endpoints, })) return testRPCServer } @@ -366,7 +364,7 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { if err != nil { t.Fatal("Failed to create disks for the backend") } - endPoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatalf("%s", err) } @@ -382,12 +380,12 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { credentials := serverConfig.GetCredential() testRPCServer.Root = root - testRPCServer.Disks = endPoints + testRPCServer.Disks = endpoints testRPCServer.AccessKey = credentials.AccessKeyID testRPCServer.SecretKey = credentials.SecretAccessKey // create temporary backend for the test server. - objLayer, storageDisks, err := initObjectLayer(endPoints, nil) + objLayer, storageDisks, err := initObjectLayer(endpoints, nil) if err != nil { t.Fatalf("Failed obtaining Temp Backend: %s", err) } @@ -398,7 +396,7 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { globalObjLayerMutex.Unlock() srvCfg := serverCmdConfig{ - endPoints: endPoints, + endpoints: endpoints, storageDisks: storageDisks, } @@ -438,7 +436,7 @@ func StartTestControlRPCServer(t TestErrHandler, instanceType string) TestServer if err != nil { t.Fatal("Failed to create disks for the backend") } - endPoints, err := parseStorageEndPoints(disks, 0) + endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatalf("%s", err) } @@ -454,12 +452,12 @@ func StartTestControlRPCServer(t TestErrHandler, instanceType string) TestServer credentials := serverConfig.GetCredential() testRPCServer.Root = root - testRPCServer.Disks = endPoints + testRPCServer.Disks = endpoints testRPCServer.AccessKey = credentials.AccessKeyID testRPCServer.SecretKey = credentials.SecretAccessKey // create temporary backend for the test server. - objLayer, storageDisks, err := initObjectLayer(endPoints, nil) + objLayer, storageDisks, err := initObjectLayer(endpoints, nil) if err != nil { t.Fatalf("Failed obtaining Temp Backend: %s", err) } @@ -508,7 +506,7 @@ func newTestConfig(bucketLocation string) (rootPath string, err error) { func (testServer TestServer) Stop() { removeAll(testServer.Root) for _, disk := range testServer.Disks { - removeAll(disk.path) + removeAll(disk.Path) } testServer.Server.Close() } @@ -1556,13 +1554,13 @@ func getRandomDisks(N int) ([]string, error) { } // initObjectLayer - Instantiates object layer and returns it. 
-func initObjectLayer(endPoints []storageEndPoint, ignoredEndPoints []storageEndPoint) (ObjectLayer, []StorageAPI, error) { - storageDisks, err := initStorageDisks(endPoints, ignoredEndPoints) +func initObjectLayer(endpoints, ignoredEndpoints []*url.URL) (ObjectLayer, []StorageAPI, error) { + storageDisks, err := initStorageDisks(endpoints, ignoredEndpoints) if err != nil { return nil, nil, err } - err = waitForFormatDisks(true, "", storageDisks) + err = waitForFormatDisks(true, endpoints[0], storageDisks) if err != nil { return nil, nil, err } @@ -1633,7 +1631,7 @@ func prepareXLStorageDisks(t *testing.T) ([]StorageAPI, []string) { if err != nil { t.Fatal("Unexpected error: ", err) } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatal("Unexpected error: ", err) } @@ -1650,7 +1648,7 @@ func prepareXLStorageDisks(t *testing.T) ([]StorageAPI, []string) { // initializes the specified API endpoints for the tests. // initialies the root and returns its path. // return credentials. -func initAPIHandlerTest(obj ObjectLayer, endPoints []string) (bucketName string, apiRouter http.Handler, err error) { +func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (bucketName string, apiRouter http.Handler, err error) { // get random bucket name. bucketName = getRandomBucketName() @@ -1662,7 +1660,7 @@ func initAPIHandlerTest(obj ObjectLayer, endPoints []string) (bucketName string, } // Register the API end points with XL/FS object layer. // Registering only the GetObject handler. - apiRouter = initTestAPIEndPoints(obj, endPoints) + apiRouter = initTestAPIEndPoints(obj, endpoints) return bucketName, apiRouter, nil } @@ -1835,7 +1833,7 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc // ExecObjectLayerAPITest - executes object layer API tests. // Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers. -func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endPoints []string) { +func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
rootPath, err := newTestConfig("us-east-1") @@ -1846,7 +1844,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endPoints [ if err != nil { t.Fatalf("Initialization of object layer failed for single node setup: %s", err) } - bucketFS, fsAPIRouter, err := initAPIHandlerTest(objLayer, endPoints) + bucketFS, fsAPIRouter, err := initAPIHandlerTest(objLayer, endpoints) if err != nil { t.Fatalf("Initialzation of API handler tests failed: %s", err) } @@ -1858,7 +1856,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endPoints [ if err != nil { t.Fatalf("Initialization of object layer failed for XL setup: %s", err) } - bucketXL, xlAPIRouter, err := initAPIHandlerTest(objLayer, endPoints) + bucketXL, xlAPIRouter, err := initAPIHandlerTest(objLayer, endpoints) if err != nil { t.Fatalf("Initialzation of API handler tests failed: %s", err) } @@ -1929,7 +1927,7 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) if err != nil { t.Fatalf("Initialization of disks for XL setup: %s", err) } - endpoints, err := parseStorageEndPoints(erasureDisks, 0) + endpoints, err := parseStorageEndpoints(erasureDisks) if err != nil { t.Fatalf("Initialization of disks for XL setup: %s", err) } diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go index 7426512c5..fc9c44b4b 100644 --- a/cmd/tree-walk_test.go +++ b/cmd/tree-walk_test.go @@ -165,11 +165,11 @@ func TestTreeWalk(t *testing.T) { if err != nil { t.Fatalf("Unable to create tmp directory: %s", err) } - endpoint, err := parseStorageEndPoint(fsDir, 0) + endpoints, err := parseStorageEndpoints([]string{fsDir}) if err != nil { t.Fatalf("Unexpected error %s", err) } - disk, err := newStorageAPI(endpoint) + disk, err := newStorageAPI(endpoints[0]) if err != nil { t.Fatalf("Unable to create StorageAPI: %s", err) } @@ -206,11 +206,11 @@ func TestTreeWalkTimeout(t *testing.T) { if err != nil { t.Fatalf("Unable to create tmp directory: %s", err) } - endpoint, err := parseStorageEndPoint(fsDir, 0) + endpoints, err := parseStorageEndpoints([]string{fsDir}) if err != nil { t.Fatalf("Unexpected error %s", err) } - disk, err := newStorageAPI(endpoint) + disk, err := newStorageAPI(endpoints[0]) if err != nil { t.Fatalf("Unable to create StorageAPI: %s", err) } @@ -286,23 +286,18 @@ func TestListDir(t *testing.T) { t.Errorf("Unable to create tmp directory: %s", err) } - endpoint1, err := parseStorageEndPoint(fsDir1, 0) + endpoints, err := parseStorageEndpoints([]string{fsDir1, fsDir2}) if err != nil { t.Fatalf("Unexpected error %s", err) } // Create two StorageAPIs disk1 and disk2. - disk1, err := newStorageAPI(endpoint1) + disk1, err := newStorageAPI(endpoints[0]) if err != nil { t.Errorf("Unable to create StorageAPI: %s", err) } - endpoint2, err := parseStorageEndPoint(fsDir2, 0) - if err != nil { - t.Fatalf("Unexpected error %s", err) - } - - disk2, err := newStorageAPI(endpoint2) + disk2, err := newStorageAPI(endpoints[1]) if err != nil { t.Errorf("Unable to create StorageAPI: %s", err) } @@ -370,13 +365,11 @@ func TestRecursiveTreeWalk(t *testing.T) { t.Fatalf("Unable to create tmp directory: %s", err) } - endpoint1, err := parseStorageEndPoint(fsDir1, 0) + endpoints, err := parseStorageEndpoints([]string{fsDir1}) if err != nil { t.Fatalf("Unexpected error %s", err) } - - // Create two StorageAPIs disk1. 
- disk1, err := newStorageAPI(endpoint1) + disk1, err := newStorageAPI(endpoints[0]) if err != nil { t.Fatalf("Unable to create StorageAPI: %s", err) } @@ -482,15 +475,13 @@ func TestSortedness(t *testing.T) { t.Errorf("Unable to create tmp directory: %s", err) } - endpoint1, err := parseStorageEndPoint(fsDir1, 0) + endpoints, err := parseStorageEndpoints([]string{fsDir1}) if err != nil { t.Fatalf("Unexpected error %s", err) } - - // Create two StorageAPIs disk1. - disk1, err := newStorageAPI(endpoint1) + disk1, err := newStorageAPI(endpoints[0]) if err != nil { - t.Errorf("Unable to create StorageAPI: %s", err) + t.Fatalf("Unable to create StorageAPI: %s", err) } // Simple isLeaf check, returns true if there is no trailing "/" @@ -562,15 +553,13 @@ func TestTreeWalkIsEnd(t *testing.T) { t.Errorf("Unable to create tmp directory: %s", err) } - endpoint1, err := parseStorageEndPoint(fsDir1, 0) + endpoints, err := parseStorageEndpoints([]string{fsDir1}) if err != nil { t.Fatalf("Unexpected error %s", err) } - - // Create two StorageAPIs disk1. - disk1, err := newStorageAPI(endpoint1) + disk1, err := newStorageAPI(endpoints[0]) if err != nil { - t.Errorf("Unable to create StorageAPI: %s", err) + t.Fatalf("Unable to create StorageAPI: %s", err) } isLeaf := func(volume, prefix string) bool { diff --git a/cmd/utils.go b/cmd/utils.go index 009026000..9c8c9a9e4 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "net/http" + "net/url" "os" "strings" @@ -70,24 +71,23 @@ func checkDuplicateStrings(list []string) error { } // checkDuplicates - function to validate if there are duplicates in a slice of endPoints. -func checkDuplicateEndPoints(list []storageEndPoint) error { +func checkDuplicateEndpoints(endpoints []*url.URL) error { var strs []string - for _, ep := range list { + for _, ep := range endpoints { strs = append(strs, ep.String()) } return checkDuplicateStrings(strs) } -// Find local node through the command line arguments. Returns in -// `host:port` format. +// Find local node through the command line arguments. Returns in `host:port` format. func getLocalAddress(srvCmdConfig serverCmdConfig) string { if !srvCmdConfig.isDistXL { return srvCmdConfig.serverAddr } - for _, ep := range srvCmdConfig.endPoints { - // Validates if remote disk is local. + for _, ep := range srvCmdConfig.endpoints { + // Validates if remote endpoint is local. if isLocalStorage(ep) { - return fmt.Sprintf("%s:%d", ep.host, ep.port) + return ep.Host } } return "" @@ -144,6 +144,16 @@ func contains(stringList []string, element string) bool { return false } +// Contains endpoint returns true if endpoint found in the list of input endpoints. +func containsEndpoint(endpoints []*url.URL, endpoint *url.URL) bool { + for _, ep := range endpoints { + if *ep == *endpoint { + return true + } + } + return false +} + // urlPathSplit - split url path into bucket and object components. func urlPathSplit(urlPath string) (bucketName, prefixName string) { if urlPath == "" { diff --git a/cmd/utils_test.go b/cmd/utils_test.go index a046f5c52..07899d140 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -18,7 +18,9 @@ package cmd import ( "fmt" + "net" "net/http" + "net/url" "reflect" "runtime" "testing" @@ -224,7 +226,8 @@ func TestLocalAddress(t *testing.T) { return } // need to set this to avoid stale values from other tests. 
- globalMinioPort = 9000 + globalMinioPort = "9000" + globalMinioHost = "" testCases := []struct { srvCmdConfig serverCmdConfig localAddr string @@ -233,39 +236,64 @@ func TestLocalAddress(t *testing.T) { { srvCmdConfig: serverCmdConfig{ isDistXL: true, - endPoints: []storageEndPoint{ - {"localhost", 9000, "/mnt/disk1"}, - {"1.1.1.2", 9000, "/mnt/disk2"}, - {"1.1.2.1", 9000, "/mnt/disk3"}, - {"1.1.2.2", 9000, "/mnt/disk4"}, - }, + endpoints: []*url.URL{{ + Scheme: "http", + Host: "localhost:9000", + Path: "/mnt/disk1", + }, { + Scheme: "http", + Host: "1.1.1.2:9000", + Path: "/mnt/disk2", + }, { + Scheme: "http", + Host: "1.1.2.1:9000", + Path: "/mnt/disk3", + }, { + Scheme: "http", + Host: "1.1.2.2:9000", + Path: "/mnt/disk4", + }}, }, - localAddr: fmt.Sprintf("localhost:%d", globalMinioPort), + localAddr: net.JoinHostPort("localhost", globalMinioPort), }, // Test 2 - local address is everything. { srvCmdConfig: serverCmdConfig{ - serverAddr: fmt.Sprintf(":%d", globalMinioPort), + serverAddr: net.JoinHostPort("", globalMinioPort), isDistXL: false, - endPoints: []storageEndPoint{ - {path: "/mnt/disk1"}, - {path: "/mnt/disk2"}, - {path: "/mnt/disk3"}, - {path: "/mnt/disk4"}, - }, + endpoints: []*url.URL{{ + Path: "/mnt/disk1", + }, { + Path: "/mnt/disk2", + }, { + Path: "/mnt/disk3", + }, { + Path: "/mnt/disk4", + }}, }, - localAddr: fmt.Sprintf(":%d", globalMinioPort), + localAddr: net.JoinHostPort("", globalMinioPort), }, // Test 3 - local address is not found. { srvCmdConfig: serverCmdConfig{ isDistXL: true, - endPoints: []storageEndPoint{ - {"1.1.1.1", 9000, "/mnt/disk1"}, - {"1.1.1.2", 9000, "/mnt/disk2"}, - {"1.1.2.1", 9000, "/mnt/disk3"}, - {"1.1.2.2", 9000, "/mnt/disk4"}, - }, + endpoints: []*url.URL{{ + Scheme: "http", + Host: "1.1.1.1:9000", + Path: "/mnt/disk2", + }, { + Scheme: "http", + Host: "1.1.1.2:9000", + Path: "/mnt/disk2", + }, { + Scheme: "http", + Host: "1.1.2.1:9000", + Path: "/mnt/disk3", + }, { + Scheme: "http", + Host: "1.1.2.2:9000", + Path: "/mnt/disk4", + }}, }, localAddr: "", }, @@ -276,12 +304,15 @@ func TestLocalAddress(t *testing.T) { srvCmdConfig: serverCmdConfig{ serverAddr: "play.minio.io:9000", isDistXL: false, - endPoints: []storageEndPoint{ - {path: "/mnt/disk1"}, - {path: "/mnt/disk2"}, - {path: "/mnt/disk3"}, - {path: "/mnt/disk4"}, - }, + endpoints: []*url.URL{{ + Path: "/mnt/disk1", + }, { + Path: "/mnt/disk2", + }, { + Path: "/mnt/disk3", + }, { + Path: "/mnt/disk4", + }}, }, localAddr: "play.minio.io:9000", }, diff --git a/cmd/xl-v1_test.go b/cmd/xl-v1_test.go index 5ada97883..7cd0f3649 100644 --- a/cmd/xl-v1_test.go +++ b/cmd/xl-v1_test.go @@ -51,12 +51,12 @@ func TestStorageInfo(t *testing.T) { t.Fatalf("Diskinfo total values should be greater 0") } - endpoints, err := parseStorageEndPoints(fsDirs, 0) + endpoints, err := parseStorageEndpoints(fsDirs) if err != nil { t.Fatalf("Unexpected error %s", err) } - ignoredEndpoints, err := parseStorageEndPoints(fsDirs[:4], 0) + ignoredEndpoints, err := parseStorageEndpoints(fsDirs[:4]) if err != nil { t.Fatalf("Unexpected error %s", err) } @@ -68,7 +68,7 @@ func TestStorageInfo(t *testing.T) { objLayer, err = newXLObjects(storageDisks) if err != nil { - t.Fatalf("Unable to initialize 'XL' object layer with ignored disks %s.", fsDirs[:4]) + t.Fatalf("Unable to initialize 'XL' object layer with ignored disks %s. error %s", fsDirs[:4], err) } // Get storage info first attempt. 
@@ -151,7 +151,7 @@ func TestNewXL(t *testing.T) { t.Fatalf("Unable to initialize erasure, %s", err) } - endpoints, err := parseStorageEndPoints(erasureDisks, 0) + endpoints, err := parseStorageEndpoints(erasureDisks) if err != nil { t.Fatalf("Unable to initialize erasure, %s", err) } @@ -161,13 +161,18 @@ func TestNewXL(t *testing.T) { t.Fatal("Unexpected error: ", err) } - err = waitForFormatDisks(true, "", nil) + err = waitForFormatDisks(true, endpoints[0], nil) + if err != errInvalidArgument { + t.Fatalf("Expecting error, got %s", err) + } + + err = waitForFormatDisks(true, nil, storageDisks) if err != errInvalidArgument { t.Fatalf("Expecting error, got %s", err) } // Initializes all erasure disks - err = waitForFormatDisks(true, "", storageDisks) + err = waitForFormatDisks(true, endpoints[0], storageDisks) if err != nil { t.Fatalf("Unable to format disks for erasure, %s", err) } @@ -176,12 +181,12 @@ func TestNewXL(t *testing.T) { t.Fatalf("Unable to initialize erasure, %s", err) } - endpoints, err = parseStorageEndPoints(erasureDisks, 0) + endpoints, err = parseStorageEndpoints(erasureDisks) if err != nil { t.Fatalf("Unable to initialize erasure, %s", err) } - ignoredEndpoints, err := parseStorageEndPoints(erasureDisks[:2], 0) + ignoredEndpoints, err := parseStorageEndpoints(erasureDisks[:2]) if err != nil { t.Fatalf("Unable to initialize erasure, %s", err) }
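The hunks above replace the old storageEndPoint values ({host, port, path}) with *url.URL endpoints. As a point of reference, here is a minimal sketch of how a disk argument maps onto a *url.URL; the real parsing lives in parseStorageEndpoints (defined elsewhere in the patch), so toEndpoint below is a hypothetical stand-in.

package main

import (
	"fmt"
	"net/url"
)

// toEndpoint is a hypothetical stand-in for the parsing done by
// parseStorageEndpoints: each disk argument is treated as a URL, so a
// remote disk carries Scheme and Host, while a plain local path parses
// with both left empty.
func toEndpoint(arg string) (*url.URL, error) {
	return url.Parse(arg)
}

func main() {
	remote, err := toEndpoint("http://1.1.1.2:9000/mnt/disk2")
	if err != nil {
		panic(err)
	}
	fmt.Println(remote.Scheme, remote.Host, remote.Path) // http 1.1.1.2:9000 /mnt/disk2

	local, err := toEndpoint("/mnt/disk1")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", local.Host, local.Path) // "" "/mnt/disk1"
}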
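With ports kept as strings (globalMinioPort = "9000"), the tests above build addresses with net.JoinHostPort instead of fmt.Sprintf. A small, self-contained illustration of what that call produces:

package main

import (
	"fmt"
	"net"
)

func main() {
	// net.JoinHostPort builds listen/dial addresses from a host and a
	// string port, and brackets IPv6 hosts automatically.
	port := "9000"
	fmt.Println(net.JoinHostPort("localhost", port)) // localhost:9000
	fmt.Println(net.JoinHostPort("", port))          // :9000
	fmt.Println(net.JoinHostPort("::1", port))       // [::1]:9000
}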
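containsEndpoint and checkDuplicateEndpoints in cmd/utils.go compare endpoints by dereferenced url.URL value rather than by pointer. A short usage sketch (the sample URLs are illustrative only):

package main

import (
	"fmt"
	"net/url"
)

// containsEndpoint mirrors the helper added in cmd/utils.go: it reports
// whether endpoint is present in endpoints, comparing URL values.
func containsEndpoint(endpoints []*url.URL, endpoint *url.URL) bool {
	for _, ep := range endpoints {
		if *ep == *endpoint {
			return true
		}
	}
	return false
}

func main() {
	a, _ := url.Parse("http://1.1.1.1:9000/mnt/disk1")
	b, _ := url.Parse("http://1.1.1.1:9000/mnt/disk1")
	c, _ := url.Parse("http://1.1.1.2:9000/mnt/disk2")

	fmt.Println(containsEndpoint([]*url.URL{a, c}, b)) // true: equal by value, not by pointer
	fmt.Println(containsEndpoint([]*url.URL{a}, c))    // false
}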
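The new tests above expect errInvalidArgument when newStorageRPC or waitForFormatDisks is handed a nil endpoint. A hedged sketch of that guard pattern; the function body here is hypothetical, since the real constructors also wire up the RPC client and the format checks.

package main

import (
	"errors"
	"fmt"
	"net/url"
)

// errInvalidArgument mirrors the sentinel error the tests check for.
var errInvalidArgument = errors.New("invalid argument")

// newStorageRPCSketch illustrates only the nil-endpoint validation
// exercised by the tests; it is not the actual constructor.
func newStorageRPCSketch(ep *url.URL) (string, error) {
	if ep == nil {
		return "", errInvalidArgument
	}
	return ep.Host, nil
}

func main() {
	if _, err := newStorageRPCSketch(nil); err == errInvalidArgument {
		fmt.Println("nil endpoint rejected up front")
	}

	u, _ := url.Parse("http://abcd:abcd123@localhost/mnt/disk")
	host, _ := newStorageRPCSketch(u)
	fmt.Println(host) // localhost
}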