object layer initialization using distributed locking (#2397)

* vendorized latest minio/dsync

* wip - object layer initialization using distributed locking
Krishnan Parthasarathi, committed by Harshavardhana (branch: master)
parent 3939c75345, commit b7c169d71d
Files changed (7):
  1. cmd/routers.go (5 changed lines)
  2. cmd/rpc-server.go (6 changed lines)
  3. cmd/server-main.go (33 changed lines)
  4. lock-rpc-server.go (12 changed lines)
  5. vendor/github.com/minio/dsync/dmutex.go (3 changed lines)
  6. vendor/github.com/minio/dsync/dsync.go (9 changed lines)
  7. vendor/vendor.json (6 changed lines)

cmd/routers.go

@@ -49,6 +49,11 @@ func newObjectLayerFactory(disks, ignoredDisks []string) func() ObjectLayer {
 		if objAPI != nil {
 			return objAPI
 		}
+		// Acquire a distributed lock to ensure only one of the nodes
+		// initializes the format.json.
+		nsMutex.Lock(minioMetaBucket, formatConfigFile)
+		defer nsMutex.Unlock(minioMetaBucket, formatConfigFile)
 		objAPI, err = newObjectLayer(disks, ignoredDisks)
 		if err != nil {
 			return nil
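
For context, the hunk above is the guard-initialization pattern: whichever node takes the lock first creates format.json, and every later node re-checks under the same lock and finds it already done. A minimal self-contained sketch of that pattern (nsLockMap here is a hypothetical local stand-in for minio's nsMutex; in the distributed case the same Lock/Unlock calls are served over RPC by the lock handlers later in this commit):

package main

import "sync"

// nsLockMap is a stand-in for minio's nsMutex namespace lock.
type nsLockMap struct{ mu sync.Mutex }

func (n *nsLockMap) Lock(bucket, object string)   { n.mu.Lock() }
func (n *nsLockMap) Unlock(bucket, object string) { n.mu.Unlock() }

var (
	nsMutex   nsLockMap
	formatted bool // stands in for "does format.json already exist"
)

// initFormat may run concurrently on every node; holding the lock across
// the check-then-create means exactly one caller writes format.json.
func initFormat() {
	nsMutex.Lock(".minio", "format.json")
	defer nsMutex.Unlock(".minio", "format.json")
	if formatted {
		return // some other node got there first
	}
	// ... create format.json on all disks ...
	formatted = true
}

func main() { initFormat() }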

cmd/rpc-server.go

@@ -92,7 +92,7 @@ func (s *storageServer) ReadAllHandler(arg *ReadFileArgs, reply *[]byte) error {
 	if err != nil {
 		return err
 	}
-	reply = &buf
+	*reply = buf
 	return nil
 }
@@ -102,7 +102,7 @@ func (s *storageServer) ReadFileHandler(arg *ReadFileArgs, reply *int64) error {
 	if err != nil {
 		return err
 	}
-	reply = &n
+	*reply = n
 	return nil
 }
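
The two fixes above are the classic net/rpc reply-pointer bug: `reply = &buf` only rebinds the handler's local copy of the pointer, so the value the rpc package serializes back to the caller is never written. Dereferencing with `*reply = buf` writes through the pointer into the caller's variable. A standalone demonstration of the difference:

package main

import "fmt"

func wrong(reply *[]byte) {
	buf := []byte("data")
	reply = &buf // rebinds the local pointer; the caller's slice is untouched
	_ = reply
}

func right(reply *[]byte) {
	buf := []byte("data")
	*reply = buf // writes through the pointer into the caller's variable
}

func main() {
	var a, b []byte
	wrong(&a)
	right(&b)
	fmt.Printf("wrong: %q, right: %q\n", a, b) // wrong: "", right: "data"
}
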
@@ -160,9 +160,9 @@ func newRPCServer(serverConfig serverCmdConfig) (servers []*storageServer, err e
 // registerStorageRPCRouter - register storage rpc router.
 func registerStorageRPCRouters(mux *router.Router, stServers []*storageServer) {
-	storageRPCServer := rpc.NewServer()
 	// Create a unique route for each disk exported from this node.
 	for _, stServer := range stServers {
+		storageRPCServer := rpc.NewServer()
 		storageRPCServer.RegisterName("Storage", stServer)
 		// Add minio storage routes.
 		storageRouter := mux.PathPrefix(reservedBucket).Subrouter()
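
Moving rpc.NewServer() inside the loop gives every exported disk its own rpc.Server, so each mux route carries an independent registry and the same service name "Storage" can back a different receiver per disk (a single shared server could only register the name once). A compilable sketch of the per-route pattern, with illustrative service and route names that are not minio's:

package main

import (
	"log"
	"net/http"
	"net/rpc"

	"github.com/gorilla/mux"
)

// diskService is a stand-in; minio registers one storageServer per disk.
type diskService struct{ id string }

func (d *diskService) Name(args *string, reply *string) error {
	*reply = d.id
	return nil
}

func main() {
	router := mux.NewRouter()
	for _, disk := range []string{"disk1", "disk2"} {
		// One rpc.Server per disk: each HTTP route serves its own
		// registry under the shared service name "Storage".
		srv := rpc.NewServer()
		srv.RegisterName("Storage", &diskService{id: disk})
		router.Path("/minio/storage/" + disk).Handler(srv)
	}
	// Clients reach a given disk with rpc.DialHTTPPath on its route.
	log.Fatal(http.ListenAndServe(":9000", router))
}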

@ -21,11 +21,13 @@ import (
"net" "net"
"net/http" "net/http"
"os" "os"
"path"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/dsync"
) )
var srvConfig serverCmdConfig var srvConfig serverCmdConfig
@@ -220,6 +222,29 @@ func getPort(address string) int {
 	return portInt
 }
 
+// Initialize distributed locking only in case of distributed setup.
+func initDsyncNodes(disks []string, port int) error {
+	var isDist bool = false
+	var dsyncNodes []string
+	var rpcPaths []string
+	serverPort := strconv.Itoa(port)
+	for _, disk := range disks {
+		if idx := strings.LastIndex(disk, ":"); idx != -1 {
+			dsyncNodes = append(dsyncNodes, disk[:idx]+":"+serverPort)
+			rpcPaths = append(rpcPaths, path.Join(lockRPCPath, disk[idx+1:]))
+		}
+		if !isLocalStorage(disk) {
+			// One or more disks supplied as arguments are remote.
+			isDist = true
+		}
+	}
+	if isDist {
+		return dsync.SetNodesWithPath(dsyncNodes, rpcPaths)
+	}
+	return nil
+}
+
 // serverMain handler called for 'minio server' command.
 func serverMain(c *cli.Context) {
 	// Check 'server' cli arguments.
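
To see what initDsyncNodes computes, here is its parsing loop lifted into a runnable snippet with sample "host:/export" arguments (the disk values and port are made up for illustration; the constant matches the new lockRPCPath value in this commit):

package main

import (
	"fmt"
	"path"
	"strconv"
	"strings"
)

func main() {
	const lockRPCPath = "/minio/lock"
	disks := []string{"node1:/mnt/disk1", "node2:/mnt/disk2"}
	serverPort := strconv.Itoa(9000)
	for _, disk := range disks {
		// Split "host:/export" on the last ':' into a dsync node
		// address (host + server port) and a per-disk rpc path.
		if idx := strings.LastIndex(disk, ":"); idx != -1 {
			fmt.Println(disk[:idx]+":"+serverPort, path.Join(lockRPCPath, disk[idx+1:]))
		}
	}
	// Output:
	// node1:9000 /minio/lock/mnt/disk1
	// node2:9000 /minio/lock/mnt/disk2
}
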
@@ -245,6 +270,14 @@ func serverMain(c *cli.Context) {
 	// Disks to be used in server init.
 	disks := c.Args()
 
+	// Set nodes for dsync
+	err = initDsyncNodes(disks, port)
+	fatalIf(err, "Unable to initialize distributed locking")
+
+	// Initialize name space lock.
+	// FIXME: add logic to switch between distributed and single-node namespace locking.
+	initNSLock()
+
 	// Configure server.
 	srvConfig = serverCmdConfig{
 		serverAddr: serverAddress,
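
One plausible shape for the switch the FIXME points at: hand out a local mutex per namespace entry in the single-node case and a dsync-backed lock in the distributed case. Purely illustrative; nsLockMap, getLock, and newDistLock below are hypothetical names, not this commit's code:

package main

import "sync"

type locker interface {
	Lock()
	Unlock()
}

// newDistLock is a placeholder for constructing a dsync-backed lock
// keyed by the resource name; a plain mutex stands in here.
func newDistLock(name string) locker { _ = name; return &sync.Mutex{} }

// nsLockMap hands out one lock per (volume, path); isDist picks the
// implementation once at startup.
type nsLockMap struct {
	isDist bool
	mu     sync.Mutex
	locks  map[string]locker
}

func (n *nsLockMap) getLock(volume, path string) locker {
	n.mu.Lock()
	defer n.mu.Unlock()
	key := volume + "/" + path
	if l, ok := n.locks[key]; ok {
		return l
	}
	var l locker
	if n.isDist {
		l = newDistLock(key)
	} else {
		l = &sync.Mutex{}
	}
	n.locks[key] = l
	return l
}

func main() {
	nm := &nsLockMap{isDist: false, locks: make(map[string]locker)}
	l := nm.getLock(".minio", "format.json")
	l.Lock()
	defer l.Unlock()
}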

lock-rpc-server.go

@@ -26,7 +26,7 @@ import (
 	router "github.com/gorilla/mux"
 )
 
-const lockRPCPath = "/lock"
+const lockRPCPath = "/minio/lock"
 
 type lockServer struct {
 	rpcPath string
@@ -37,7 +37,7 @@ type lockServer struct {
 /// Distributed lock handlers
 
 // LockHandler - rpc handler for lock operation.
-func (l *lockServer) LockHandler(name *string, reply *bool) error {
+func (l *lockServer) Lock(name *string, reply *bool) error {
 	l.mutex.Lock()
 	defer l.mutex.Unlock()
 	_, ok := l.lockMap[*name]
@@ -51,7 +51,7 @@ func (l *lockServer) LockHandler(name *string, reply *bool) error {
 }
 
 // UnlockHandler - rpc handler for unlock operation.
-func (l *lockServer) UnlockHandler(name *string, reply *bool) error {
+func (l *lockServer) Unlock(name *string, reply *bool) error {
 	l.mutex.Lock()
 	defer l.mutex.Unlock()
 	_, ok := l.lockMap[*name]
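
The handler bodies are cut off in this diff, but the visible lockMap lookup suggests their shape: Lock grants only if the name is unheld, Unlock drops the entry, and the boolean reply is each node's vote that dsync tallies on the client side. A compilable guess at that shape (not the commit's exact code):

package main

import (
	"fmt"
	"sync"
)

type lockServer struct {
	mutex   sync.Mutex
	lockMap map[string]struct{}
}

// Lock grants the lock iff name is unheld; *reply reports success so a
// dsync client can count how many nodes granted its request.
func (l *lockServer) Lock(name *string, reply *bool) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	_, ok := l.lockMap[*name]
	*reply = !ok
	if !ok {
		l.lockMap[*name] = struct{}{}
	}
	return nil
}

// Unlock drops the entry; *reply reports whether it was actually held.
func (l *lockServer) Unlock(name *string, reply *bool) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	_, ok := l.lockMap[*name]
	*reply = ok
	delete(l.lockMap, *name)
	return nil
}

func main() {
	l := &lockServer{lockMap: make(map[string]struct{})}
	name, got := "minio/format.json", false
	l.Lock(&name, &got) // got == true: first holder wins
	l.Lock(&name, &got) // got == false: already held
	fmt.Println(got)
}
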
@@ -84,6 +84,7 @@ func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) {
 		if skipDisks[export] {
 			continue
 		}
+		if isLocalStorage(export) {
 			if idx := strings.LastIndex(export, ":"); idx != -1 {
 				export = export[idx+1:]
 			}
@@ -93,13 +94,14 @@ func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) {
 				lockMap: make(map[string]struct{}),
 			})
 		}
+	}
 	return lockServers
 }
 
-// registerStorageLockers - register locker rpc handlers for valyala/gorpc library clients
+// registerStorageLockers - register locker rpc handlers for net/rpc library clients
 func registerStorageLockers(mux *router.Router, lockServers []*lockServer) {
-	lockRPCServer := rpc.NewServer()
 	for _, lockServer := range lockServers {
+		lockRPCServer := rpc.NewServer()
 		lockRPCServer.RegisterName("Dsync", lockServer)
 		lockRouter := mux.PathPrefix(reservedBucket).Subrouter()
 		lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer)

@ -21,7 +21,6 @@ import (
"math" "math"
"math/rand" "math/rand"
"net/rpc" "net/rpc"
"strings"
"sync" "sync"
"time" "time"
) )
@@ -52,7 +51,7 @@ func connectLazy(dm *DMutex) {
 	for i := range dm.clnts {
 		if dm.clnts[i] == nil {
 			// pass in unique path (as required by server.HandleHTTP()
-			dm.clnts[i], _ = rpc.DialHTTPPath("tcp", nodes[i], rpcPath+"-"+strings.Split(nodes[i], ":")[1])
+			dm.clnts[i], _ = rpc.DialHTTPPath("tcp", nodes[i], rpcPaths[i])
 		}
 	}
 }
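
The change above replaces the old convention of deriving a unique path from each node's port with an explicit per-node rpc path. Isolated into a runnable snippet (node addresses and paths are sample values matching the initDsyncNodes example earlier):

package main

import (
	"fmt"
	"net/rpc"
)

// dialAll mirrors the new connectLazy behavior: each node is dialed on
// its own pre-registered rpc path. Failed dials leave a nil client, as
// in dsync, to be retried on a later lock attempt.
func dialAll(nodes, rpcPaths []string) []*rpc.Client {
	clnts := make([]*rpc.Client, len(nodes))
	for i := range nodes {
		clnts[i], _ = rpc.DialHTTPPath("tcp", nodes[i], rpcPaths[i])
	}
	return clnts
}

func main() {
	nodes := []string{"node1:9000", "node2:9000"}
	paths := []string{"/minio/lock/mnt/disk1", "/minio/lock/mnt/disk2"}
	fmt.Println(dialAll(nodes, paths))
}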

vendor/github.com/minio/dsync/dsync.go

@@ -28,7 +28,7 @@ const DefaultPath = "/rpc/dsync"
 var n int
 var nodes []string
-var rpcPath string
+var rpcPaths []string
 
 func closeClients(clients []*rpc.Client) {
 	for _, clnt := range clients {
@@ -36,8 +36,8 @@ func closeClients(clients []*rpc.Client) {
 	}
 }
 
-// Same as SetNodes, but takes a path argument different from the package-level default.
-func SetNodesWithPath(nodeList []string, path string) (err error) {
+// Same as SetNodes, but takes a slice of rpc paths as argument different from the package-level default.
+func SetNodesWithPath(nodeList []string, paths []string) (err error) {
 	// Validate if number of nodes is within allowable range.
 	if n != 0 {
@@ -50,7 +50,8 @@ func SetNodesWithPath(nodeList []string, path string) (err error) {
 	nodes = make([]string, len(nodeList))
 	copy(nodes, nodeList[:])
-	rpcPath = path
+	rpcPaths = make([]string, len(paths))
+	copy(rpcPaths, paths[:])
 	n = len(nodes)
 	return nil
 }
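
With this change the caller supplies one rpc path per node, positionally matched to the node list; as far as this diff shows, the vendored code copies both slices but does not verify they have equal length, so that invariant is on the caller. Usage against the vendored API, as initDsyncNodes does (sample values):

package main

import (
	"log"

	"github.com/minio/dsync"
)

func main() {
	// One rpc path per node, in the same order as the node list.
	nodes := []string{"node1:9000", "node2:9000"}
	paths := []string{"/minio/lock/mnt/disk1", "/minio/lock/mnt/disk2"}
	if err := dsync.SetNodesWithPath(nodes, paths); err != nil {
		log.Fatalln("dsync init failed:", err)
	}
}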

vendor/vendor.json

@@ -98,10 +98,10 @@
 			"revisionTime": "2015-11-18T20:00:48-08:00"
 		},
 		{
-			"checksumSHA1": "KCM0UiuvLA5fPiX5I83/HTklxlI=",
+			"checksumSHA1": "r1Vf/vQTkMsZrDVORBGAAIlOMP4=",
 			"path": "github.com/minio/dsync",
-			"revision": "c10eebd6b637bb834d502a6574c53e0ea6c64997",
-			"revisionTime": "2016-08-05T20:56:13Z"
+			"revision": "6bfa8c0c1c37959c1bda15bfdae228a986d3cca8",
+			"revisionTime": "2016-08-07T19:01:27Z"
 		},
 		{
 			"path": "github.com/minio/go-homedir",
