Bypass network in lock requests to local server (#4465)

This makes lock RPCs consistent with other RPCs, where requests addressed
to the local server bypass the network. Lock requests destined for the
local lock subsystem now skip the network layer entirely and operate
directly on the in-memory locking data structures.

This incidentally fixes #4451.
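
The shape of the change, in brief: every endpoint still gets a dsync.NetLocker, but local endpoints are handed an in-process localLocker instead of a lock RPC client, so dsync calls that target the local node never leave the process. The stand-alone Go sketch below illustrates only that dispatch idea; it is not the MinIO code. The netLocker interface here is trimmed to a single Lock method (the real dsync.NetLocker used in the diff also carries Unlock, RLock, RUnlock and ForceUnlock), and every name in the sketch is hypothetical.

    package main

    import (
        "fmt"
        "sync"
    )

    // netLocker is a trimmed-down stand-in for dsync.NetLocker: both the
    // in-process locker and the RPC client satisfy it, so callers do not
    // care whether a lock request crosses the network.
    type netLocker interface {
        Lock(resource string) (bool, error)
    }

    // localLocker grants write locks directly against an in-memory map.
    type localLocker struct {
        mutex   sync.Mutex
        lockMap map[string]bool
    }

    func (l *localLocker) Lock(resource string) (bool, error) {
        l.mutex.Lock()
        defer l.mutex.Unlock()
        if l.lockMap[resource] {
            return false, nil // already locked, not granted
        }
        l.lockMap[resource] = true
        return true, nil
    }

    // rpcLocker stands in for a lock RPC client that would call a remote node.
    type rpcLocker struct{ addr string }

    func (r *rpcLocker) Lock(resource string) (bool, error) {
        // A real client would issue a network RPC to r.addr here.
        return false, fmt.Errorf("remote call to %s not implemented in this sketch", r.addr)
    }

    // endpoint carries just enough information for the dispatch decision.
    type endpoint struct {
        Host    string
        IsLocal bool
    }

    // newLockers mirrors the idea of newDsyncNodes in the diff: an RPC client
    // for remote endpoints, a direct in-process locker for local ones.
    func newLockers(endpoints []endpoint) []netLocker {
        lockers := make([]netLocker, len(endpoints))
        for i, ep := range endpoints {
            if ep.IsLocal {
                lockers[i] = &localLocker{lockMap: make(map[string]bool)}
                continue
            }
            lockers[i] = &rpcLocker{addr: ep.Host}
        }
        return lockers
    }

    func main() {
        lockers := newLockers([]endpoint{
            {Host: "127.0.0.1:9000", IsLocal: true},
            {Host: "10.0.0.2:9000", IsLocal: false},
        })
        granted, err := lockers[0].Lock("bucket/object")
        fmt.Println(granted, err) // true <nil> -- the local request never touched the network
    }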
Branch: master
Author: Aditya Manthramurthy (committed by Harshavardhana)
Parent: 2559614bfd
Commit: 986aa8fabf
7 changed files (additions + deletions per file):
  1. cmd/lock-rpc-server-common.go (4)
  2. cmd/lock-rpc-server-common_test.go (24)
  3. cmd/lock-rpc-server.go (220)
  4. cmd/lock-rpc-server_test.go (36)
  5. cmd/namespace-lock.go (53)
  6. cmd/server-main.go (4)
  7. cmd/server-mux_test.go (3)

cmd/lock-rpc-server-common.go

@@ -22,7 +22,7 @@ import (
 )
 
 // Similar to removeEntry but only removes an entry only if the lock entry exists in map.
-func (l *lockServer) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
+func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
     // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
     if lri, ok := l.lockMap[nlrip.name]; ok {
         if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) {
@@ -38,7 +38,7 @@ func (l *lockServer) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
 
 // removeEntry either, based on the uid of the lock message, removes a single entry from the
 // lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
-func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
+func (l *localLocker) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
     // Find correct entry to remove based on uid.
     for index, entry := range *lri {
         if entry.uid == uid {

cmd/lock-rpc-server-common_test.go

@@ -38,9 +38,9 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
     nlrip := nameLockRequesterInfoPair{name: "name", lri: lri}
 
     // first test by simulating item has already been deleted
-    locker.removeEntryIfExists(nlrip)
+    locker.ll.removeEntryIfExists(nlrip)
     {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -48,10 +48,10 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
     }
 
     // then test normal deletion
-    locker.lockMap["name"] = []lockRequesterInfo{lri} // add item
-    locker.removeEntryIfExists(nlrip)
+    locker.ll.lockMap["name"] = []lockRequesterInfo{lri} // add item
+    locker.ll.removeEntryIfExists(nlrip)
     {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -81,32 +81,32 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
         timeLastCheck: UTCNow(),
     }
 
-    locker.lockMap["name"] = []lockRequesterInfo{
+    locker.ll.lockMap["name"] = []lockRequesterInfo{
         lockRequesterInfo1,
         lockRequesterInfo2,
     }
 
-    lri, _ := locker.lockMap["name"]
+    lri, _ := locker.ll.lockMap["name"]
 
     // test unknown uid
-    if locker.removeEntry("name", "unknown-uid", &lri) {
+    if locker.ll.removeEntry("name", "unknown-uid", &lri) {
         t.Errorf("Expected %#v, got %#v", false, true)
     }
 
-    if !locker.removeEntry("name", "0123-4567", &lri) {
+    if !locker.ll.removeEntry("name", "0123-4567", &lri) {
         t.Errorf("Expected %#v, got %#v", true, false)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{lockRequesterInfo2}
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
         }
     }
 
-    if !locker.removeEntry("name", "89ab-cdef", &lri) {
+    if !locker.ll.removeEntry("name", "89ab-cdef", &lri) {
         t.Errorf("Expected %#v, got %#v", true, false)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)

cmd/lock-rpc-server.go

@@ -60,9 +60,7 @@ func isWriteLock(lri []lockRequesterInfo) bool {
 
 // lockServer is type for RPC handlers
 type lockServer struct {
     AuthRPCServer
-    serviceEndpoint string
-    mutex           sync.Mutex
-    lockMap         map[string][]lockRequesterInfo
+    ll localLocker
 }
 
 // Start lock maintenance from all lock servers.
@@ -91,30 +89,11 @@ func startLockMaintainence(lockServers []*lockServer) {
 
 // Register distributed NS lock handlers.
 func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error {
-    // Initialize a new set of lock servers.
-    lockServers := newLockServers(endpoints)
-
     // Start lock maintenance from all lock servers.
-    startLockMaintainence(lockServers)
+    startLockMaintainence(globalLockServers)
 
     // Register initialized lock servers to their respective rpc endpoints.
-    return registerStorageLockers(mux, lockServers)
-}
-
-// Create one lock server for every local storage rpc server.
-func newLockServers(endpoints EndpointList) (lockServers []*lockServer) {
-    for _, endpoint := range endpoints {
-        // Initialize new lock server for each local node.
-        if endpoint.IsLocal {
-            lockServers = append(lockServers, &lockServer{
-                serviceEndpoint: endpoint.Path,
-                mutex:           sync.Mutex{},
-                lockMap:         make(map[string][]lockRequesterInfo),
-            })
-        }
-    }
-    return lockServers
+    return registerStorageLockers(mux, globalLockServers)
 }
 
 // registerStorageLockers - register locker rpc handlers for net/rpc library clients
@@ -125,129 +104,178 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error
             return traceError(err)
         }
         lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
-        lockRouter.Path(path.Join(lockServicePath, lockServer.serviceEndpoint)).Handler(lockRPCServer)
+        lockRouter.Path(path.Join(lockServicePath, lockServer.ll.serviceEndpoint)).Handler(lockRPCServer)
     }
     return nil
 }
 
-/// Distributed lock handlers
+// localLocker implements Dsync.NetLocker
+type localLocker struct {
+    mutex           sync.Mutex
+    serviceEndpoint string
+    serverAddr      string
+    lockMap         map[string][]lockRequesterInfo
+}
 
-// Lock - rpc handler for (single) write lock operation.
-func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
+func (l *localLocker) ServerAddr() string {
+    return l.serverAddr
+}
+
+func (l *localLocker) ServiceEndpoint() string {
+    return l.serviceEndpoint
+}
+
+func (l *localLocker) Lock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
-    }
-    _, *reply = l.lockMap[args.LockArgs.Resource]
-    if !*reply { // No locks held on the given name, so claim write lock
-        l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{
+    _, isLockTaken := l.lockMap[args.Resource]
+    if !isLockTaken { // No locks held on the given name, so claim write lock
+        l.lockMap[args.Resource] = []lockRequesterInfo{
             {
                 writer:          true,
-                node:            args.LockArgs.ServerAddr,
-                serviceEndpoint: args.LockArgs.ServiceEndpoint,
-                uid:             args.LockArgs.UID,
+                node:            args.ServerAddr,
+                serviceEndpoint: args.ServiceEndpoint,
+                uid:             args.UID,
                 timestamp:       UTCNow(),
                 timeLastCheck:   UTCNow(),
             },
         }
     }
-    *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
-    return nil
+    // return reply=true if lock was granted.
+    return !isLockTaken, nil
 }
 
-// Unlock - rpc handler for (single) write unlock operation.
-func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
+func (l *localLocker) Unlock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
-    }
     var lri []lockRequesterInfo
-    if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name
-        return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.LockArgs.Resource)
+    if lri, reply = l.lockMap[args.Resource]; !reply {
+        // No lock is held on the given name
+        return reply, fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resource)
     }
-    if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock
-        return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.LockArgs.Resource, len(lri))
+    if reply = isWriteLock(lri); !reply {
+        // Unless it is a write lock
+        return reply, fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resource, len(lri))
     }
-    if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) {
-        return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.LockArgs.UID)
+    if !l.removeEntry(args.Resource, args.UID, &lri) {
+        return false, fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
     }
-    return nil
+    return true, nil
 }
 
-// RLock - rpc handler for read lock operation.
-func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
+func (l *localLocker) RLock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
-    }
     lrInfo := lockRequesterInfo{
         writer:          false,
-        node:            args.LockArgs.ServerAddr,
-        serviceEndpoint: args.LockArgs.ServiceEndpoint,
-        uid:             args.LockArgs.UID,
+        node:            args.ServerAddr,
+        serviceEndpoint: args.ServiceEndpoint,
+        uid:             args.UID,
         timestamp:       UTCNow(),
         timeLastCheck:   UTCNow(),
     }
-    if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
-        if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
-            l.lockMap[args.LockArgs.Resource] = append(l.lockMap[args.LockArgs.Resource], lrInfo)
+    if lri, ok := l.lockMap[args.Resource]; ok {
+        if reply = !isWriteLock(lri); reply {
+            // Unless there is a write lock
+            l.lockMap[args.Resource] = append(l.lockMap[args.Resource], lrInfo)
         }
-    } else { // No locks held on the given name, so claim (first) read lock
-        l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{lrInfo}
-        *reply = true
+    } else {
+        // No locks held on the given name, so claim (first) read lock
+        l.lockMap[args.Resource] = []lockRequesterInfo{lrInfo}
+        reply = true
     }
-    return nil
+    return reply, nil
 }
 
-// RUnlock - rpc handler for read unlock operation.
-func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
+func (l *localLocker) RUnlock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
-    }
     var lri []lockRequesterInfo
-    if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name
-        return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.LockArgs.Resource)
+    if lri, reply = l.lockMap[args.Resource]; !reply {
+        // No lock is held on the given name
+        return reply, fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resource)
     }
-    if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock
-        return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.LockArgs.Resource)
+    if reply = !isWriteLock(lri); !reply {
+        // A write-lock is held, cannot release a read lock
+        return reply, fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resource)
    }
-    if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) {
-        return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.LockArgs.UID)
+    if !l.removeEntry(args.Resource, args.UID, &lri) {
+        return false, fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
     }
-    return nil
+    return reply, nil
 }
 
-// ForceUnlock - rpc handler for force unlock operation.
-func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error {
+func (l *localLocker) ForceUnlock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
+    if len(args.UID) != 0 {
+        return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
     }
-    if len(args.LockArgs.UID) != 0 {
-        return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.LockArgs.UID)
-    }
-    if _, ok := l.lockMap[args.LockArgs.Resource]; ok { // Only clear lock when set
-        delete(l.lockMap, args.LockArgs.Resource) // Remove the lock (irrespective of write or read lock)
+    if _, ok := l.lockMap[args.Resource]; ok {
+        // Only clear lock when it is taken
+        // Remove the lock (irrespective of write or read lock)
+        delete(l.lockMap, args.Resource)
     }
-    *reply = true
-    return nil
+    return true, nil
+}
+
+/// Distributed lock handlers
+
+// Lock - rpc handler for (single) write lock operation.
+func (l *lockServer) Lock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.Lock(args.LockArgs)
+    return err
+}
+
+// Unlock - rpc handler for (single) write unlock operation.
+func (l *lockServer) Unlock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.Unlock(args.LockArgs)
+    return err
+}
+
+// RLock - rpc handler for read lock operation.
+func (l *lockServer) RLock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.RLock(args.LockArgs)
+    return err
+}
+
+// RUnlock - rpc handler for read unlock operation.
+func (l *lockServer) RUnlock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.RUnlock(args.LockArgs)
+    return err
+}
+
+// ForceUnlock - rpc handler for force unlock operation.
+func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.ForceUnlock(args.LockArgs)
+    return err
 }
 
 // Expired - rpc handler for expired lock status.
 func (l *lockServer) Expired(args *LockArgs, reply *bool) error {
-    l.mutex.Lock()
-    defer l.mutex.Unlock()
     if err := args.IsAuthenticated(); err != nil {
         return err
     }
+    l.ll.mutex.Lock()
+    defer l.ll.mutex.Unlock()
     // Lock found, proceed to verify if belongs to given uid.
-    if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
+    if lri, ok := l.ll.lockMap[args.LockArgs.Resource]; ok {
         // Check whether uid is still active
         for _, entry := range lri {
             if entry.uid == args.LockArgs.UID {
@@ -277,10 +305,10 @@ type nameLockRequesterInfoPair struct {
 //
 // We will ignore the error, and we will retry later to get a resolve on this lock
 func (l *lockServer) lockMaintenance(interval time.Duration) {
-    l.mutex.Lock()
+    l.ll.mutex.Lock()
     // Get list of long lived locks to check for staleness.
-    nlripLongLived := getLongLivedLocks(l.lockMap, interval)
-    l.mutex.Unlock()
+    nlripLongLived := getLongLivedLocks(l.ll.lockMap, interval)
+    l.ll.mutex.Unlock()
 
     serverCred := serverConfig.GetCredential()
     // Validate if long lived locks are indeed clean.
@@ -308,9 +336,9 @@ func (l *lockServer) lockMaintenance(interval time.Duration) {
         if expired {
             // The lock is no longer active at server that originated the lock
             // So remove the lock from the map.
-            l.mutex.Lock()
-            l.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
-            l.mutex.Unlock()
+            l.ll.mutex.Lock()
+            l.ll.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
+            l.ll.mutex.Unlock()
         }
     }
 }

cmd/lock-rpc-server_test.go

@@ -49,10 +49,12 @@ func createLockTestServer(t *testing.T) (string, *lockServer, string) {
     }
     locker := &lockServer{
         AuthRPCServer: AuthRPCServer{},
-        serviceEndpoint: "rpc-path",
-        mutex:           sync.Mutex{},
-        lockMap:         make(map[string][]lockRequesterInfo),
+        ll: localLocker{
+            mutex:           sync.Mutex{},
+            serviceEndpoint: "rpc-path",
+            lockMap:         make(map[string][]lockRequesterInfo),
+        },
     }
     creds := serverConfig.GetCredential()
     loginArgs := LoginRPCArgs{
@@ -93,7 +95,7 @@ func TestLockRpcServerLock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{
             {
                 writer: true,
@@ -163,7 +165,7 @@ func TestLockRpcServerUnlock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !testLockEquality(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -194,7 +196,7 @@ func TestLockRpcServerRLock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{
             {
                 writer: false,
@@ -281,7 +283,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{
             {
                 writer: false,
@@ -305,7 +307,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !testLockEquality(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -427,6 +429,12 @@ func TestLockServers(t *testing.T) {
         return
     }
 
+    rootPath, err := newTestConfig(globalMinioDefaultRegion)
+    if err != nil {
+        t.Fatalf("Init Test config failed")
+    }
+    defer removeAll(rootPath)
+
     currentIsDistXL := globalIsDistXL
     defer func() {
         globalIsDistXL = currentIsDistXL
@@ -471,9 +479,13 @@ func TestLockServers(t *testing.T) {
     // Validates lock server initialization.
     for i, testCase := range testCases {
         globalIsDistXL = testCase.isDistXL
-        lockServers := newLockServers(testCase.endpoints)
-        if len(lockServers) != testCase.totalLockServers {
-            t.Fatalf("Test %d: Expected total %d, got %d", i+1, testCase.totalLockServers, len(lockServers))
+        globalLockServers = nil
+        _, _ = newDsyncNodes(testCase.endpoints)
+        if err != nil {
+            t.Fatalf("Got unexpected error initializing lock servers: %v", err)
+        }
+        if len(globalLockServers) != testCase.totalLockServers {
+            t.Fatalf("Test %d: Expected total %d, got %d", i+1, testCase.totalLockServers, len(globalLockServers))
         }
     }
 }

cmd/namespace-lock.go

@@ -27,6 +27,9 @@ import (
 // Global name space lock.
 var globalNSMutex *nsLockMap
 
+// Global lock servers
+var globalLockServers []*lockServer
+
 // RWLocker - locker interface extends sync.Locker
 // to introduce RLock, RUnlock.
 type RWLocker interface {
@@ -36,27 +39,45 @@ type RWLocker interface {
 }
 
 // Initialize distributed locking only in case of distributed setup.
-// Returns if the setup is distributed or not on success.
-func initDsyncNodes() error {
+// Returns lock clients and the node index for the current server.
+func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int) {
     cred := serverConfig.GetCredential()
-    // Initialize rpc lock client information only if this instance is a distributed setup.
-    clnts := make([]dsync.NetLocker, len(globalEndpoints))
-    myNode := -1
-    for index, endpoint := range globalEndpoints {
-        clnts[index] = newLockRPCClient(authConfig{
-            accessKey:       cred.AccessKey,
-            secretKey:       cred.SecretKey,
-            serverAddr:      endpoint.Host,
-            secureConn:      globalIsSSL,
-            serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, endpoint.Path),
-            serviceName:     lockServiceName,
-        })
-        if endpoint.IsLocal && myNode == -1 {
+    clnts = make([]dsync.NetLocker, len(endpoints))
+    myNode = -1
+    for index, endpoint := range endpoints {
+        if !endpoint.IsLocal {
+            // For a remote endpoints setup a lock RPC client.
+            clnts[index] = newLockRPCClient(authConfig{
+                accessKey:       cred.AccessKey,
+                secretKey:       cred.SecretKey,
+                serverAddr:      endpoint.Host,
+                secureConn:      globalIsSSL,
+                serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, endpoint.Path),
+                serviceName:     lockServiceName,
+            })
+            continue
+        }
+
+        // Local endpoint
+        if myNode == -1 {
             myNode = index
         }
+
+        // For a local endpoint, setup a local lock server to
+        // avoid network requests.
+        localLockServer := lockServer{
+            AuthRPCServer: AuthRPCServer{},
+            ll: localLocker{
+                mutex:           sync.Mutex{},
+                serviceEndpoint: endpoint.Path,
+                serverAddr:      endpoint.Host,
+                lockMap:         make(map[string][]lockRequesterInfo),
+            },
+        }
+        globalLockServers = append(globalLockServers, &localLockServer)
+        clnts[index] = &(localLockServer.ll)
     }
-    return dsync.Init(clnts, myNode)
+
+    return clnts, myNode
 }
 
 // initNSLock - initialize name space lock map.

cmd/server-main.go

@@ -25,6 +25,7 @@ import (
     "time"
 
     "github.com/minio/cli"
+    "github.com/minio/dsync"
 )
 
 var serverFlags = []cli.Flag{
@@ -244,7 +245,8 @@ func serverMain(ctx *cli.Context) {
     // Set nodes for dsync for distributed setup.
     if globalIsDistXL {
-        fatalIf(initDsyncNodes(), "Unable to initialize distributed locking clients")
+        clnts, myNode := newDsyncNodes(globalEndpoints)
+        fatalIf(dsync.Init(clnts, myNode), "Unable to initialize distributed locking clients")
     }
 
     // Initialize name space lock.

cmd/server-mux_test.go

@@ -340,10 +340,11 @@ func TestServerListenAndServePlain(t *testing.T) {
 }
 
 func TestServerListenAndServeTLS(t *testing.T) {
-    _, err := newTestConfig(globalMinioDefaultRegion)
+    rootPath, err := newTestConfig(globalMinioDefaultRegion)
     if err != nil {
         t.Fatalf("Init Test config failed")
     }
+    defer removeAll(rootPath)
 
     wait := make(chan struct{})
     addr := net.JoinHostPort("127.0.0.1", getFreePort())
