From 2ab0681c0c5eb28f3feede16048073b11f2fbe8f Mon Sep 17 00:00:00 2001
From: Krishna Srinivas <634494+krishnasrinivas@users.noreply.github.com>
Date: Wed, 28 Aug 2019 16:12:57 -0700
Subject: [PATCH] Do not ignore Lock()'s return value (#8142)

---
 cmd/lock-rest-client.go             | 27 ++++++++++++++++---
 cmd/lock-rest-server-common.go      |  4 +++
 cmd/lock-rest-server-common_test.go |  2 +-
 cmd/lock-rest-server.go             | 42 ++++++++++++++++++++---
 cmd/namespace-lock.go               |  4 +--
 5 files changed, 59 insertions(+), 20 deletions(-)

diff --git a/cmd/lock-rest-client.go b/cmd/lock-rest-client.go
index 4e4cd6633..bdf4ecfcd 100644
--- a/cmd/lock-rest-client.go
+++ b/cmd/lock-rest-client.go
@@ -43,6 +43,20 @@ type lockRESTClient struct {
 	timer *time.Timer
 }
 
+func toLockError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	switch err.Error() {
+	case errLockConflict.Error():
+		return errLockConflict
+	case errLockNotExpired.Error():
+		return errLockNotExpired
+	}
+	return err
+}
+
 // ServerAddr - dsync.NetLocker interface compatible method.
 func (client *lockRESTClient) ServerAddr() string {
 	return client.serverURL.Host
@@ -88,7 +102,6 @@ func (client *lockRESTClient) markHostDown() {
 // permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
 // after verifying format.json
 func (client *lockRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
-
 	if !client.isHostUp() {
 		return nil, errors.New("Lock rest server node is down")
 	}
@@ -98,7 +111,6 @@ func (client *lockRESTClient) call(method string, values url.Values, body io.Rea
 	}
 
 	respBody, err = client.restClient.Call(method, values, body, length)
-
 	if err == nil {
 		return respBody, nil
 	}
@@ -107,7 +119,7 @@ func (client *lockRESTClient) call(method string, values url.Values, body io.Rea
 		client.markHostDown()
 	}
 
-	return nil, err
+	return nil, toLockError(err)
 }
 
 // Stringer provides a canonicalized representation of node.
@@ -138,7 +150,14 @@ func (client *lockRESTClient) restCall(call string, args dsync.LockArgs) (reply
 
 	respBody, err := client.call(call, values, nil, -1)
 	defer http.DrainBody(respBody)
-	return err == nil, err
+	switch err {
+	case nil:
+		return true, nil
+	case errLockConflict, errLockNotExpired:
+		return false, nil
+	default:
+		return false, err
+	}
 }
 
 // RLock calls read lock REST API.
diff --git a/cmd/lock-rest-server-common.go b/cmd/lock-rest-server-common.go
index cb933df09..7efeec7b2 100644
--- a/cmd/lock-rest-server-common.go
+++ b/cmd/lock-rest-server-common.go
@@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"errors"
 	"path"
 	"time"
 )
@@ -53,6 +54,9 @@ type nameLockRequesterInfoPair struct {
 	lri  lockRequesterInfo
 }
 
+var errLockConflict = errors.New("lock conflict")
+var errLockNotExpired = errors.New("lock not expired")
+
 // Similar to removeEntry but only removes an entry only if the lock entry exists in map.
 func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
 	// Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
diff --git a/cmd/lock-rest-server-common_test.go b/cmd/lock-rest-server-common_test.go
index 9a5ec3a2c..b5f0bcdae 100644
--- a/cmd/lock-rest-server-common_test.go
+++ b/cmd/lock-rest-server-common_test.go
@@ -35,7 +35,7 @@ func createLockTestServer(t *testing.T) (string, *lockRESTServer, string) {
 	}
 
 	locker := &lockRESTServer{
-		ll: localLocker{
+		ll: &localLocker{
 			mutex:           sync.Mutex{},
 			serviceEndpoint: "rpc-path",
 			lockMap:         make(map[string][]lockRequesterInfo),
diff --git a/cmd/lock-rest-server.go b/cmd/lock-rest-server.go
index b1b2b6b4f..4ba7ef75b 100644
--- a/cmd/lock-rest-server.go
+++ b/cmd/lock-rest-server.go
@@ -42,7 +42,7 @@ const (
 
 // To abstract a node over network.
 type lockRESTServer struct {
-	ll localLocker
+	ll *localLocker
 }
 
 func (l *lockRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
@@ -62,6 +62,7 @@ func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
 func getLockArgs(r *http.Request) dsync.LockArgs {
 	return dsync.LockArgs{
 		UID:             r.URL.Query().Get(lockRESTUID),
+		Source:          r.URL.Query().Get(lockRESTSource),
 		Resource:        r.URL.Query().Get(lockRESTResource),
 		ServerAddr:      r.URL.Query().Get(lockRESTServerAddr),
 		ServiceEndpoint: r.URL.Query().Get(lockRESTServerEndpoint),
@@ -75,7 +76,11 @@ func (l *lockRESTServer) LockHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if _, err := l.ll.Lock(getLockArgs(r)); err != nil {
+	success, err := l.ll.Lock(getLockArgs(r))
+	if err == nil && !success {
+		err = errLockConflict
+	}
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -88,7 +93,10 @@ func (l *lockRESTServer) UnlockHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if _, err := l.ll.Unlock(getLockArgs(r)); err != nil {
+	_, err := l.ll.Unlock(getLockArgs(r))
+	// Ignore the Unlock() "reply" return value because if err == nil, "reply" is always true
+	// Consequently, if err != nil, reply is always false
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -101,7 +109,11 @@ func (l *lockRESTServer) RLockHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if _, err := l.ll.RLock(getLockArgs(r)); err != nil {
+	success, err := l.ll.RLock(getLockArgs(r))
+	if err == nil && !success {
+		err = errLockConflict
+	}
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -114,7 +126,10 @@ func (l *lockRESTServer) RUnlockHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if _, err := l.ll.RUnlock(getLockArgs(r)); err != nil {
+	// Ignore the RUnlock() "reply" return value because if err == nil, "reply" is always true.
+	// Consequently, if err != nil, reply is always false
+	_, err := l.ll.RUnlock(getLockArgs(r))
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -127,6 +142,8 @@ func (l *lockRESTServer) ForceUnlockHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
+	// Ignore the ForceUnlock() "reply" return value because if err == nil, "reply" is always true
+	// Consequently, if err != nil, reply is always false
 	if _, err := l.ll.ForceUnlock(getLockArgs(r)); err != nil {
 		l.writeErrorResponse(w, err)
 		return
@@ -142,7 +159,6 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request)
 
 	lockArgs := getLockArgs(r)
 
-	success := true
 	l.ll.mutex.Lock()
 	defer l.ll.mutex.Unlock()
 	// Lock found, proceed to verify if belongs to given uid.
@@ -150,15 +166,11 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request)
 		// Check whether uid is still active
 		for _, entry := range lri {
 			if entry.UID == lockArgs.UID {
-				success = false // When uid found, lock is still active so return not expired.
-				break
+				l.writeErrorResponse(w, errLockNotExpired)
+				return
 			}
 		}
 	}
-	if !success {
-		l.writeErrorResponse(w, errors.New("lock already expired"))
-		return
-	}
 }
 
 // lockMaintenance loops over locks that have been active for some time and checks back
@@ -189,11 +201,15 @@ func (l *lockRESTServer) lockMaintenance(interval time.Duration) {
 			}
 
 			// Call back to original server verify whether the lock is still active (based on name & uid)
-			expired, _ := c.Expired(dsync.LockArgs{
+			expired, err := c.Expired(dsync.LockArgs{
 				UID:      nlrip.lri.UID,
 				Resource: nlrip.name,
 			})
 
+			if err != nil {
+				continue
+			}
+
 			// For successful response, verify if lock was indeed active or stale.
 			if expired {
 				// The lock is no longer active at server that originated
diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go
index 65f3c8e60..0170cb383 100644
--- a/cmd/namespace-lock.go
+++ b/cmd/namespace-lock.go
@@ -67,7 +67,7 @@ func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int)
 			myNode = len(clnts)
 
 			receiver := &lockRESTServer{
-				ll: localLocker{
+				ll: &localLocker{
 					serverAddr:      endpoint.Host,
 					serviceEndpoint: lockServicePath,
 					lockMap:         make(map[string][]lockRequesterInfo),
@@ -75,7 +75,7 @@ func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int)
 			}
 
 			globalLockServer = receiver
-			locker = &(receiver.ll)
+			locker = receiver.ll
 		} else {
 			host, err := xnet.ParseHost(endpoint.Host)
 			logger.FatalIf(err, "Unable to parse Lock RPC Host")
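
For readers following the client-side change: the patch introduces two cooperating pieces. toLockError() rehydrates the errLockConflict / errLockNotExpired sentinels from the plain error strings that come back over the REST call, and the switch added to restCall() then treats those sentinels as an ordinary "lock not granted" answer rather than an RPC failure. The standalone sketch below is not MinIO code; the helper names are reused purely for illustration, and lockResult is a hypothetical stand-in for restCall's translation step:

package main

import (
	"errors"
	"fmt"
)

// Sentinel values mirroring errLockConflict / errLockNotExpired from the patch.
var (
	errLockConflict   = errors.New("lock conflict")
	errLockNotExpired = errors.New("lock not expired")
)

// toLockError mirrors the client-side helper: errors travel over REST as plain
// strings, so they are matched by message and mapped back to sentinel values.
func toLockError(err error) error {
	if err == nil {
		return nil
	}
	switch err.Error() {
	case errLockConflict.Error():
		return errLockConflict
	case errLockNotExpired.Error():
		return errLockNotExpired
	}
	return err
}

// lockResult mirrors the switch added to restCall: a conflict (or a lock that
// has not expired yet) is a normal "not granted" answer, not an RPC failure.
func lockResult(err error) (granted bool, rpcErr error) {
	switch toLockError(err) {
	case nil:
		return true, nil
	case errLockConflict, errLockNotExpired:
		return false, nil
	default:
		return false, err
	}
}

func main() {
	for _, err := range []error{nil, errors.New("lock conflict"), errors.New("connection refused")} {
		granted, rpcErr := lockResult(err)
		fmt.Printf("granted=%v rpcErr=%v\n", granted, rpcErr)
	}
}

Running it prints granted=true for a nil error, granted=false with a nil error for the conflict string, and passes the unrelated network error through unchanged, which mirrors the (reply, err) pair the patched restCall hands back to dsync.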
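
On the server side, the handlers now check Lock()'s boolean return instead of discarding it: when Lock() reports no error but also no success, the handler responds with errLockConflict so the remote client can tell "lock held by someone else" apart from a genuine failure. A minimal self-contained sketch of that pattern follows; it is again not MinIO code — tryLock is a hypothetical stand-in for localLocker.Lock, and the HTTP status code is chosen arbitrarily here, whereas the real writeErrorResponse has its own conventions:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

var errLockConflict = errors.New("lock conflict")

// tryLock stands in for localLocker.Lock: ok == false with a nil error means the
// resource is already locked by someone else, not that the locker itself failed.
func tryLock(resource string) (ok bool, err error) {
	return resource != "busy", nil
}

// lockHandler follows the pattern the patch applies to LockHandler/RLockHandler:
// an error-free but unsuccessful Lock() is promoted to errLockConflict so the
// remote client receives a distinct, recognizable response.
func lockHandler(w http.ResponseWriter, r *http.Request) {
	ok, err := tryLock(r.URL.Query().Get("resource"))
	if err == nil && !ok {
		err = errLockConflict
	}
	if err != nil {
		// Status code picked for the sketch only.
		http.Error(w, err.Error(), http.StatusConflict)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	for _, resource := range []string{"free", "busy"} {
		rec := httptest.NewRecorder()
		req := httptest.NewRequest(http.MethodPost, "/lock?resource="+resource, nil)
		lockHandler(rec, req)
		fmt.Printf("%s -> %d %q\n", resource, rec.Code, rec.Body.String())
	}
}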