Do not ignore Lock()'s return value (#8142)

master
Authored by Krishna Srinivas 5 years ago; committed by Harshavardhana
parent 83d4c5763c
commit 2ab0681c0c
Changed files:

1. cmd/lock-rest-client.go (27 lines changed)
2. cmd/lock-rest-server-common.go (4 lines changed)
3. cmd/lock-rest-server-common_test.go (2 lines changed)
4. cmd/lock-rest-server.go (42 lines changed)
5. cmd/namespace-lock.go (4 lines changed)
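The change is small but touches every lock RPC path. dsync-style lockers return `(reply bool, err error)`, where `reply == false` with a nil error means the lock is currently held by another owner; the old handlers tested only `err`, so a conflict was indistinguishable from a successful acquisition. A minimal sketch of the anti-pattern and its fix; `tryLocker` here is a hypothetical stand-in for `localLocker`, not MinIO code:

```go
package main

import "fmt"

// tryLocker is a hypothetical stand-in for MinIO's localLocker:
// Lock returns (reply, err) where reply == false and err == nil
// means the lock is currently held by another owner.
type tryLocker struct{ held bool }

func (t *tryLocker) Lock() (reply bool, err error) {
	if t.held {
		return false, nil // conflict, not an error
	}
	t.held = true
	return true, nil
}

func main() {
	var l tryLocker
	l.Lock() // first caller acquires the lock

	// Buggy pattern: ignoring the reply value.
	if _, err := l.Lock(); err == nil {
		fmt.Println("BUG: conflict mistaken for success")
	}

	// Fixed pattern: inspect both return values.
	if reply, err := l.Lock(); err != nil {
		fmt.Println("transport/internal error:", err)
	} else if !reply {
		fmt.Println("lock conflict: already held")
	}
}
```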

cmd/lock-rest-client.go:

```diff
@@ -43,6 +43,20 @@ type lockRESTClient struct {
 	timer *time.Timer
 }
 
+func toLockError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	switch err.Error() {
+	case errLockConflict.Error():
+		return errLockConflict
+	case errLockNotExpired.Error():
+		return errLockNotExpired
+	}
+	return err
+}
+
 // ServerAddr - dsync.NetLocker interface compatible method.
 func (client *lockRESTClient) ServerAddr() string {
 	return client.serverURL.Host
```
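The new `toLockError` helper exists because sentinel errors lose their identity across the REST boundary: the server serializes only the message text, and the client reconstructs a fresh error value, so `err == errLockConflict` would never be true on the receiving side. Matching on `err.Error()` restores the canonical sentinels. A self-contained sketch of the problem, with a string round-trip standing in for the HTTP transport:

```go
package main

import (
	"errors"
	"fmt"
)

var errLockConflict = errors.New("lock conflict")

// overTheWire simulates an error crossing HTTP: only its text survives.
func overTheWire(err error) error {
	return errors.New(err.Error())
}

// toLockError maps a wire error back to the canonical sentinel,
// mirroring the helper added in cmd/lock-rest-client.go.
func toLockError(err error) error {
	if err == nil {
		return nil
	}
	if err.Error() == errLockConflict.Error() {
		return errLockConflict
	}
	return err
}

func main() {
	received := overTheWire(errLockConflict)
	fmt.Println(received == errLockConflict)              // false: identity lost
	fmt.Println(toLockError(received) == errLockConflict) // true: identity restored
}
```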
```diff
@@ -88,7 +102,6 @@ func (client *lockRESTClient) markHostDown() {
 // permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
 // after verifying format.json
 func (client *lockRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
-
 	if !client.isHostUp() {
 		return nil, errors.New("Lock rest server node is down")
 	}
@@ -98,7 +111,6 @@ func (client *lockRESTClient) call(method string, values url.Values, body io.Rea
 	}
 
-
 	respBody, err = client.restClient.Call(method, values, body, length)
 	if err == nil {
 		return respBody, nil
 	}
@@ -107,7 +119,7 @@ func (client *lockRESTClient) call(method string, values url.Values, body io.Rea
 		client.markHostDown()
 	}
 
-	return nil, err
+	return nil, toLockError(err)
 }
 
 // Stringer provides a canonicalized representation of node.
@@ -138,7 +150,14 @@ func (client *lockRESTClient) restCall(call string, args dsync.LockArgs) (reply
 	respBody, err := client.call(call, values, nil, -1)
 	defer http.DrainBody(respBody)
-	return err == nil, err
+	switch err {
+	case nil:
+		return true, nil
+	case errLockConflict, errLockNotExpired:
+		return false, nil
+	default:
+		return false, err
+	}
 }
 
 // RLock calls read lock REST API.
```
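With the sentinels restored, `restCall` can translate lock contention into `(false, nil)`: to the caller, a conflict is a normal "did not acquire" outcome to retry, while every other error still signals a genuine failure such as an unreachable node. A sketch of a caller relying on that contract; `tryAcquire` and the retry loop are illustrative, not MinIO's actual dsync code:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// tryAcquire is a hypothetical stand-in for lockRESTClient.Lock after
// this patch: (false, nil) means "held by someone else, retry later",
// while a non-nil error means the node is unreachable or misbehaving.
func tryAcquire(attempt int) (bool, error) {
	switch {
	case attempt < 2:
		return false, nil // simulated contention on early attempts
	case attempt > 5:
		return false, errors.New("lock rest server node is down")
	default:
		return true, nil
	}
}

func main() {
	for attempt := 0; ; attempt++ {
		ok, err := tryAcquire(attempt)
		if err != nil {
			fmt.Println("giving up:", err) // real failure: stop, do not spin
			return
		}
		if ok {
			fmt.Println("acquired on attempt", attempt)
			return
		}
		time.Sleep(10 * time.Millisecond) // back off, then retry the conflict
	}
}
```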

cmd/lock-rest-server-common.go:

```diff
@@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"errors"
 	"path"
 	"time"
 )
@@ -53,6 +54,9 @@ type nameLockRequesterInfoPair struct {
 	lri  lockRequesterInfo
 }
 
+var errLockConflict = errors.New("lock conflict")
+var errLockNotExpired = errors.New("lock not expired")
+
 // Similar to removeEntry but only removes an entry only if the lock entry exists in map.
 func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
 	// Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
```

cmd/lock-rest-server-common_test.go:

```diff
@@ -35,7 +35,7 @@ func createLockTestServer(t *testing.T) (string, *lockRESTServer, string) {
 	}
 
 	locker := &lockRESTServer{
-		ll: localLocker{
+		ll: &localLocker{
 			mutex:           sync.Mutex{},
 			serviceEndpoint: "rpc-path",
 			lockMap:         make(map[string][]lockRequesterInfo),
```

cmd/lock-rest-server.go:

```diff
@@ -42,7 +42,7 @@ const (
 
 // To abstract a node over network.
 type lockRESTServer struct {
-	ll localLocker
+	ll *localLocker
 }
 
 func (l *lockRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
@@ -62,6 +62,7 @@ func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
 func getLockArgs(r *http.Request) dsync.LockArgs {
 	return dsync.LockArgs{
 		UID:             r.URL.Query().Get(lockRESTUID),
+		Source:          r.URL.Query().Get(lockRESTSource),
 		Resource:        r.URL.Query().Get(lockRESTResource),
 		ServerAddr:      r.URL.Query().Get(lockRESTServerAddr),
 		ServiceEndpoint: r.URL.Query().Get(lockRESTServerEndpoint),
@@ -75,7 +76,11 @@ func (l *lockRESTServer) LockHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if _, err := l.ll.Lock(getLockArgs(r)); err != nil {
+	success, err := l.ll.Lock(getLockArgs(r))
+	if err == nil && !success {
+		err = errLockConflict
+	}
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -88,7 +93,10 @@ func (l *lockRESTServer) UnlockHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if _, err := l.ll.Unlock(getLockArgs(r)); err != nil {
+	_, err := l.ll.Unlock(getLockArgs(r))
+	// Ignore the Unlock() "reply" return value because if err == nil, "reply" is always true.
+	// Consequently, if err != nil, "reply" is always false.
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -101,7 +109,11 @@ func (l *lockRESTServer) RLockHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if _, err := l.ll.RLock(getLockArgs(r)); err != nil {
+	success, err := l.ll.RLock(getLockArgs(r))
+	if err == nil && !success {
+		err = errLockConflict
+	}
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -114,7 +126,10 @@ func (l *lockRESTServer) RUnlockHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	if _, err := l.ll.RUnlock(getLockArgs(r)); err != nil {
+	// Ignore the RUnlock() "reply" return value because if err == nil, "reply" is always true.
+	// Consequently, if err != nil, "reply" is always false.
+	_, err := l.ll.RUnlock(getLockArgs(r))
+	if err != nil {
 		l.writeErrorResponse(w, err)
 		return
 	}
@@ -127,6 +142,8 @@ func (l *lockRESTServer) ForceUnlockHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
+	// Ignore the ForceUnlock() "reply" return value because if err == nil, "reply" is always true.
+	// Consequently, if err != nil, "reply" is always false.
 	if _, err := l.ll.ForceUnlock(getLockArgs(r)); err != nil {
 		l.writeErrorResponse(w, err)
 		return
@@ -142,7 +159,6 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request)
 	lockArgs := getLockArgs(r)
 
-	success := true
 	l.ll.mutex.Lock()
 	defer l.ll.mutex.Unlock()
 
 	// Lock found, proceed to verify if belongs to given uid.
@@ -150,15 +166,11 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request)
 		// Check whether uid is still active
 		for _, entry := range lri {
 			if entry.UID == lockArgs.UID {
-				success = false // When uid found, lock is still active so return not expired.
-				break
+				l.writeErrorResponse(w, errLockNotExpired)
+				return
 			}
 		}
 	}
-
-	if !success {
-		l.writeErrorResponse(w, errors.New("lock already expired"))
-		return
-	}
 }
@@ -189,11 +201,15 @@ func (l *lockRESTServer) lockMaintenance(interval time.Duration) {
 			}
 
 			// Call back to original server verify whether the lock is still active (based on name & uid)
-			expired, _ := c.Expired(dsync.LockArgs{
+			expired, err := c.Expired(dsync.LockArgs{
 				UID:      nlrip.lri.UID,
 				Resource: nlrip.name,
 			})
+			if err != nil {
+				continue
+			}
+
 			// For successful response, verify if lock was indeed active or stale.
 			if expired {
 				// The lock is no longer active at server that originated
```
cmd/namespace-lock.go:

```diff
@@ -67,7 +67,7 @@ func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int)
 			myNode = len(clnts)
 
 			receiver := &lockRESTServer{
-				ll: localLocker{
+				ll: &localLocker{
 					serverAddr:      endpoint.Host,
 					serviceEndpoint: lockServicePath,
 					lockMap:         make(map[string][]lockRequesterInfo),
@@ -75,7 +75,7 @@ func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int)
 			}
 
 			globalLockServer = receiver
-			locker = &(receiver.ll)
+			locker = receiver.ll
 		} else {
 			host, err := xnet.ParseHost(endpoint.Host)
 			logger.FatalIf(err, "Unable to parse Lock RPC Host")
```
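Changing `ll` from `localLocker` to `*localLocker` makes the lock-REST server and the dsync-side `locker` share one instance by construction, and it removes the temptation to copy a struct that carries a `sync.Mutex` and the live `lockMap`; copying such a struct is exactly what `go vet`'s copylocks check warns about. A minimal sketch of the sharing and the hazard, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

// locker mirrors the shape of localLocker: a mutex guarding a map.
type locker struct {
	mu      sync.Mutex
	lockMap map[string]int
}

// byValue takes the struct by value, copying the mutex with it.
// `go vet` (copylocks check) flags this parameter as passing a lock
// by value; a copy made while mu is held is silently corrupt.
func byValue(l locker) { _ = l.lockMap }

func main() {
	shared := &locker{lockMap: make(map[string]int)}

	// The lockRESTServer-style holder and the dsync-side reference
	// point at the same instance, as in the patched namespace-lock.go.
	server := struct{ ll *locker }{ll: shared}
	dsyncSide := shared

	server.ll.mu.Lock()
	server.ll.lockMap["obj"] = 1
	server.ll.mu.Unlock()

	fmt.Println(dsyncSide.lockMap["obj"]) // 1: one shared lock map

	byValue(*shared) // compiles, but go vet reports it
}
```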
