vendorize new dsync with the new logging changes.

Branch: master
Author: Harshavardhana, 8 years ago
Parent: cbe87cb2ed
Commit: f655592ff5
  1. vendor/github.com/minio/dsync/drwmutex.go (118 changed lines)
  2. vendor/vendor.json (6 changed lines)
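
Note: every log statement the drwmutex.go diff below adds is gated behind a DSYNC_LOG environment check, so the vendored code stays silent unless DSYNC_LOG=1 is set. A minimal, self-contained sketch of that toggle pattern follows; the logIfEnabled helper and the sample error are illustrative only and are not part of dsync.

package main

import (
	"errors"
	"log"
	"os"
)

// dsyncLog mirrors the package-level flag the vendored drwmutex.go adds.
var dsyncLog bool

func init() {
	// Logging of failed RPC operations is enabled only when DSYNC_LOG=1.
	dsyncLog = os.Getenv("DSYNC_LOG") == "1"
}

// logIfEnabled is a hypothetical helper: it logs only when the flag is set.
func logIfEnabled(msg string, err error) {
	if dsyncLog {
		log.Println(msg, err)
	}
}

func main() {
	// Run as `DSYNC_LOG=1 go run main.go` to see the message; without the
	// variable the call is silent, matching the gated logging in the diff below.
	logIfEnabled("Unable to call Dsync.Lock", errors.New("connection refused"))
}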

--- a/vendor/github.com/minio/dsync/drwmutex.go
+++ b/vendor/github.com/minio/dsync/drwmutex.go
@@ -17,27 +17,36 @@
 package dsync
 
 import (
+	"log"
 	"math"
 	"math/rand"
 	"net"
+	"os"
 	"sync"
 	"time"
 )
 
-const DRWMutexAcquireTimeout = 25 * time.Millisecond
+// Indicator if logging is enabled.
+var dsyncLog bool
+
+func init() {
+	// Check for DSYNC_LOG env variable, if set logging will be enabled for failed RPC operations.
+	dsyncLog = os.Getenv("DSYNC_LOG") == "1"
+}
+
+// DRWMutexAcquireTimeout - tolerance limit to wait for lock acquisition before.
+const DRWMutexAcquireTimeout = 25 * time.Millisecond // 25ms.
 
 // A DRWMutex is a distributed mutual exclusion lock.
 type DRWMutex struct {
 	Name  string
 	locks []bool // Array of nodes that granted a lock
-	uids  []string // Array of uids for verification of sending correct release messages
 	m     sync.Mutex // Mutex to prevent multiple simultaneous locks from this node
 }
 
 type Granted struct {
 	index  int
 	locked bool
-	uid    string
 }
 
 type LockArgs struct {
@@ -58,7 +67,6 @@ func NewDRWMutex(name string) *DRWMutex {
 	return &DRWMutex{
 		Name:  name,
 		locks: make([]bool, dnodeCount),
-		uids:  make([]string, dnodeCount),
 	}
 }
@@ -78,15 +86,13 @@ func (dm *DRWMutex) RLock() {
 		// create temp arrays on stack
 		locks := make([]bool, dnodeCount)
-		ids := make([]string, dnodeCount)
 
 		// try to acquire the lock
 		isReadLock := true
-		success := lock(clnts, &locks, &ids, dm.Name, isReadLock)
+		success := lock(clnts, &locks, dm.Name, isReadLock)
 		if success {
 			// if success, copy array to object
 			copy(dm.locks, locks[:])
-			copy(dm.uids, ids[:])
 			return
 		}
@@ -121,15 +127,13 @@ func (dm *DRWMutex) Lock() {
 	for {
 		// create temp arrays on stack
 		locks := make([]bool, dnodeCount)
-		ids := make([]string, dnodeCount)
 
 		// try to acquire the lock
 		isReadLock := false
-		success := lock(clnts, &locks, &ids, dm.Name, isReadLock)
+		success := lock(clnts, &locks, dm.Name, isReadLock)
 		if success {
 			// if success, copy array to object
 			copy(dm.locks, locks[:])
-			copy(dm.uids, ids[:])
 			return
 		}
@@ -150,10 +154,10 @@ func (dm *DRWMutex) Lock() {
 // lock tries to acquire the distributed lock, returning true or false
 //
-func lock(clnts []RPC, locks *[]bool, uids *[]string, lockName string, isReadLock bool) bool {
+func lock(clnts []RPC, locks *[]bool, lockName string, isReadLock bool) bool {
 
 	// Create buffered channel of quorum size
-	ch := make(chan Granted, dquorum)
+	ch := make(chan Granted, dnodeCount)
 
 	for index, c := range clnts {
@@ -161,51 +165,52 @@ func lock(clnts []RPC, locks *[]bool, uids *[]string, lockName string, isReadLoc
 		go func(index int, isReadLock bool, c RPC) {
 			// All client methods issuing RPCs are thread-safe and goroutine-safe,
 			// i.e. it is safe to call them from multiple concurrently running go routines.
-			var status bool
-			var err error
+			var locked bool
 			if isReadLock {
-				err = c.Call("Dsync.RLock", &LockArgs{Name: lockName}, &status)
+				if err := c.Call("Dsync.RLock", &LockArgs{Name: lockName}, &locked); err != nil {
+					if dsyncLog {
+						log.Println("Unable to call Dsync.RLock", err)
+					}
+				}
 			} else {
-				err = c.Call("Dsync.Lock", &LockArgs{Name: lockName}, &status)
+				if err := c.Call("Dsync.Lock", &LockArgs{Name: lockName}, &locked); err != nil {
+					if dsyncLog {
+						log.Println("Unable to call Dsync.Lock", err)
+					}
+				}
 			}
-			locked, uid := false, ""
-			if err == nil {
-				locked = status
-				// TODO: Get UIOD again
-				uid = ""
-			}
-			ch <- Granted{index: index, locked: locked, uid: uid}
+			// silently ignore error, retry later
+			ch <- Granted{index: index, locked: locked}
 		}(index, isReadLock, c)
 	}
 
+	var wg sync.WaitGroup
+	wg.Add(1)
+
 	quorum := false
-	var wg sync.WaitGroup
-	wg.Add(1)
 
 	go func(isReadLock bool) {
 
-		// Wait until we have received (minimally) quorum number of responses or timeout
-		i := 0
+		// Wait until we have either a) received all lock responses, b) received too many 'non-'locks for quorum to be or c) time out
+		i, locksFailed := 0, 0
 		done := false
 		timeout := time.After(DRWMutexAcquireTimeout)
 
-		for ; i < dnodeCount; i++ {
+		for ; i < dnodeCount; i++ { // Loop until we acquired all locks
 
 			select {
 			case grant := <-ch:
 				if grant.locked {
 					// Mark that this node has acquired the lock
 					(*locks)[grant.index] = true
-					(*uids)[grant.index] = grant.uid
 				} else {
+					locksFailed++
+					if locksFailed > dnodeCount - dquorum {
+						// We know that we are not going to get the lock anymore, so exit out
+						// and release any locks that did get acquired
 						done = true
-					//fmt.Println("one lock failed before quorum -- release locks acquired")
-					releaseAll(clnts, locks, uids, lockName, isReadLock)
+						releaseAll(clnts, locks, lockName, isReadLock)
+					}
 				}
 
 			case <-timeout:
@@ -213,8 +218,7 @@ func lock(clnts []RPC, locks *[]bool, uids *[]string, lockName string, isReadLoc
 				// timeout happened, maybe one of the nodes is slow, count
 				// number of locks to check whether we have quorum or not
 				if !quorumMet(locks) {
-					//fmt.Println("timed out -- release locks acquired")
-					releaseAll(clnts, locks, uids, lockName, isReadLock)
+					releaseAll(clnts, locks, lockName, isReadLock)
 				}
 			}
@@ -236,7 +240,7 @@ func lock(clnts []RPC, locks *[]bool, uids *[]string, lockName string, isReadLoc
 			grantToBeReleased := <-ch
 			if grantToBeReleased.locked {
 				// release lock
-				sendRelease(clnts[grantToBeReleased.index], lockName, grantToBeReleased.uid, isReadLock)
+				sendRelease(clnts[grantToBeReleased.index], lockName, isReadLock)
 			}
 		}
 	}(isReadLock)
@@ -260,13 +264,11 @@
 }
 
 // releaseAll releases all locks that are marked as locked
-func releaseAll(clnts []RPC, locks *[]bool, ids *[]string, lockName string, isReadLock bool) {
+func releaseAll(clnts []RPC, locks *[]bool, lockName string, isReadLock bool) {
 	for lock := 0; lock < dnodeCount; lock++ {
 		if (*locks)[lock] {
-			sendRelease(clnts[lock], lockName, (*ids)[lock], isReadLock)
+			sendRelease(clnts[lock], lockName, isReadLock)
 			(*locks)[lock] = false
-			(*ids)[lock] = ""
 		}
 	}
@@ -288,7 +290,7 @@ func (dm *DRWMutex) RUnlock() {
 		if dm.locks[index] {
 			// broadcast lock release to all nodes the granted the lock
 			isReadLock := true
-			sendRelease(c, dm.Name, dm.uids[index], isReadLock)
+			sendRelease(c, dm.Name, isReadLock)
 			dm.locks[index] = false
 		}
@@ -312,7 +314,7 @@ func (dm *DRWMutex) Unlock() {
 		if dm.locks[index] {
 			// broadcast lock release to all nodes the granted the lock
 			isReadLock := false
-			sendRelease(c, dm.Name, dm.uids[index], isReadLock)
+			sendRelease(c, dm.Name, isReadLock)
 			dm.locks[index] = false
 		}
@@ -320,34 +322,46 @@ func (dm *DRWMutex) Unlock() {
 }
 
 // sendRelease sends a release message to a node that previously granted a lock
-func sendRelease(c RPC, name, uid string, isReadLock bool) {
-	backOffArray := []time.Duration{30 * time.Second, 1 * time.Minute, 3 * time.Minute, 10 * time.Minute, 30 * time.Minute, 1 * time.Hour}
+func sendRelease(c RPC, name string, isReadLock bool) {
+	backOffArray := []time.Duration{
+		30 * time.Second, // 30secs.
+		1 * time.Minute,  // 1min.
+		3 * time.Minute,  // 3min.
+		10 * time.Minute, // 10min.
+		30 * time.Minute, // 30min.
+		1 * time.Hour,    // 1hr.
+	}
 
-	go func(c RPC, name, uid string) {
+	go func(c RPC, name string) {
 		for _, backOff := range backOffArray {
 			// All client methods issuing RPCs are thread-safe and goroutine-safe,
 			// i.e. it is safe to call them from multiple concurrently running goroutines.
-			var status bool
-			var err error
-			// TODO: Send UID to server
+			var unlocked bool
 			if isReadLock {
-				if err = c.Call("Dsync.RUnlock", &LockArgs{Name: name}, &status); err == nil {
+				if err := c.Call("Dsync.RUnlock", &LockArgs{Name: name}, &unlocked); err == nil {
 					// RUnlock delivered, exit out
 					return
 				} else if err != nil {
+					if dsyncLog {
+						log.Println("Unable to call Dsync.RUnlock", err)
+					}
 					if nErr, ok := err.(net.Error); ok && nErr.Timeout() {
 						// RUnlock possibly failed with server timestamp mismatch, server may have restarted.
 						return
 					}
 				}
 			} else {
-				if err = c.Call("Dsync.Unlock", &LockArgs{Name: name}, &status); err == nil {
+				if err := c.Call("Dsync.Unlock", &LockArgs{Name: name}, &unlocked); err == nil {
 					// Unlock delivered, exit out
 					return
 				} else if err != nil {
+					if dsyncLog {
+						log.Println("Unable to call Dsync.Unlock", err)
+					}
 					if nErr, ok := err.(net.Error); ok && nErr.Timeout() {
 						// Unlock possibly failed with server timestamp mismatch, server may have restarted.
 						return
@@ -355,8 +369,8 @@ func sendRelease(c RPC, name, uid string, isReadLock bool) {
 				}
 			}
 
-			// wait
+			// Wait..
 			time.Sleep(backOff)
 		}
-	}(c, name, uid)
+	}(c, name)
 }
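
For context on the new locksFailed bookkeeping in lock() above: with dnodeCount nodes and a required quorum of dquorum, once more than dnodeCount-dquorum responses are refusals a quorum can no longer be reached, so the acquisition attempt is abandoned and any locks already granted are released. The standalone sketch below shows just that arithmetic; the values of dnodeCount and dquorum are stand-ins (dsync configures them elsewhere, outside this diff).

package main

import "fmt"

func main() {
	// Stand-in values; in dsync these are package-level variables set at initialization.
	dnodeCount := 8
	dquorum := dnodeCount/2 + 1 // 5 of 8 nodes must grant the lock.

	// Simulated grant/refuse responses from the eight lock servers.
	responses := []bool{true, false, false, true, false, false, true, true}

	locksFailed := 0
	for i, granted := range responses {
		if granted {
			continue
		}
		locksFailed++
		if locksFailed > dnodeCount-dquorum {
			// More refusals than can be tolerated: quorum is impossible,
			// so a real caller would release the locks already acquired.
			fmt.Printf("response %d: %d refusals > %d tolerated, giving up\n",
				i, locksFailed, dnodeCount-dquorum)
			return
		}
	}
	fmt.Println("quorum still reachable")
}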

--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -98,10 +98,10 @@
 			"revisionTime": "2015-11-18T20:00:48-08:00"
 		},
 		{
-			"checksumSHA1": "OOADbvXPHaDRzp8WEvNw6esmfu0=",
+			"checksumSHA1": "0raNaLP/AhxXhEeB5CdSnbED3O4=",
 			"path": "github.com/minio/dsync",
-			"revision": "a2d8949cd8284e6cfa5b2ff9b617e4edb87f513f",
-			"revisionTime": "2016-08-24T09:12:34Z"
+			"revision": "1f615ccd013d35489becfe710e0ba7dce98b59e5",
+			"revisionTime": "2016-08-29T17:06:27Z"
 		},
 		{
 			"path": "github.com/minio/go-homedir",
