@@ -18,18 +18,17 @@ package cmd
 
 import (
 	"context"
-	"errors"
 	pathutil "path"
 	"runtime"
 	"sort"
 	"strings"
 	"sync"
+	"sync/atomic"
 
 	"fmt"
 	"time"
 
 	"github.com/minio/lsync"
-	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/dsync"
 )
 
@@ -58,8 +57,8 @@ func newNSLock(isDistXL bool) *nsLockMap {
 
 // nsLock - provides primitives for locking critical namespace regions.
 type nsLock struct {
+	ref uint32
 	*lsync.LRWMutex
-	ref uint
 }
 
 // nsLockMap - namespace lock map, provides primitives to Lock,
@@ -68,7 +67,7 @@ type nsLockMap struct {
 	// Indicates if namespace is part of a distributed setup.
 	isDistXL     bool
 	lockMap      map[string]*nsLock
-	lockMapMutex sync.RWMutex
+	lockMapMutex sync.Mutex
 }
 
 // Lock the namespace resource.
@@ -78,17 +77,16 @@ func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSo
 	resource := pathJoin(volume, path)
 
 	n.lockMapMutex.Lock()
-	nsLk, found := n.lockMap[resource]
-	if !found {
-		nsLk = &nsLock{
+	if _, found := n.lockMap[resource]; !found {
+		n.lockMap[resource] = &nsLock{
 			LRWMutex: lsync.NewLRWMutex(ctx),
 			ref:      1,
 		}
-		n.lockMap[resource] = nsLk
 	} else {
 		// Update ref count here to avoid multiple races.
-		nsLk.ref++
+		atomic.AddUint32(&n.lockMap[resource].ref, 1)
 	}
+	nsLk = n.lockMap[resource]
 	n.lockMapMutex.Unlock()
 
 	// Locking here will block (until timeout).
@@ -101,43 +99,35 @@ func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSo
 	if !locked { // We failed to get the lock
 
 		// Decrement ref count since we failed to get the lock
-		n.lockMapMutex.Lock()
-		nsLk.ref--
-		if nsLk.ref == 0 {
+		if atomic.AddUint32(&nsLk.ref, ^uint32(0)) == 0 {
 			// Remove from the map if there are no more references.
+			n.lockMapMutex.Lock()
 			delete(n.lockMap, resource)
+			n.lockMapMutex.Unlock()
 		}
-		n.lockMapMutex.Unlock()
 	}
 	return
 }
 
 // Unlock the namespace resource.
 func (n *nsLockMap) unlock(volume string, path string, readLock bool) {
 	resource := pathJoin(volume, path)
-	n.lockMapMutex.RLock()
-	nsLk, found := n.lockMap[resource]
-	n.lockMapMutex.RUnlock()
-	if !found {
+	n.lockMapMutex.Lock()
+	defer n.lockMapMutex.Unlock()
+	if _, found := n.lockMap[resource]; !found {
 		return
 	}
 	if readLock {
-		nsLk.RUnlock()
+		n.lockMap[resource].RUnlock()
 	} else {
-		nsLk.Unlock()
+		n.lockMap[resource].Unlock()
 	}
-	n.lockMapMutex.Lock()
-	if nsLk.ref == 0 {
-		logger.LogIf(GlobalContext, errors.New("Namespace reference count cannot be 0"))
-	} else {
-		nsLk.ref--
-		if nsLk.ref == 0 {
-			// Remove from the map if there are no more references.
-			delete(n.lockMap, resource)
-		}
+
+	if atomic.AddUint32(&n.lockMap[resource].ref, ^uint32(0)) == 0 {
+		// Remove from the map if there are no more references.
+		delete(n.lockMap, resource)
 	}
-	n.lockMapMutex.Unlock()
 }
 
 // dsync's distributed lock instance.
 type distLockInstance struct {
@@ -147,7 +137,7 @@ type distLockInstance struct {
 
 // Lock - block until write lock is taken or timeout has occurred.
 func (di *distLockInstance) GetLock(timeout *dynamicTimeout) (timedOutErr error) {
-	lockSource := getSource()
+	lockSource := getSource(2)
 	start := UTCNow()
 
 	if !di.rwMutex.GetLock(di.opsID, lockSource, timeout.Timeout()) {
@@ -165,7 +155,7 @@ func (di *distLockInstance) Unlock() {
 
 // RLock - block until read lock is taken or timeout has occurred.
 func (di *distLockInstance) GetRLock(timeout *dynamicTimeout) (timedOutErr error) {
-	lockSource := getSource()
+	lockSource := getSource(2)
 	start := UTCNow()
 	if !di.rwMutex.GetRLock(di.opsID, lockSource, timeout.Timeout()) {
 		timeout.LogFailure()
@@ -206,7 +196,7 @@ func (n *nsLockMap) NewNSLock(ctx context.Context, lockersFn func() []dsync.NetL
 
 // Lock - block until write lock is taken or timeout has occurred.
 func (li *localLockInstance) GetLock(timeout *dynamicTimeout) (timedOutErr error) {
-	lockSource := getSource()
+	lockSource := getSource(2)
 	start := UTCNow()
 	readLock := false
 	var success []int
@@ -234,7 +224,7 @@ func (li *localLockInstance) Unlock() {
 
 // RLock - block until read lock is taken or timeout has occurred.
 func (li *localLockInstance) GetRLock(timeout *dynamicTimeout) (timedOutErr error) {
-	lockSource := getSource()
+	lockSource := getSource(2)
 	start := UTCNow()
 	readLock := true
 	var success []int
@@ -260,9 +250,9 @@ func (li *localLockInstance) RUnlock() {
 	}
 }
 
-func getSource() string {
+func getSource(n int) string {
 	var funcName string
-	pc, filename, lineNum, ok := runtime.Caller(2)
+	pc, filename, lineNum, ok := runtime.Caller(n)
 	if ok {
 		filename = pathutil.Base(filename)
 		funcName = strings.TrimPrefix(runtime.FuncForPC(pc).Name(),
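
A side note on the decrement idiom the new code relies on: sync/atomic has no SubUint32, so the patch decrements the reference count with atomic.AddUint32(&ref, ^uint32(0)), i.e. adding MaxUint32, which wraps around to "minus one" (this is the subtraction idiom described in the sync/atomic documentation). The standalone sketch below is not part of the patch; it only illustrates the idiom with a plain counter standing in for nsLock.ref.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// ref plays the role of nsLock.ref: one count per holder.
	var ref uint32

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		atomic.AddUint32(&ref, 1) // acquire: increment the reference count
		go func() {
			defer wg.Done()
			// release: adding ^uint32(0) == MaxUint32 wraps to a decrement;
			// exactly one goroutine observes the count reach zero.
			if atomic.AddUint32(&ref, ^uint32(0)) == 0 {
				fmt.Println("last reference dropped")
			}
		}()
	}
	wg.Wait()
	fmt.Println("final ref:", atomic.LoadUint32(&ref)) // prints 0
}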