instrumentation: instrumentation for locks. (#2584)
- Instrumentation for locks. - Detailed test coverage. - Adding RPC control handler to fetch lock instrumentation. - RPC control handlers suite tests with a test RPC server.master
parent
de67bca211
commit
07d232c7b4
@ -0,0 +1,139 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"net/url" |
||||
"path" |
||||
"time" |
||||
|
||||
"github.com/minio/cli" |
||||
) |
||||
|
||||
// SystemLockState - lock state of the entire object storage node:
// system-wide counters plus the detailed state of every held lock.
type SystemLockState struct {
	TotalLocks int64 `json:"totalLocks"`
	// Operations currently blocked waiting for a lock to be released.
	TotalBlockedLocks int64 `json:"totalBlockedLocks"`
	// Operations which acquired a lock and have not yet unlocked (in progress).
	TotalAcquiredLocks int64 `json:"totalAcquiredLocks"`
	LocksInfoPerObject []VolumeLockInfo `json:"locksInfoPerObject"`
}

// VolumeLockInfo - lock state for a single <volume, path> pair.
type VolumeLockInfo struct {
	Bucket string `json:"bucket"`
	Object string `json:"object"`
	// All locks (blocked + running) for the given <volume, path> pair.
	LocksOnObject int64 `json:"locksOnObject"`
	// Operations which acquired the lock and have not yet unlocked (in progress).
	LocksAcquiredOnObject int64 `json:"locksAcquiredOnObject"`
	// Operations blocked waiting for the lock to be released.
	TotalBlockedLocks int64 `json:"locksBlockedOnObject"`
	// Per-operation lock state for all operations on this <volume, path> pair.
	LockDetailsOnObject []OpsLockState `json:"lockDetailsOnObject"`
}

// OpsLockState - lock state for a single operation, keyed by operation ID.
type OpsLockState struct {
	OperationID string `json:"opsID"`      // operation ID string.
	LockOrigin  string `json:"lockOrigin"` // call-site trace of the operation which requested the lock.
	LockType    string `json:"lockType"`
	Status      string `json:"status"`      // one of running/ready/blocked.
	StatusSince string `json:"statusSince"` // duration for which the status has held true, as a string.
}
||||
|
||||
// Read entire state of the locks in the system and return.
|
||||
func generateSystemLockResponse() (SystemLockState, error) { |
||||
nsMutex.lockMapMutex.Lock() |
||||
defer nsMutex.lockMapMutex.Unlock() |
||||
|
||||
if nsMutex.debugLockMap == nil { |
||||
return SystemLockState{}, LockInfoNil{} |
||||
} |
||||
|
||||
lockState := SystemLockState{} |
||||
|
||||
lockState.TotalBlockedLocks = nsMutex.blockedCounter |
||||
lockState.TotalLocks = nsMutex.globalLockCounter |
||||
lockState.TotalAcquiredLocks = nsMutex.runningLockCounter |
||||
|
||||
for param := range nsMutex.debugLockMap { |
||||
volLockInfo := VolumeLockInfo{} |
||||
volLockInfo.Bucket = param.volume |
||||
volLockInfo.Object = param.path |
||||
volLockInfo.TotalBlockedLocks = nsMutex.debugLockMap[param].blocked |
||||
volLockInfo.LocksAcquiredOnObject = nsMutex.debugLockMap[param].running |
||||
volLockInfo.LocksOnObject = nsMutex.debugLockMap[param].ref |
||||
for opsID := range nsMutex.debugLockMap[param].lockInfo { |
||||
opsState := OpsLockState{} |
||||
opsState.OperationID = opsID |
||||
opsState.LockOrigin = nsMutex.debugLockMap[param].lockInfo[opsID].lockOrigin |
||||
opsState.LockType = nsMutex.debugLockMap[param].lockInfo[opsID].lockType |
||||
opsState.Status = nsMutex.debugLockMap[param].lockInfo[opsID].status |
||||
opsState.StatusSince = time.Now().Sub(nsMutex.debugLockMap[param].lockInfo[opsID].since).String() |
||||
|
||||
volLockInfo.LockDetailsOnObject = append(volLockInfo.LockDetailsOnObject, opsState) |
||||
} |
||||
lockState.LocksInfoPerObject = append(lockState.LocksInfoPerObject, volLockInfo) |
||||
} |
||||
|
||||
return lockState, nil |
||||
} |
||||
|
||||
var lockCmd = cli.Command{ |
||||
Name: "lock", |
||||
Usage: "info about the locks in the node.", |
||||
Action: lockControl, |
||||
CustomHelpTemplate: `NAME: |
||||
minio control {{.Name}} - {{.Usage}} |
||||
|
||||
USAGE: |
||||
minio control {{.Name}} http://localhost:9000/
|
||||
|
||||
EAMPLES: |
||||
1. Get all the info about the blocked/held locks in the node: |
||||
$ minio control lock http://localhost:9000/
|
||||
`, |
||||
} |
||||
|
||||
// "minio control lock" entry point.
|
||||
func lockControl(c *cli.Context) { |
||||
if len(c.Args()) != 1 { |
||||
cli.ShowCommandHelpAndExit(c, "lock", 1) |
||||
} |
||||
|
||||
parsedURL, err := url.Parse(c.Args()[0]) |
||||
fatalIf(err, "Unable to parse URL.") |
||||
|
||||
authCfg := &authConfig{ |
||||
accessKey: serverConfig.GetCredential().AccessKeyID, |
||||
secretKey: serverConfig.GetCredential().SecretAccessKey, |
||||
address: parsedURL.Host, |
||||
path: path.Join(reservedBucket, controlPath), |
||||
loginMethod: "Controller.LoginHandler", |
||||
} |
||||
client := newAuthClient(authCfg) |
||||
|
||||
args := &GenericArgs{} |
||||
reply := &SystemLockState{} |
||||
err = client.Call("Control.LockInfo", args, reply) |
||||
// logs the error and returns if err != nil.
|
||||
fatalIf(err, "RPC Control.LockInfo call failed") |
||||
// print the lock info on the console.
|
||||
b, err := json.MarshalIndent(*reply, "", " ") |
||||
fatalIf(err, "Failed to parse the RPC lock info response") |
||||
fmt.Print(string(b)) |
||||
} |
@ -1,63 +0,0 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
// "net/rpc"
|
||||
"testing" |
||||
) |
||||
|
||||
// Wrapper for calling heal disk metadata rpc Handler
|
||||
func TestControllerHandlerHealDiskMetadata(t *testing.T) { |
||||
ExecObjectLayerTest(t, testHealDiskMetadataControllerHandler) |
||||
} |
||||
|
||||
// testHealDiskMetadataControllerHandler - Test Heal Disk Metadata handler
|
||||
func testHealDiskMetadataControllerHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { |
||||
// Register the API end points with XL/FS object layer.
|
||||
serverAddress, random, err := initTestControllerRPCEndPoint(obj) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
rootPath, err := newTestConfig("us-east-1") |
||||
if err != nil { |
||||
t.Fatalf("Init Test config failed") |
||||
} |
||||
// remove the root folder after the test ends.
|
||||
defer removeAll(rootPath) |
||||
|
||||
authCfg := &authConfig{ |
||||
accessKey: serverConfig.GetCredential().AccessKeyID, |
||||
secretKey: serverConfig.GetCredential().SecretAccessKey, |
||||
address: serverAddress, |
||||
path: "/controller" + random, |
||||
loginMethod: "Controller.LoginHandler", |
||||
} |
||||
client := newAuthClient(authCfg) |
||||
|
||||
args := &GenericArgs{} |
||||
reply := &GenericReply{} |
||||
err = client.Call("Controller.HealDiskMetadataHandler", args, reply) |
||||
if instanceType == "FS" && err == nil { |
||||
t.Errorf("Test should fail with FS") |
||||
} |
||||
if instanceType == "XL" && err != nil { |
||||
t.Errorf("Test should succeed with XL %s", err.Error()) |
||||
} |
||||
} |
@ -0,0 +1,298 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"path" |
||||
"strconv" |
||||
"sync" |
||||
"time" |
||||
|
||||
. "gopkg.in/check.v1" |
||||
) |
||||
|
||||
// API suite container common to both FS and XL.
|
||||
type TestRPCControllerSuite struct { |
||||
serverType string |
||||
testServer TestServer |
||||
endPoint string |
||||
accessKey string |
||||
secretKey string |
||||
} |
||||
|
||||
// Init and run test on XL backend.
|
||||
var _ = Suite(&TestRPCControllerSuite{serverType: "XL"}) |
||||
|
||||
// Setting up the test suite.
|
||||
// Starting the Test server with temporary FS backend.
|
||||
func (s *TestRPCControllerSuite) SetUpSuite(c *C) { |
||||
s.testServer = StartTestRPCServer(c, s.serverType) |
||||
s.endPoint = s.testServer.Server.Listener.Addr().String() |
||||
s.accessKey = s.testServer.AccessKey |
||||
s.secretKey = s.testServer.SecretKey |
||||
} |
||||
|
||||
// Called implicitly by "gopkg.in/check.v1" after all tests are run.
|
||||
func (s *TestRPCControllerSuite) TearDownSuite(c *C) { |
||||
s.testServer.Stop() |
||||
} |
||||
|
||||
// Tests to validate the correctness of lock instrumentation control RPC end point.
|
||||
func (s *TestRPCControllerSuite) TestRPCControlLock(c *C) { |
||||
// enabling lock instrumentation.
|
||||
globalDebugLock = true |
||||
// initializing the locks.
|
||||
initNSLock(false) |
||||
// set debug lock info to `nil` so that the next tests have to initialize them again.
|
||||
defer func() { |
||||
globalDebugLock = false |
||||
nsMutex.debugLockMap = nil |
||||
}() |
||||
|
||||
expectedResult := []lockStateCase{ |
||||
// Test case - 1.
|
||||
// Case where 10 read locks are held.
|
||||
// Entry for any of the 10 reads locks has to be found.
|
||||
// Since they held in a loop, Lock origin for first 10 read locks (opsID 0-9) should be the same.
|
||||
{ |
||||
|
||||
volume: "my-bucket", |
||||
path: "my-object", |
||||
opsID: "0", |
||||
readLock: true, |
||||
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]", |
||||
// expected metrics.
|
||||
expectedErr: nil, |
||||
expectedLockStatus: "Running", |
||||
|
||||
expectedGlobalLockCount: 10, |
||||
expectedRunningLockCount: 10, |
||||
expectedBlockedLockCount: 0, |
||||
|
||||
expectedVolPathLockCount: 10, |
||||
expectedVolPathRunningCount: 10, |
||||
expectedVolPathBlockCount: 0, |
||||
}, |
||||
// Test case 2.
|
||||
// Testing the existance of entry for the last read lock (read lock with opsID "9").
|
||||
{ |
||||
|
||||
volume: "my-bucket", |
||||
path: "my-object", |
||||
opsID: "9", |
||||
readLock: true, |
||||
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]", |
||||
// expected metrics.
|
||||
expectedErr: nil, |
||||
expectedLockStatus: "Running", |
||||
|
||||
expectedGlobalLockCount: 10, |
||||
expectedRunningLockCount: 10, |
||||
expectedBlockedLockCount: 0, |
||||
|
||||
expectedVolPathLockCount: 10, |
||||
expectedVolPathRunningCount: 10, |
||||
expectedVolPathBlockCount: 0, |
||||
}, |
||||
|
||||
// Test case 3.
|
||||
// Hold a write lock, and it should block since 10 read locks
|
||||
// on <"my-bucket", "my-object"> are still held.
|
||||
{ |
||||
|
||||
volume: "my-bucket", |
||||
path: "my-object", |
||||
opsID: "10", |
||||
readLock: false, |
||||
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]", |
||||
// expected metrics.
|
||||
expectedErr: nil, |
||||
expectedLockStatus: "Blocked", |
||||
|
||||
expectedGlobalLockCount: 11, |
||||
expectedRunningLockCount: 10, |
||||
expectedBlockedLockCount: 1, |
||||
|
||||
expectedVolPathLockCount: 11, |
||||
expectedVolPathRunningCount: 10, |
||||
expectedVolPathBlockCount: 1, |
||||
}, |
||||
|
||||
// Test case 4.
|
||||
// Expected result when all the read locks are released and the blocked write lock acquires the lock.
|
||||
{ |
||||
|
||||
volume: "my-bucket", |
||||
path: "my-object", |
||||
opsID: "10", |
||||
readLock: false, |
||||
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]", |
||||
// expected metrics.
|
||||
expectedErr: nil, |
||||
expectedLockStatus: "Running", |
||||
|
||||
expectedGlobalLockCount: 1, |
||||
expectedRunningLockCount: 1, |
||||
expectedBlockedLockCount: 0, |
||||
|
||||
expectedVolPathLockCount: 1, |
||||
expectedVolPathRunningCount: 1, |
||||
expectedVolPathBlockCount: 0, |
||||
}, |
||||
// Test case - 5.
|
||||
// At the end after locks are released, its verified whether the counters are set to 0.
|
||||
{ |
||||
|
||||
volume: "my-bucket", |
||||
path: "my-object", |
||||
// expected metrics.
|
||||
expectedErr: nil, |
||||
expectedLockStatus: "Blocked", |
||||
|
||||
expectedGlobalLockCount: 0, |
||||
expectedRunningLockCount: 0, |
||||
expectedBlockedLockCount: 0, |
||||
}, |
||||
} |
||||
|
||||
// used to make sure that the tests don't end till locks held in other go routines are released.
|
||||
var wg sync.WaitGroup |
||||
|
||||
// Hold 5 read locks. We should find the info about these in the RPC response.
|
||||
|
||||
// hold 10 read locks.
|
||||
// Then call the RPC control end point for obtaining lock instrumentation info.
|
||||
|
||||
for i := 0; i < 10; i++ { |
||||
nsMutex.RLock("my-bucket", "my-object", strconv.Itoa(i)) |
||||
} |
||||
|
||||
authCfg := &authConfig{ |
||||
accessKey: s.accessKey, |
||||
secretKey: s.secretKey, |
||||
address: s.endPoint, |
||||
path: path.Join(reservedBucket, controlPath), |
||||
loginMethod: "Controller.LoginHandler", |
||||
} |
||||
|
||||
client := newAuthClient(authCfg) |
||||
|
||||
defer client.Close() |
||||
|
||||
args := &GenericArgs{} |
||||
reply := &SystemLockState{} |
||||
// Call the lock instrumentation RPC end point.
|
||||
err := client.Call("Controller.LockInfo", args, reply) |
||||
if err != nil { |
||||
c.Errorf("Add: expected no error but got string %q", err.Error()) |
||||
} |
||||
// expected lock info.
|
||||
expectedLockStats := expectedResult[0] |
||||
// verify the actual lock info with the expected one.
|
||||
// verify the existance entry for first read lock (read lock with opsID "0").
|
||||
verifyRPCLockInfoResponse(expectedLockStats, *reply, c, 1) |
||||
expectedLockStats = expectedResult[1] |
||||
// verify the actual lock info with the expected one.
|
||||
// verify the existance entry for last read lock (read lock with opsID "9").
|
||||
verifyRPCLockInfoResponse(expectedLockStats, *reply, c, 2) |
||||
|
||||
// now hold a write lock in a different go routine and it should block since 10 read locks are
|
||||
// still held.
|
||||
wg.Add(1) |
||||
go func() { |
||||
defer wg.Done() |
||||
// blocks till all read locks are released.
|
||||
nsMutex.Lock("my-bucket", "my-object", strconv.Itoa(10)) |
||||
// Once the above attempt to lock is unblocked/acquired, we verify the stats and release the lock.
|
||||
expectedWLockStats := expectedResult[3] |
||||
// Since the write lock acquired here, the number of blocked locks should reduce by 1 and
|
||||
// count of running locks should increase by 1.
|
||||
|
||||
// Call the RPC control handle to fetch the lock instrumentation info.
|
||||
reply = &SystemLockState{} |
||||
// Call the lock instrumentation RPC end point.
|
||||
err = client.Call("Controller.LockInfo", args, reply) |
||||
if err != nil { |
||||
c.Errorf("Add: expected no error but got string %q", err.Error()) |
||||
} |
||||
verifyRPCLockInfoResponse(expectedWLockStats, *reply, c, 4) |
||||
|
||||
// release the write lock.
|
||||
nsMutex.Unlock("my-bucket", "my-object", strconv.Itoa(10)) |
||||
|
||||
}() |
||||
// waiting for a second so that the attempt to acquire the write lock in
|
||||
// the above go routines gets blocked.
|
||||
time.Sleep(1 * time.Second) |
||||
// The write lock should have got blocked by now,
|
||||
// check whether the entry for one blocked lock exists.
|
||||
expectedLockStats = expectedResult[2] |
||||
|
||||
// Call the RPC control handle to fetch the lock instrumentation info.
|
||||
reply = &SystemLockState{} |
||||
// Call the lock instrumentation RPC end point.
|
||||
err = client.Call("Controller.LockInfo", args, reply) |
||||
if err != nil { |
||||
c.Errorf("Add: expected no error but got string %q", err.Error()) |
||||
} |
||||
verifyRPCLockInfoResponse(expectedLockStats, *reply, c, 3) |
||||
// Release all the read locks held.
|
||||
// the blocked write lock in the above go routines should get unblocked.
|
||||
for i := 0; i < 10; i++ { |
||||
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i)) |
||||
} |
||||
wg.Wait() |
||||
// Since all the locks are released. There shouldnt be any entry in the lock info.
|
||||
// and all the counters should be set to 0.
|
||||
reply = &SystemLockState{} |
||||
// Call the lock instrumentation RPC end point.
|
||||
err = client.Call("Controller.LockInfo", args, reply) |
||||
if err != nil { |
||||
c.Errorf("Add: expected no error but got string %q", err.Error()) |
||||
} |
||||
|
||||
if reply.TotalAcquiredLocks != 0 && reply.TotalLocks != 0 && reply.TotalBlockedLocks != 0 { |
||||
c.Fatalf("The counters are not reset properly after all locks are released") |
||||
} |
||||
if len(reply.LocksInfoPerObject) != 0 { |
||||
c.Fatalf("Since all locks are released there shouldn't have been any lock info entry, but found %d", len(reply.LocksInfoPerObject)) |
||||
} |
||||
} |
||||
|
||||
// TestControllerHandlerHealDiskMetadata - Registers and call the `HealDiskMetadataHandler`,
|
||||
// asserts to validate the success.
|
||||
func (s *TestRPCControllerSuite) TestControllerHandlerHealDiskMetadata(c *C) { |
||||
// The suite has already started the test RPC server, just send RPC calls.
|
||||
authCfg := &authConfig{ |
||||
accessKey: s.accessKey, |
||||
secretKey: s.secretKey, |
||||
address: s.endPoint, |
||||
path: path.Join(reservedBucket, controlPath), |
||||
loginMethod: "Controller.LoginHandler", |
||||
} |
||||
|
||||
client := newAuthClient(authCfg) |
||||
defer client.Close() |
||||
|
||||
args := &GenericArgs{} |
||||
reply := &GenericReply{} |
||||
err := client.Call("Controller.HealDiskMetadataHandler", args, reply) |
||||
|
||||
if err != nil { |
||||
c.Errorf("Heal Meta Disk Handler test failed with <ERROR> %s", err.Error()) |
||||
} |
||||
} |
@ -0,0 +1,283 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"fmt" |
||||
"time" |
||||
) |
||||
|
||||
const (
	debugRLockStr = "RLock"
	debugWLockStr = "WLock"
)

// debugLockInfo - state of a single operation (identified by its operation
// ID) holding or waiting on a lock: its type, origin, status and the instant
// that status became true.
type debugLockInfo struct {
	lockType   string    // debugRLockStr or debugWLockStr.
	lockOrigin string    // call-site trace of the function which requested the lock.
	status     string    // one of running/ready/blocked.
	since      time.Time // instant since which the current status has held.
}

// debugLockInfoPerVolumePath - lock bookkeeping for one unique
// <volume, path> pair:
//   - `ref` is the total lock reference count (running + blocked),
//   - `running` counts locks successfully acquired and still held,
//   - `blocked` counts operations waiting on the lock,
//   - `lockInfo` maps operation ID -> per-operation lock state.
type debugLockInfoPerVolumePath struct {
	ref      int64
	running  int64
	blocked  int64
	lockInfo map[string]debugLockInfo
}

// newDebugLockInfoPerVolumePath - returns a fresh lock-info container for a
// unique <volume, path> pair, with all counters at zero and the operation
// map initialized.
func newDebugLockInfoPerVolumePath() *debugLockInfoPerVolumePath {
	// Counters rely on Go's zero values; only the map needs explicit init.
	return &debugLockInfoPerVolumePath{
		lockInfo: make(map[string]debugLockInfo),
	}
}
||||
|
||||
// LockInfoNil - returned when the debug lock map has not been initialized.
type LockInfoNil struct{}

// Error - implements the error interface.
func (l LockInfoNil) Error() string {
	// The message is constant: return it directly instead of the original
	// fmt.Sprintf call with no format directives (flagged by go vet).
	return "Debug Lock Map not initialized:\n1. Enable Lock Debugging using right ENV settings \n2. Make sure initNSLock() is called."
}
||||
|
||||
// LockInfoOriginNotFound - returned when changing a lock's state and no
// entry exists for the lock at the given origin.
type LockInfoOriginNotFound struct {
	volume      string
	path        string
	operationID string
	lockOrigin  string
}

// Error - implements the error interface.
func (l LockInfoOriginNotFound) Error() string {
	msg := "No lock state stored for the lock origined at \"%s\", for <volume> %s, <path> %s, <operationID> %s."
	return fmt.Sprintf(msg, l.lockOrigin, l.volume, l.path, l.operationID)
}
||||
|
||||
// LockInfoVolPathMssing - returned when no entry exists in the debug lock
// map for the given <volume, path> pair.
type LockInfoVolPathMssing struct {
	volume string
	path   string
}

// Error - implements the error interface.
func (l LockInfoVolPathMssing) Error() string {
	return fmt.Sprintf("No entry in debug Lock Map for Volume: %s, path: %s.", l.volume, l.path)
}
||||
|
||||
// LockInfoOpsIDNotFound - returned when lock state exists for the
// <volume, path> pair but there is no entry for the given operation ID.
type LockInfoOpsIDNotFound struct {
	volume      string
	path        string
	operationID string
}

// Error - implements the error interface.
func (l LockInfoOpsIDNotFound) Error() string {
	msg := "No entry in lock info for <Operation ID> %s, <volume> %s, <path> %s."
	return fmt.Sprintf(msg, l.operationID, l.volume, l.path)
}
||||
|
||||
// LockInfoStateNotBlocked - returned when a blocked-to-running transition is
// attempted but the lock's current state is not "Blocked".
type LockInfoStateNotBlocked struct {
	volume      string
	path        string
	operationID string
}

// Error - implements the error interface.
func (l LockInfoStateNotBlocked) Error() string {
	msg := "Lock state should be \"Blocked\" for <volume> %s, <path> %s, <operationID> %s."
	return fmt.Sprintf(msg, l.volume, l.path, l.operationID)
}
||||
|
||||
// change the state of the lock from Blocked to Running.
|
||||
func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockOrigin, operationID string, readLock bool) error { |
||||
// This operation is not executed under the scope nsLockMap.mutex.Lock(), lock has to be explicitly held here.
|
||||
n.lockMapMutex.Lock() |
||||
defer n.lockMapMutex.Unlock() |
||||
if n.debugLockMap == nil { |
||||
return LockInfoNil{} |
||||
} |
||||
// new state info to be set for the lock.
|
||||
newLockInfo := debugLockInfo{ |
||||
lockOrigin: lockOrigin, |
||||
status: "Running", |
||||
since: time.Now().UTC(), |
||||
} |
||||
|
||||
// set lock type.
|
||||
if readLock { |
||||
newLockInfo.lockType = debugRLockStr |
||||
} else { |
||||
newLockInfo.lockType = debugWLockStr |
||||
} |
||||
|
||||
// check whether the lock info entry for <volume, path> pair already exists and its not `nil`.
|
||||
if debugLockMap, ok := n.debugLockMap[param]; ok { |
||||
// ``*debugLockInfoPerVolumePath` entry containing lock info for `param <volume, path>` is `nil`.
|
||||
if debugLockMap == nil { |
||||
return LockInfoNil{} |
||||
} |
||||
} else { |
||||
// The lock state info foe given <volume, path> pair should already exist.
|
||||
// If not return `LockInfoVolPathMssing`.
|
||||
return LockInfoVolPathMssing{param.volume, param.path} |
||||
} |
||||
|
||||
// Lock info the for the given operation ID shouldn't be `nil`.
|
||||
if n.debugLockMap[param].lockInfo == nil { |
||||
return LockInfoOpsIDNotFound{param.volume, param.path, operationID} |
||||
} |
||||
|
||||
if lockInfo, ok := n.debugLockMap[param].lockInfo[operationID]; ok { |
||||
// The entry for the lock origined at `lockOrigin` should already exist.
|
||||
// If not return `LockInfoOriginNotFound`.
|
||||
if lockInfo.lockOrigin != lockOrigin { |
||||
return LockInfoOriginNotFound{param.volume, param.path, operationID, lockOrigin} |
||||
} |
||||
// Status of the lock should already be set to "Blocked".
|
||||
// If not return `LockInfoStateNotBlocked`.
|
||||
if lockInfo.status != "Blocked" { |
||||
return LockInfoStateNotBlocked{param.volume, param.path, operationID} |
||||
} |
||||
} else { |
||||
// The lock info entry for given `opsID` should already exist for given <volume, path> pair.
|
||||
// If not return `LockInfoOpsIDNotFound`.
|
||||
return LockInfoOpsIDNotFound{param.volume, param.path, operationID} |
||||
} |
||||
|
||||
// All checks finished.
|
||||
// changing the status of the operation from blocked to running and updating the time.
|
||||
n.debugLockMap[param].lockInfo[operationID] = newLockInfo |
||||
|
||||
// After locking unblocks decrease the blocked counter.
|
||||
n.blockedCounter-- |
||||
// Increase the running counter.
|
||||
n.runningLockCounter++ |
||||
n.debugLockMap[param].blocked-- |
||||
n.debugLockMap[param].running++ |
||||
return nil |
||||
} |
||||
|
||||
// change the state of the lock from Ready to Blocked.
|
||||
func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockOrigin, operationID string, readLock bool) error { |
||||
if n.debugLockMap == nil { |
||||
return LockInfoNil{} |
||||
} |
||||
|
||||
newLockInfo := debugLockInfo{ |
||||
lockOrigin: lockOrigin, |
||||
status: "Blocked", |
||||
since: time.Now().UTC(), |
||||
} |
||||
if readLock { |
||||
newLockInfo.lockType = debugRLockStr |
||||
} else { |
||||
newLockInfo.lockType = debugWLockStr |
||||
} |
||||
|
||||
if lockInfo, ok := n.debugLockMap[param]; ok { |
||||
if lockInfo == nil { |
||||
// *debugLockInfoPerVolumePath entry is nil, initialize here to avoid any case of `nil` pointer access.
|
||||
n.initLockInfoForVolumePath(param) |
||||
} |
||||
} else { |
||||
// State info entry for the given <volume, pair> doesn't exist, initializing it.
|
||||
n.initLockInfoForVolumePath(param) |
||||
} |
||||
|
||||
// lockInfo is a map[string]debugLockInfo, which holds map[OperationID]{status,time, origin} of the lock.
|
||||
if n.debugLockMap[param].lockInfo == nil { |
||||
n.debugLockMap[param].lockInfo = make(map[string]debugLockInfo) |
||||
} |
||||
// The status of the operation with the given operation ID is marked blocked till its gets unblocked from the lock.
|
||||
n.debugLockMap[param].lockInfo[operationID] = newLockInfo |
||||
// Increment the Global lock counter.
|
||||
n.globalLockCounter++ |
||||
// Increment the counter for number of blocked opertions, decrement it after the locking unblocks.
|
||||
n.blockedCounter++ |
||||
// increment the reference of the lock for the given <volume,path> pair.
|
||||
n.debugLockMap[param].ref++ |
||||
// increment the blocked counter for the given <volume, path> pair.
|
||||
n.debugLockMap[param].blocked++ |
||||
return nil |
||||
} |
||||
|
||||
// deleteLockInfoEntry - Deletes the lock state information for given <volume, path> pair. Called when nsLk.ref count is 0.
|
||||
func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error { |
||||
if n.debugLockMap == nil { |
||||
return LockInfoNil{} |
||||
} |
||||
// delete the lock info for the given operation.
|
||||
if _, found := n.debugLockMap[param]; found { |
||||
// Remove from the map if there are no more references for the given (volume,path) pair.
|
||||
delete(n.debugLockMap, param) |
||||
} else { |
||||
return LockInfoVolPathMssing{param.volume, param.path} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// deleteLockInfoEntryForOps - Deletes the entry for the given opsID in the lock state
// information of the given <volume, path> pair.
// Called when the nsLk ref count for the given <volume, path> pair is not 0.
// Returns LockInfoNil{} if instrumentation is uninitialized, LockInfoVolPathMssing{}
// if no entry exists for the pair, and LockInfoOpsIDNotFound{} for an unknown opsID.
func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, operationID string) error {
	// Lock instrumentation must have been initialized first.
	if n.debugLockMap == nil {
		return LockInfoNil{}
	}
	// Delete the lock info for the given operation.
	if infoMap, found := n.debugLockMap[param]; found {
		// The operation finished holding the lock on the resource; remove the
		// entry for the given operation with the operation ID.
		if _, foundInfo := infoMap.lockInfo[operationID]; foundInfo {
			// Decrease the global running and lock reference counters.
			n.runningLockCounter--
			n.globalLockCounter--
			// Decrease the lock reference counter for the lock info of the given <volume, path> pair.
			// Decrease the running operation count. It is assumed that the operation
			// is over once an attempt to release the lock is made.
			infoMap.running--
			// Decrease the total reference count of locks held on the <volume, path> pair.
			infoMap.ref--
			delete(infoMap.lockInfo, operationID)
		} else {
			// Unlock request with an invalid operation ID is not accepted.
			return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
		}
	} else {
		// No lock state entry exists for the given <volume, path> pair.
		return LockInfoVolPathMssing{param.volume, param.path}
	}
	return nil
}
||||
|
||||
// return randomly generated string ID if lock debug is enabled,
|
||||
// else returns empty string
|
||||
func getOpsID() (opsID string) { |
||||
// check if lock debug is enabled.
|
||||
if globalDebugLock { |
||||
// generated random ID.
|
||||
opsID = string(generateRequestID()) |
||||
} |
||||
return opsID |
||||
} |
@ -0,0 +1,744 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"testing" |
||||
"time" |
||||
) |
||||
|
||||
// lockStateCase - Describes one lock-state test case: the inputs used to drive a
// lock state transition and the counters/status expected afterwards, both globally
// and for the specific <volume, path> pair.
type lockStateCase struct {
	volume     string
	path       string
	lockOrigin string
	opsID      string
	readLock   bool // lock type.
	setBlocked bool // initialize the initial state to blocked.
	expectedErr error
	// expected global lock stats.
	expectedLockStatus string // Status of the lock: Blocked/Running.

	expectedGlobalLockCount  int // Total number of locks held across the system, includes blocked + held locks.
	expectedBlockedLockCount int // Total blocked locks across the system.
	expectedRunningLockCount int // Total successfully held locks (non-blocking).
	// expected lock status for the given <volume, path> pair.
	expectedVolPathLockCount    int // Total locks held for given <volume,path> pair, includes blocked locks.
	expectedVolPathRunningCount int // Total successfully held locks for given <volume, path> pair.
	expectedVolPathBlockCount   int // Total locks blocked on the given <volume, path> pair.
}
||||
|
||||
// verifyRPCLockInfoResponse - Validates the lock info obtained from the control RPC
// end point against the expected values in the given test case. Global counters are
// checked with Fatalf (hard stop); per-object counters with Errorf. Returns early
// once the entry for the expected <bucket, object, opsID> has been fully checked;
// falling through the loop means no matching <bucket, object> entry was found.
func verifyRPCLockInfoResponse(l lockStateCase, rpcLockInfoResponse SystemLockState, t TestErrHandler, testNum int) {
	// Assert the total number of locks (locked + acquired) in the system.
	if rpcLockInfoResponse.TotalLocks != int64(l.expectedGlobalLockCount) {
		t.Fatalf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount),
			rpcLockInfoResponse.TotalLocks)
	}

	// Verify the count for total blocked locks.
	if rpcLockInfoResponse.TotalBlockedLocks != int64(l.expectedBlockedLockCount) {
		t.Fatalf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount),
			rpcLockInfoResponse.TotalBlockedLocks)
	}

	// Verify the count for total running locks.
	if rpcLockInfoResponse.TotalAcquiredLocks != int64(l.expectedRunningLockCount) {
		t.Fatalf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount),
			rpcLockInfoResponse.TotalAcquiredLocks)
	}

	for _, locksInfoPerObject := range rpcLockInfoResponse.LocksInfoPerObject {
		// See whether the entry for the <bucket, object> exists in the RPC response.
		if locksInfoPerObject.Bucket == l.volume && locksInfoPerObject.Object == l.path {
			// Assert the total number of locks (blocked + acquired) for the given <bucket, object> pair.
			if locksInfoPerObject.LocksOnObject != int64(l.expectedVolPathLockCount) {
				t.Errorf("Test %d: Expected the total lock count for bucket: \"%s\", object: \"%s\" to be %v, but got %v", testNum,
					l.volume, l.path, int64(l.expectedVolPathLockCount), locksInfoPerObject.LocksOnObject)
			}
			// Assert the total number of acquired locks for the given <bucket, object> pair.
			if locksInfoPerObject.LocksAcquiredOnObject != int64(l.expectedVolPathRunningCount) {
				t.Errorf("Test %d: Expected the acquired lock count for bucket: \"%s\", object: \"%s\" to be %v, but got %v", testNum,
					l.volume, l.path, int64(l.expectedVolPathRunningCount), locksInfoPerObject.LocksAcquiredOnObject)
			}
			// Assert the total number of blocked locks for the given <bucket, object> pair.
			if locksInfoPerObject.TotalBlockedLocks != int64(l.expectedVolPathBlockCount) {
				t.Errorf("Test %d: Expected the blocked lock count for bucket: \"%s\", object: \"%s\" to be %v, but got %v", testNum,
					l.volume, l.path, int64(l.expectedVolPathBlockCount), locksInfoPerObject.TotalBlockedLocks)
			}
			// Flag to mark whether there's an entry in the RPC lock info response for the given opsID.
			var opsIDfound bool
			for _, opsLockState := range locksInfoPerObject.LockDetailsOnObject {
				// First check whether the entry for the given operation ID exists.
				if opsLockState.OperationID == l.opsID {
					opsIDfound = true
					// Asserting the type of lock (RLock/WLock) from the RPC lock info response.
					if l.readLock {
						if opsLockState.LockType != debugRLockStr {
							t.Errorf("Test case %d: Expected the lock type to be \"%s\"", testNum, debugRLockStr)
						}
					} else {
						if opsLockState.LockType != debugWLockStr {
							t.Errorf("Test case %d: Expected the lock type to be \"%s\"", testNum, debugWLockStr)
						}
					}

					if opsLockState.Status != l.expectedLockStatus {
						t.Errorf("Test case %d: Expected the status of the operation to be \"%s\", got \"%s\"", testNum, l.expectedLockStatus, opsLockState.Status)
					}

					// Lock origin check intentionally disabled (origin contains
					// machine-specific file paths and line numbers):
					// if opsLockState.LockOrigin != l.lockOrigin {
					// 	t.Fatalf("Test case %d: Expected the origin of the lock to be \"%s\", got \"%s\"", testNum, opsLockState.LockOrigin, l.lockOrigin)
					// }
					// All checks satisfied, return here.
					// Any mismatch in the earlier checks would have ended the tests due to `Fatalf`;
					// control reaching here implies that all checks are satisfied.
					return
				}
			}
			// opsID not found.
			// No entry for an operation with given operation ID exists.
			if !opsIDfound {
				t.Fatalf("Test case %d: Entry for OpsId: \"%s\" not found in <bucket>: \"%s\", <path>: \"%s\" doesn't exist in the RPC response", testNum, l.opsID, l.volume, l.path)
			}
		}
	}
	// No entry exists for given <bucket, object> pair in the RPC response.
	t.Errorf("Test case %d: Entry for <bucket>: \"%s\", <object>: \"%s\" doesn't exist in the RPC response", testNum, l.volume, l.path)
}
||||
|
||||
// verifyGlobalLockStats - Asserts the global lock counters held in the in-memory
// nsMutex against the expected values, then repeats the same assertions against the
// JSON lock-state response from generateSystemLockResponse(). The lockMapMutex is
// released before calling generateSystemLockResponse(), which takes its own lock.
func verifyGlobalLockStats(l lockStateCase, t *testing.T, testNum int) {
	nsMutex.lockMapMutex.Lock()

	// Verifying the lock stats.
	if nsMutex.globalLockCounter != int64(l.expectedGlobalLockCount) {
		t.Errorf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount),
			nsMutex.globalLockCounter)
	}
	// Verify the count for total blocked locks.
	if nsMutex.blockedCounter != int64(l.expectedBlockedLockCount) {
		t.Errorf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount),
			nsMutex.blockedCounter)
	}
	// Verify the count for total running locks.
	if nsMutex.runningLockCounter != int64(l.expectedRunningLockCount) {
		t.Errorf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount),
			nsMutex.runningLockCounter)
	}
	nsMutex.lockMapMutex.Unlock()
	// Verifying again with the JSON response of the lock info.
	// Verifying the lock stats.
	sysLockState, err := generateSystemLockResponse()
	if err != nil {
		t.Fatalf("Obtaining lock info failed with <ERROR> %s", err)

	}
	if sysLockState.TotalLocks != int64(l.expectedGlobalLockCount) {
		t.Errorf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount),
			sysLockState.TotalLocks)
	}
	// Verify the count for total blocked locks.
	if sysLockState.TotalBlockedLocks != int64(l.expectedBlockedLockCount) {
		t.Errorf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount),
			sysLockState.TotalBlockedLocks)
	}
	// Verify the count for total running locks.
	if sysLockState.TotalAcquiredLocks != int64(l.expectedRunningLockCount) {
		t.Errorf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount),
			sysLockState.TotalAcquiredLocks)
	}
}
||||
|
||||
// Verify the lock counter for entries of given <volume, path> pair.
|
||||
func verifyLockStats(l lockStateCase, t *testing.T, testNum int) { |
||||
nsMutex.lockMapMutex.Lock() |
||||
defer nsMutex.lockMapMutex.Unlock() |
||||
param := nsParam{l.volume, l.path} |
||||
|
||||
// Verify the total locks (blocked+running) for given <vol,path> pair.
|
||||
if nsMutex.debugLockMap[param].ref != int64(l.expectedVolPathLockCount) { |
||||
t.Errorf("Test %d: Expected the total lock count for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, |
||||
param.volume, param.path, int64(l.expectedVolPathLockCount), nsMutex.debugLockMap[param].ref) |
||||
} |
||||
// Verify the total running locks for given <volume, path> pair.
|
||||
if nsMutex.debugLockMap[param].running != int64(l.expectedVolPathRunningCount) { |
||||
t.Errorf("Test %d: Expected the total running locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path, |
||||
int64(l.expectedVolPathRunningCount), nsMutex.debugLockMap[param].running) |
||||
} |
||||
// Verify the total blocked locks for givne <volume, path> pair.
|
||||
if nsMutex.debugLockMap[param].blocked != int64(l.expectedVolPathBlockCount) { |
||||
t.Errorf("Test %d: Expected the total blocked locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path, |
||||
int64(l.expectedVolPathBlockCount), nsMutex.debugLockMap[param].blocked) |
||||
} |
||||
} |
||||
|
||||
// verifyLockState - Asserts the expected lock info in the system against the actual
// values in the nsMutex: first the global counters (via verifyGlobalLockStats), then
// the per-opsID debug entry (type and status), then the per-<volume, path> counters
// (via verifyLockStats). lockMapMutex is released before verifyLockStats, which
// takes its own lock.
func verifyLockState(l lockStateCase, t *testing.T, testNum int) {
	param := nsParam{l.volume, l.path}

	verifyGlobalLockStats(l, t, testNum)
	nsMutex.lockMapMutex.Lock()
	// Verifying the lock status fields.
	if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
		if lockInfo, ok := debugLockMap.lockInfo[l.opsID]; ok {
			// Validating the lock type field in the debug lock information.
			if l.readLock {
				if lockInfo.lockType != debugRLockStr {
					t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", testNum, debugRLockStr)
				}
			} else {
				if lockInfo.lockType != debugWLockStr {
					t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", testNum, debugWLockStr)
				}
			}

			// Lock origin check intentionally disabled (origin contains
			// machine-specific file paths and line numbers):
			// if l.lockOrigin != lockInfo.lockOrigin {
			// 	t.Fatalf("Test %d: Expected the lock origin info to be \"%s\", but got \"%s\"", testNum, l.lockOrigin, lockInfo.lockOrigin)
			// }
			// Validating the status of the lock.
			if lockInfo.status != l.expectedLockStatus {
				t.Errorf("Test %d: Expected the status of the lock to be \"%s\", but got \"%s\"", testNum, l.expectedLockStatus, lockInfo.status)
			}
		} else {
			// Report failure if the lock debug entry for the given opsID is not found.
			t.Errorf("Test case %d: Expected an debug lock entry for opsID \"%s\"", testNum, l.opsID)
		}
	} else {
		// To change the status, the entry for the given <volume, path> should exist in the lock info struct.
		t.Errorf("Test case %d: Debug lock entry for volume: %s, path: %s doesn't exist", testNum, param.volume, param.path)
	}
	// verifyLockStats holds its own lock.
	nsMutex.lockMapMutex.Unlock()

	// Verify the lock count.
	verifyLockStats(l, t, testNum)
}
||||
|
||||
// TestNewDebugLockInfoPerVolumePath - Validates the values initialized by newDebugLockInfoPerVolumePath().
|
||||
func TestNewDebugLockInfoPerVolumePath(t *testing.T) { |
||||
lockInfo := newDebugLockInfoPerVolumePath() |
||||
|
||||
if lockInfo.ref != 0 { |
||||
t.Errorf("Expected initial reference value of total locks to be 0, got %d", lockInfo.ref) |
||||
} |
||||
if lockInfo.blocked != 0 { |
||||
t.Errorf("Expected initial reference of blocked locks to be 0, got %d", lockInfo.blocked) |
||||
} |
||||
if lockInfo.running != 0 { |
||||
t.Errorf("Expected initial reference value of held locks to be 0, got %d", lockInfo.running) |
||||
} |
||||
} |
||||
|
||||
// TestNsLockMapStatusBlockedToRunning - Validates the function for changing the lock
// state from blocked to running. First exercises the pre-initialization failure modes
// (uninitialized map, nil entry, nil lockInfo map, state already "Running"), then
// enables lock instrumentation and runs the table-driven cases, validating the debug
// lock entry fields on success.
func TestNsLockMapStatusBlockedToRunning(t *testing.T) {

	testCases := []struct {
		volume      string
		path        string
		lockOrigin  string
		opsID       string
		readLock    bool // lock type.
		setBlocked  bool // initialize the initial state to blocked.
		expectedErr error
	}{
		// Test case - 1.
		{

			volume:     "my-bucket",
			path:       "my-object",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "abcd1234",
			readLock:   true,
			setBlocked: true,
			// expected metrics.
			expectedErr: nil,
		},
		// Test case - 2.
		// No entry for <volume, path> pair.
		// So an attempt to change the state of the lock from `Blocked`->`Running` should fail.
		{

			volume:     "my-bucket",
			path:       "my-object-2",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "abcd1234",
			readLock:   false,
			setBlocked: false,
			// expected metrics.
			expectedErr: LockInfoVolPathMssing{"my-bucket", "my-object-2"},
		},
		// Test case - 3.
		// Entry for the given operationID doesn't exist in the lock state info.
		{
			volume:     "my-bucket",
			path:       "my-object",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "ops-Id-not-registered",
			readLock:   true,
			setBlocked: false,
			// expected metrics.
			expectedErr: LockInfoOpsIDNotFound{"my-bucket", "my-object", "ops-Id-not-registered"},
		},
		// Test case - 4.
		// Test case with non-existent lock origin.
		{
			volume:     "my-bucket",
			path:       "my-object",
			lockOrigin: "Bad Origin",
			opsID:      "abcd1234",
			readLock:   true,
			setBlocked: false,
			// expected metrics.
			expectedErr: LockInfoOriginNotFound{"my-bucket", "my-object", "abcd1234", "Bad Origin"},
		},
		// Test case - 5.
		// Test case with write lock.
		{

			volume:     "my-bucket",
			path:       "my-object",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "abcd1234",
			readLock:   false,
			setBlocked: true,
			// expected metrics.
			expectedErr: nil,
		},
	}

	param := nsParam{testCases[0].volume, testCases[0].path}
	// Testing before the initialization is done: the debug data structures are
	// still nil, so the call must fail with LockInfoNil{}.
	actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
		testCases[0].opsID, testCases[0].readLock)

	expectedNilErr := LockInfoNil{}
	if actualErr != expectedNilErr {
		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
	}

	nsMutex = &nsLockMap{
		// entries of <volume,path> -> stateInfo of locks, for instrumentation purpose.
		debugLockMap: make(map[nsParam]*debugLockInfoPerVolumePath),
		lockMap:      make(map[nsParam]*nsLock),
	}
	// Entry for <volume, path> pair is set to nil.
	// Should fail with `LockInfoNil{}`.
	nsMutex.debugLockMap[param] = nil
	actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
		testCases[0].opsID, testCases[0].readLock)

	expectedNilErr = LockInfoNil{}
	if actualErr != expectedNilErr {
		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
	}

	// Setting the lock info to be `nil`.
	nsMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
		lockInfo: nil, // setting the lockinfo to nil.
		ref:      0,
		blocked:  0,
		running:  0,
	}

	actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
		testCases[0].opsID, testCases[0].readLock)

	expectedOpsErr := LockInfoOpsIDNotFound{testCases[0].volume, testCases[0].path, testCases[0].opsID}
	if actualErr != expectedOpsErr {
		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsErr, actualErr)
	}

	// Next case: an attempt to change the state of the lock to "Running" is made,
	// but the initial state is already "Running". Such an attempt should fail.
	nsMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
		lockInfo: make(map[string]debugLockInfo),
		ref:      0,
		blocked:  0,
		running:  0,
	}

	// Setting the status of the lock to be "Running".
	// The initial state of the lock should be set to "Blocked", otherwise it's not possible to change the state from "Blocked" -> "Running".
	nsMutex.debugLockMap[param].lockInfo[testCases[0].opsID] = debugLockInfo{
		lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
		status:     "Running", // State set to "Running". Should fail with `LockInfoStateNotBlocked`.
		since:      time.Now().UTC(),
	}

	actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
		testCases[0].opsID, testCases[0].readLock)

	expectedBlockErr := LockInfoStateNotBlocked{testCases[0].volume, testCases[0].path, testCases[0].opsID}
	if actualErr != expectedBlockErr {
		t.Fatalf("Errors mismatch: Expected: \"%s\", got: \"%s\"", expectedBlockErr, actualErr)
	}

	// Enabling lock instrumentation.
	globalDebugLock = true
	// Initializing the locks.
	initNSLock(false)
	// Set debug lock info to `nil` so that the next tests have to initialize them again.
	defer func() {
		globalDebugLock = false
		nsMutex.debugLockMap = nil
	}()
	// Iterate over the cases and assert the result.
	for i, testCase := range testCases {
		param := nsParam{testCase.volume, testCase.path}
		// Status of the lock to be set to "Blocked", before setting Blocked->Running.
		if testCase.setBlocked {
			nsMutex.lockMapMutex.Lock()
			err := nsMutex.statusNoneToBlocked(param, testCase.lockOrigin, testCase.opsID, testCase.readLock)
			if err != nil {
				t.Fatalf("Test %d: Initializing the initial state to Blocked failed <ERROR> %s", i+1, err)
			}
			nsMutex.lockMapMutex.Unlock()
		}
		// Invoking the method under test.
		actualErr = nsMutex.statusBlockedToRunning(param, testCase.lockOrigin, testCase.opsID, testCase.readLock)
		if actualErr != testCase.expectedErr {
			t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
		}
		// In case of no error proceed with validating the lock state information.
		if actualErr == nil {
			// Debug entry for the given <volume, path> pair should exist.
			if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
				if lockInfo, ok := debugLockMap.lockInfo[testCase.opsID]; ok {
					// Validating the lock type field in the debug lock information.
					if testCase.readLock {
						if lockInfo.lockType != debugRLockStr {
							t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", i+1, debugRLockStr)
						}
					} else {
						if lockInfo.lockType != debugWLockStr {
							t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", i+1, debugWLockStr)
						}
					}

					// Validating the lock origin.
					if testCase.lockOrigin != lockInfo.lockOrigin {
						t.Errorf("Test %d: Expected the lock origin info to be \"%s\", but got \"%s\"", i+1, testCase.lockOrigin, lockInfo.lockOrigin)
					}
					// Validating the status of the lock.
					if lockInfo.status != "Running" {
						t.Errorf("Test %d: Expected the status of the lock to be \"%s\", but got \"%s\"", i+1, "Running", lockInfo.status)
					}
				} else {
					// Stop the tests if lock debug entry for given <volume, path> pair is not found.
					t.Fatalf("Test case %d: Expected an debug lock entry for opsID \"%s\"", i+1, testCase.opsID)
				}
			} else {
				// To change the status, the entry for the given <volume, path> should exist in the lock info struct.
				t.Fatalf("Test case %d: Debug lock entry for volume: %s, path: %s doesn't exist", i+1, param.volume, param.path)
			}
		}
	}

}
||||
|
||||
// TestNsLockMapStatusNoneToBlocked - Validates the function for changing the lock
// state to blocked. First asserts the pre-initialization failure (LockInfoNil), then
// enables lock instrumentation and runs the table-driven cases; expected counters
// accumulate across cases since they all mutate the same global nsMutex state.
func TestNsLockMapStatusNoneToBlocked(t *testing.T) {

	testCases := []lockStateCase{
		// Test case - 1.
		{

			volume:     "my-bucket",
			path:       "my-object",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "abcd1234",
			readLock:   true,
			// expected metrics.
			expectedErr:        nil,
			expectedLockStatus: "Blocked",

			expectedGlobalLockCount:  1,
			expectedRunningLockCount: 0,
			expectedBlockedLockCount: 1,

			expectedVolPathLockCount:    1,
			expectedVolPathRunningCount: 0,
			expectedVolPathBlockCount:   1,
		},
		// Test case - 2.
		// No entry for <volume, path> pair.
		// So an attempt to change the state of the lock from `Blocked`->`Running` should fail.
		{

			volume:     "my-bucket",
			path:       "my-object-2",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "abcd1234",
			readLock:   false,
			// expected metrics.
			expectedErr:        nil,
			expectedLockStatus: "Blocked",

			expectedGlobalLockCount:  2,
			expectedRunningLockCount: 0,
			expectedBlockedLockCount: 2,

			expectedVolPathLockCount:    1,
			expectedVolPathRunningCount: 0,
			expectedVolPathBlockCount:   1,
		},
		// Test case - 3.
		// Entry for the given operationID doesn't exist in the lock state info.
		// The entry should be created and relevant counters should be set.
		{
			volume:     "my-bucket",
			path:       "my-object",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "ops-Id-not-registered",
			readLock:   true,
			// expected metrics.
			expectedErr:        nil,
			expectedLockStatus: "Blocked",

			expectedGlobalLockCount:  3,
			expectedRunningLockCount: 0,
			expectedBlockedLockCount: 3,

			expectedVolPathLockCount:    2,
			expectedVolPathRunningCount: 0,
			expectedVolPathBlockCount:   2,
		},
	}

	param := nsParam{testCases[0].volume, testCases[0].path}
	// Testing before the initialization is done: the debug data structures are
	// still nil, so the call must fail with LockInfoNil{}.
	actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
		testCases[0].opsID, testCases[0].readLock)

	expectedNilErr := LockInfoNil{}
	if actualErr != expectedNilErr {
		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
	}
	// Enabling lock instrumentation.
	globalDebugLock = true
	// Initializing the locks.
	initNSLock(false)
	// Set debug lock info to `nil` so that the next tests have to initialize them again.
	defer func() {
		globalDebugLock = false
		nsMutex.debugLockMap = nil
	}()
	// Iterate over the cases and assert the result.
	for i, testCase := range testCases {
		nsMutex.lockMapMutex.Lock()
		param := nsParam{testCase.volume, testCase.path}
		actualErr := nsMutex.statusNoneToBlocked(param, testCase.lockOrigin, testCase.opsID, testCase.readLock)
		if actualErr != testCase.expectedErr {
			t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
		}
		nsMutex.lockMapMutex.Unlock()
		if actualErr == nil {
			verifyLockState(testCase, t, i+1)
		}
	}
}
||||
|
||||
// TestNsLockMapDeleteLockInfoEntryForOps - Validates the removal of the entry for a
// given operation ID from the lock info. Covers: deletion before initialization
// (LockInfoNil), deletion for a non-existent <volume, path> pair
// (LockInfoVolPathMssing), deletion of an unknown opsID (LockInfoOpsIDNotFound),
// and successful deletion, after which all global counters must drop back to 0.
func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
	testCases := []lockStateCase{
		// Test case - 1.
		{
			volume:     "my-bucket",
			path:       "my-object",
			lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
			opsID:      "abcd1234",
			readLock:   true,
			// expected metrics.
		},
	}
	// Case - 1.
	// Testing the case where delete lock info is attempted even before the lock is initialized.
	param := nsParam{testCases[0].volume, testCases[0].path}
	// Testing before the initialization is done.

	actualErr := nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)

	expectedNilErr := LockInfoNil{}
	if actualErr != expectedNilErr {
		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
	}

	// Enabling lock instrumentation.
	globalDebugLock = true
	// Initializing the locks.
	initNSLock(false)
	// Set debug lock info to `nil` so that the next tests have to initialize them again.
	defer func() {
		globalDebugLock = false
		nsMutex.debugLockMap = nil
	}()
	// Case - 2.
	// Case where an attempt to delete the entry for a non-existent <volume, path> pair is made.
	nonExistParam := nsParam{volume: "non-exist-volume", path: "non-exist-path"}
	actualErr = nsMutex.deleteLockInfoEntryForOps(nonExistParam, testCases[0].opsID)

	expectedVolPathErr := LockInfoVolPathMssing{nonExistParam.volume, nonExistParam.path}
	if actualErr != expectedVolPathErr {
		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedVolPathErr, actualErr)
	}

	// Case - 3.
	// Lock state is set to Blocked and then Running, then an attempt to delete the
	// info for a non-existent opsID is made.
	nsMutex.lockMapMutex.Lock()
	err := nsMutex.statusNoneToBlocked(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock)
	if err != nil {
		t.Fatalf("Setting lock status to Blocked failed: <ERROR> %s", err)
	}
	nsMutex.lockMapMutex.Unlock()
	err = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock)
	if err != nil {
		t.Fatalf("Setting lock status to Running failed: <ERROR> %s", err)
	}
	actualErr = nsMutex.deleteLockInfoEntryForOps(param, "non-existant-OpsID")

	expectedOpsIDErr := LockInfoOpsIDNotFound{param.volume, param.path, "non-existant-OpsID"}
	if actualErr != expectedOpsIDErr {
		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsIDErr, actualErr)
	}
	// Case - 4.
	// Attempt to delete a registered entry is made.
	// All metrics should be 0 after deleting the entry.

	// Verify that the entry for the opsID exists.
	if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
		if _, ok := debugLockMap.lockInfo[testCases[0].opsID]; !ok {
			t.Fatalf("Entry for OpsID \"%s\" in <volume> %s, <path> %s should have existed. ", testCases[0].opsID, param.volume, param.path)
		}
	} else {
		t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
	}

	actualErr = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
	if actualErr != nil {
		t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr)
	}

	// Verify that the entry for the opsID doesn't exist anymore.
	if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
		if _, ok := debugLockMap.lockInfo[testCases[0].opsID]; ok {
			t.Fatalf("The entry for opsID \"%s\" should have been deleted", testCases[0].opsID)
		}
	} else {
		t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
	}
	// After the only registered operation is deleted, all global counters must be back to 0.
	if nsMutex.runningLockCounter != int64(0) {
		t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), nsMutex.runningLockCounter)
	}
	if nsMutex.blockedCounter != int64(0) {
		t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), nsMutex.blockedCounter)
	}
	if nsMutex.globalLockCounter != int64(0) {
		t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), nsMutex.globalLockCounter)
	}
}
||||
|
||||
// TestNsLockMapDeleteLockInfoEntryForVolumePath - Tests validate the logic for removal
|
||||
// of entry for given <volume, path> pair from lock info.
|
||||
func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) { |
||||
testCases := []lockStateCase{ |
||||
// Test case - 1.
|
||||
{ |
||||
volume: "my-bucket", |
||||
path: "my-object", |
||||
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a", |
||||
opsID: "abcd1234", |
||||
readLock: true, |
||||
// expected metrics.
|
||||
}, |
||||
} |
||||
// case - 1.
|
||||
// Testing the case where delete lock info is attempted even before the lock is initialized.
|
||||
param := nsParam{testCases[0].volume, testCases[0].path} |
||||
// Testing before the initialization done.
|
||||
|
||||
actualErr := nsMutex.deleteLockInfoEntryForVolumePath(param) |
||||
|
||||
expectedNilErr := LockInfoNil{} |
||||
if actualErr != expectedNilErr { |
||||
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr) |
||||
} |
||||
|
||||
// enabling lock instrumentation.
|
||||
globalDebugLock = true |
||||
// initializing the locks.
|
||||
initNSLock(false) |
||||
// set debug lock info to `nil` so that the next tests have to initialize them again.
|
||||
defer func() { |
||||
globalDebugLock = false |
||||
nsMutex.debugLockMap = nil |
||||
}() |
||||
// case - 2.
|
||||
// Case where an attempt to delete the entry for non-existent <volume, path> pair is done.
|
||||
// Set the status of the lock to blocked and then to running.
|
||||
nonExistParam := nsParam{volume: "non-exist-volume", path: "non-exist-path"} |
||||
actualErr = nsMutex.deleteLockInfoEntryForVolumePath(nonExistParam) |
||||
|
||||
expectedVolPathErr := LockInfoVolPathMssing{nonExistParam.volume, nonExistParam.path} |
||||
if actualErr != expectedVolPathErr { |
||||
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedVolPathErr, actualErr) |
||||
} |
||||
|
||||
// case - 3.
|
||||
// Attempt to delete an registered entry is done.
|
||||
// All metrics should be 0 after deleting the entry.
|
||||
|
||||
// Registering the entry first.
|
||||
nsMutex.lockMapMutex.Lock() |
||||
err := nsMutex.statusNoneToBlocked(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock) |
||||
if err != nil { |
||||
t.Fatalf("Setting lock status to Blocked failed: <ERROR> %s", err) |
||||
} |
||||
nsMutex.lockMapMutex.Unlock() |
||||
err = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock) |
||||
if err != nil { |
||||
t.Fatalf("Setting lock status to Running failed: <ERROR> %s", err) |
||||
} |
||||
// Verify that the entry the for given <volume, path> exists.
|
||||
if _, ok := nsMutex.debugLockMap[param]; !ok { |
||||
t.Fatalf("Entry for <volume> %s, <path> %s should have existed.", param.volume, param.path) |
||||
} |
||||
// first delete the entry for the operation ID.
|
||||
err = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID) |
||||
actualErr = nsMutex.deleteLockInfoEntryForVolumePath(param) |
||||
if actualErr != nil { |
||||
t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr) |
||||
} |
||||
|
||||
// Verify that the entry for the opsId doesn't exists.
|
||||
if _, ok := nsMutex.debugLockMap[param]; ok { |
||||
t.Fatalf("Entry for <volume> %s, <path> %s should have been deleted. ", param.volume, param.path) |
||||
} |
||||
// The lock count values should be 0.
|
||||
if nsMutex.runningLockCounter != int64(0) { |
||||
t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), nsMutex.runningLockCounter) |
||||
} |
||||
if nsMutex.blockedCounter != int64(0) { |
||||
t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), nsMutex.blockedCounter) |
||||
} |
||||
if nsMutex.globalLockCounter != int64(0) { |
||||
t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), nsMutex.globalLockCounter) |
||||
} |
||||
} |
Loading…
Reference in new issue