Remove control command from minio binary (Fixes #3264) (#3265)

master
Aditya Manthramurthy authored 8 years ago, committed by Harshavardhana
parent 7abcededf2
commit e216201901
  1. cmd/control-handlers.go (277 lines changed)
  2. cmd/control-heal-main.go (227 lines changed)
  3. cmd/control-lock-main.go (235 lines changed)
  4. cmd/control-lock-main_test.go (181 lines changed)
  5. cmd/control-main.go (49 lines changed)
  6. cmd/control-mains_test.go (249 lines changed)
  7. cmd/control-router.go (90 lines changed)
  8. cmd/control-router_test.go (94 lines changed)
  9. cmd/control-service-main.go (106 lines changed)
  10. cmd/control_test.go (388 lines changed)
  11. cmd/lockinfo-handlers.go (95 lines changed)
  12. cmd/main.go (1 line changed)
  13. cmd/routers.go (6 lines changed)
  14. cmd/test-utils_test.go (56 lines changed)
  15. cmd/typed-errors.go (9 lines changed)

@@ -1,277 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"sync"
"time"
)
// errServerNotInitialized - server not initialized.
var errServerNotInitialized = errors.New("Server not initialized, please try again.")
// errServerVersionMismatch - server versions do not match.
var errServerVersionMismatch = errors.New("Server versions do not match.")
// errServerTimeMismatch - server times are too far apart.
var errServerTimeMismatch = errors.New("Server times are too far apart.")
/// Auth operations
// Login - login handler.
func (c *controlAPIHandlers) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultInterNodeJWTExpiry)
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = time.Now().UTC()
reply.ServerVersion = Version
return nil
}
// HealListArgs - argument for ListObjects RPC.
type HealListArgs struct {
// Authentication token generated by Login.
GenericArgs
Bucket string
Prefix string
Marker string
Delimiter string
MaxKeys int
}
// HealListReply - reply object by ListObjects RPC.
type HealListReply struct {
IsTruncated bool
NextMarker string
Objects []ObjectInfo
}
// ListObjectsHealHandler - lists all objects that need healing.
func (c *controlAPIHandlers) ListObjectsHealHandler(args *HealListArgs, reply *HealListReply) error {
objAPI := c.ObjectAPI()
if objAPI == nil {
return errServerNotInitialized
}
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
if !c.IsXL {
return nil
}
info, err := objAPI.ListObjectsHeal(args.Bucket, args.Prefix, args.Marker, args.Delimiter, args.MaxKeys)
if err != nil {
return err
}
reply.IsTruncated = info.IsTruncated
reply.NextMarker = info.NextMarker
reply.Objects = info.Objects
return nil
}
// HealBucketArgs - arguments for HealBucket RPC.
type HealBucketArgs struct {
// Authentication token generated by Login.
GenericArgs
// Bucket to be healed.
Bucket string
}
// Heals missing buckets across disks, if we have enough quorum.
func (c *controlAPIHandlers) HealBucketHandler(args *HealBucketArgs, reply *GenericReply) error {
objAPI := c.ObjectAPI()
if objAPI == nil {
return errServerNotInitialized
}
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
if !c.IsXL {
return nil
}
// Proceed to heal the bucket.
return objAPI.HealBucket(args.Bucket)
}
// HealObjectArgs - argument for HealObject RPC.
type HealObjectArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the bucket where the object
// needs to be healed.
Bucket string
// Objects to be healed.
Objects []ObjectInfo
}
// HealObjectReply - reply by HealObject RPC.
type HealObjectReply struct {
Results []string
}
// HealObjectsHandler - heals up to 1000 objects at a time for missing chunks or missing metadata on a given bucket.
func (c *controlAPIHandlers) HealObjectsHandler(args *HealObjectArgs, reply *HealObjectReply) error {
objAPI := c.ObjectAPI()
if objAPI == nil {
return errServerNotInitialized
}
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
if !c.IsXL {
return nil
}
// Heal all objects that need healing.
var errs = make([]error, len(args.Objects))
for idx, objInfo := range args.Objects {
errs[idx] = objAPI.HealObject(args.Bucket, objInfo.Name)
}
// Get all the error causes.
var causes = make([]string, len(args.Objects))
for id, err := range errs {
if err != nil {
causes[id] = err.Error()
}
}
// Save the causes.
reply.Results = causes
return nil
}
// Heals backend storage format.
func (c *controlAPIHandlers) HealFormatHandler(args *GenericArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
if !c.IsXL {
return nil
}
err := healFormatXL(c.StorageDisks)
if err != nil {
return err
}
go func() {
globalWakeupCh <- struct{}{}
}()
return err
}
// ServiceArgs - argument for Service RPC.
type ServiceArgs struct {
// Authentication token generated by Login.
GenericArgs
// Represents the type of operation server is requested
// to perform. Currently supported signals are
// stop, restart and status.
Signal serviceSignal
}
// ServiceReply - represents service operation success info.
type ServiceReply struct {
StorageInfo StorageInfo
}
// Remote procedure call, calls serviceMethod with given input args.
func (c *controlAPIHandlers) remoteServiceCall(args *ServiceArgs, replies []*ServiceReply) error {
var wg sync.WaitGroup
var errs = make([]error, len(c.RemoteControls))
// Send remote call to all neighboring peers to restart minio servers.
for index, clnt := range c.RemoteControls {
wg.Add(1)
go func(index int, client *AuthRPCClient) {
defer wg.Done()
errs[index] = client.Call("Control.ServiceHandler", args, replies[index])
errorIf(errs[index], "Unable to initiate control service request to remote node %s", client.Node())
}(index, clnt)
}
wg.Wait()
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
// Service - handler for sending service signals across many servers.
func (c *controlAPIHandlers) ServiceHandler(args *ServiceArgs, reply *ServiceReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
objAPI := c.ObjectAPI()
if objAPI == nil {
return errServerNotInitialized
}
if args.Signal == serviceStatus {
reply.StorageInfo = objAPI.StorageInfo()
return nil
}
var replies = make([]*ServiceReply, len(c.RemoteControls))
switch args.Signal {
case serviceRestart:
if args.Remote {
// Set remote as false for remote calls.
args.Remote = false
if err := c.remoteServiceCall(args, replies); err != nil {
return err
}
}
globalServiceSignalCh <- serviceRestart
case serviceStop:
if args.Remote {
// Set remote as false for remote calls.
args.Remote = false
if err := c.remoteServiceCall(args, replies); err != nil {
return err
}
}
globalServiceSignalCh <- serviceStop
}
return nil
}
// TryInitHandler - generic RPC control handler
func (c *controlAPIHandlers) TryInitHandler(args *GenericArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
if !c.IsXL {
return nil
}
go func() {
globalWakeupCh <- struct{}{}
}()
*reply = GenericReply{}
return nil
}
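The handlers above, remoteServiceCall in particular, fan a call out to every remote peer with a sync.WaitGroup and collect one error per peer into a slice. A minimal, self-contained sketch of that scatter-gather pattern, with hypothetical stand-ins (callPeer and the peer addresses) for the real AuthRPCClient calls:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// callPeer stands in for an RPC call to a single remote control endpoint.
func callPeer(addr string) error {
	if addr == "" {
		return errors.New("empty address")
	}
	return nil
}

func main() {
	peers := []string{"10.1.10.2:9000", "10.1.10.3:9000", ""}
	// One error slot per peer, written only by the goroutine that owns it,
	// mirroring the errs slice in remoteServiceCall.
	errs := make([]error, len(peers))
	var wg sync.WaitGroup
	for i, addr := range peers {
		wg.Add(1)
		go func(i int, addr string) {
			defer wg.Done()
			errs[i] = callPeer(addr)
		}(i, addr)
	}
	wg.Wait()
	for i, err := range errs {
		fmt.Printf("peer %d: err=%v\n", i, err)
	}
}

Because each goroutine writes only its own index, the slice needs no extra locking; the WaitGroup alone is enough to make the results visible after Wait returns.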

@@ -1,227 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"fmt"
"net/url"
"path"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
var healCmd = cli.Command{
Name: "heal",
Usage: "To heal objects.",
Action: healControl,
Flags: globalFlags,
CustomHelpTemplate: `NAME:
minio control {{.Name}} - {{.Usage}}
USAGE:
minio control {{.Name}} http[s]://[access_key[:secret_key]@]server_ip:port/
FLAGS:
{{range .Flags}}{{.}}
{{end}}
EXAMPLES:
1. Heal missing on-disk format across all inconsistent nodes.
$ minio control {{.Name}} http://localhost:9000
2. Heals a specific object.
$ minio control {{.Name}} http://localhost:9000/songs/classical/western/piano.mp3
3. Heal bucket and all objects in a bucket recursively.
$ minio control {{.Name}} http://localhost:9000/songs
4. Heal all objects with a given prefix recursively.
$ minio control {{.Name}} http://localhost:9000/songs/classical/
`,
}
// healStorageFormat heals the backend storage format, useful for restoring a
// `format.json` missing on fresh or corrupted disks. This call does a deep
// inspection of the backend layout and applies the appropriate `format.json` to the disk.
func healStorageFormat(authClnt *AuthRPCClient) error {
args := &GenericArgs{}
reply := &GenericReply{}
return authClnt.Call("Control.HealFormatHandler", args, reply)
}
// lists all objects which need to be healed; this is a helper called before the
// actual healing operation. Returns a maximum of 1000 objects that need healing at a time.
// Marker indicates the entry where the next listing will start.
func listObjectsHeal(authClnt *AuthRPCClient, bucketName, prefixName, markerName string) (*HealListReply, error) {
args := &HealListArgs{
Bucket: bucketName,
Prefix: prefixName,
Marker: markerName,
Delimiter: "",
MaxKeys: 1000,
}
reply := &HealListReply{}
err := authClnt.Call("Control.ListObjectsHealHandler", args, reply)
if err != nil {
return nil, err
}
return reply, nil
}
// healMsg is an internal struct encapsulating a pretty message to be printed by the caller.
type healMsg struct {
Msg string
Err error
}
// Prettifies heal results and returns them over a channel, caller reads from this channel and prints.
func prettyHealResults(healedObjects []ObjectInfo, healReply *HealObjectReply) <-chan healMsg {
var msgCh = make(chan healMsg)
// Starts writing to message channel for the list of results sent back
// by a previous healing operation.
go func(msgCh chan<- healMsg) {
defer close(msgCh)
// Go through all the results and validate if we have success or failure.
for i, healStr := range healReply.Results {
objPath := path.Join(healedObjects[i].Bucket, healedObjects[i].Name)
// TODO: We need to still print heal error cause.
if healStr != "" {
msgCh <- healMsg{
Msg: fmt.Sprintf("%s %s", colorRed("FAILED"), objPath),
Err: errors.New(healStr),
}
continue
}
msgCh <- healMsg{
Msg: fmt.Sprintf("%s %s", colorGreen("SUCCESS"), objPath),
}
}
}(msgCh)
// Return ..
return msgCh
}
var scanBar = scanBarFactory()
// Heals all the objects under a given bucket; optionally, an object prefix may be
// specified to heal only the objects under that prefix.
func healObjects(authClnt *AuthRPCClient, bucketName, prefixName string) error {
if authClnt == nil || bucketName == "" {
return errInvalidArgument
}
// Save marker for the next request.
var markerName string
for {
healListReply, err := listObjectsHeal(authClnt, bucketName, prefixName, markerName)
if err != nil {
return err
}
// Attempt to heal only if there are any objects to heal.
if len(healListReply.Objects) > 0 {
healArgs := &HealObjectArgs{
Bucket: bucketName,
Objects: healListReply.Objects,
}
healReply := &HealObjectReply{}
err = authClnt.Call("Control.HealObjectsHandler", healArgs, healReply)
if err != nil {
return err
}
// Pretty print all the heal results.
for msg := range prettyHealResults(healArgs.Objects, healReply) {
if msg.Err != nil {
// TODO we need to print the error cause as well.
scanBar(msg.Msg)
continue
}
// Success.
scanBar(msg.Msg)
}
}
// End of listing objects for healing.
if !healListReply.IsTruncated {
break
}
// Set the marker to list the next set of keys.
markerName = healListReply.NextMarker
}
return nil
}
// Heals your bucket for any missing entries.
func healBucket(authClnt *AuthRPCClient, bucketName string) error {
if authClnt == nil || bucketName == "" {
return errInvalidArgument
}
return authClnt.Call("Control.HealBucketHandler", &HealBucketArgs{
Bucket: bucketName,
}, &GenericReply{})
}
// Entry point for minio control heal command.
func healControl(ctx *cli.Context) {
if ctx.Args().Present() && len(ctx.Args()) != 1 {
cli.ShowCommandHelpAndExit(ctx, "heal", 1)
}
parsedURL, err := url.Parse(ctx.Args().Get(0))
fatalIf(err, "Unable to parse URL %s", ctx.Args().Get(0))
accessKey := serverConfig.GetCredential().AccessKeyID
secretKey := serverConfig.GetCredential().SecretAccessKey
// Username and password specified in URL will override prior configuration
if parsedURL.User != nil {
accessKey = parsedURL.User.Username()
if key, set := parsedURL.User.Password(); set {
secretKey = key
}
}
authCfg := &authConfig{
accessKey: accessKey,
secretKey: secretKey,
secureConn: parsedURL.Scheme == "https",
address: parsedURL.Host,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Control.LoginHandler",
}
client := newAuthClient(authCfg)
if parsedURL.Path == "/" || parsedURL.Path == "" {
err = healStorageFormat(client)
fatalIf(err, "Unable to heal disk metadata.")
return
}
bucketName, prefixName := urlPathSplit(parsedURL.Path)
// Heal the bucket.
err = healBucket(client, bucketName)
fatalIf(err, "Unable to heal bucket %s", bucketName)
// Heal all the objects.
err = healObjects(client, bucketName, prefixName)
fatalIf(err, "Unable to heal objects on bucket %s at prefix %s", bucketName, prefixName)
console.Println()
}
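healObjects above drives Control.ListObjectsHealHandler in a marker/IsTruncated loop, healing at most 1000 objects per round trip. A self-contained sketch of that pagination loop; listPage and the object names are hypothetical stand-ins for the RPC call:

package main

import "fmt"

// listPage stands in for ListObjectsHeal: it returns up to maxKeys entries
// starting after marker, plus the next marker and a truncation flag.
func listPage(all []string, marker string, maxKeys int) (page []string, next string, truncated bool) {
	start := 0
	if marker != "" {
		for i, v := range all {
			if v == marker {
				start = i + 1
				break
			}
		}
	}
	end := start + maxKeys
	if end >= len(all) {
		return all[start:], "", false
	}
	return all[start:end], all[end-1], true
}

func main() {
	objects := []string{"a", "b", "c", "d", "e"}
	marker := ""
	for {
		page, next, truncated := listPage(objects, marker, 2)
		fmt.Println("healing:", page)
		// End of listing objects for healing.
		if !truncated {
			break
		}
		// Set the marker to list the next set of keys.
		marker = next
	}
}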

@@ -1,235 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"net/url"
"path"
"strings"
"time"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
var lockFlags = []cli.Flag{
cli.StringFlag{
Name: "older-than",
Usage: "Include locks older than given time.",
Value: "24h",
},
cli.BoolFlag{
Name: "verbose",
Usage: "Lists more information about locks.",
},
cli.BoolFlag{
Name: "recursive",
Usage: "Recursively clear locks.",
},
}
var lockCmd = cli.Command{
Name: "lock",
Usage: "Prints current lock information.",
Action: lockControl,
Flags: append(lockFlags, globalFlags...),
CustomHelpTemplate: `NAME:
minio control {{.Name}} - {{.Usage}}
USAGE:
minio control {{.Name}} [list|clear] http[s]://[access_key[:secret_key]@]server_ip:port/
FLAGS:
{{range .Flags}}{{.}}
{{end}}
EXAMPLES:
1. List all currently active locks from all nodes. Defaults to listing locks held longer than 24 hours.
$ minio control {{.Name}} list http://localhost:9000/
2. List all currently active locks from all nodes. Request locks older than 1 minute.
$ minio control {{.Name}} --older-than=1m list http://localhost:9000/
3. Clear lock named 'bucket/object' (exact match).
$ minio control {{.Name}} clear http://localhost:9000/bucket/object
4. Clear all locks with names that start with 'bucket/prefix' (wildcard match).
$ minio control {{.Name}} --recursive clear http://localhost:9000/bucket/prefix
5. Clear all locks older than 10 minutes.
$ minio control {{.Name}} --older-than=10m clear http://localhost:9000/
6. Clear all locks with names that start with 'bucket/a' and that are older than 1 hour.
$ minio control {{.Name}} --recursive --older-than=1h clear http://localhost:9000/bucket/a
`,
}
// printLockStateVerbose - pretty prints systemLockState; additionally, it filters out locks held for less than the given duration.
func printLockStateVerbose(lkStateRep map[string]SystemLockState, olderThan time.Duration) {
console.Println("Duration Server LockType LockAcquired Status LockOrigin Resource")
for server, lockState := range lkStateRep {
for _, lockInfo := range lockState.LocksInfoPerObject {
lockedResource := path.Join(lockInfo.Bucket, lockInfo.Object)
for _, lockDetails := range lockInfo.LockDetailsOnObject {
if lockDetails.Duration < olderThan {
continue
}
console.Println(lockDetails.Duration, server,
lockDetails.LockType, lockDetails.Since,
lockDetails.Status, lockDetails.LockOrigin,
lockedResource)
}
}
}
}
// printLockState - pretty prints systemLockState; additionally, it filters out locks held for less than the given duration.
func printLockState(lkStateRep map[string]SystemLockState, olderThan time.Duration) {
console.Println("Duration Server LockType Resource")
for server, lockState := range lkStateRep {
for _, lockInfo := range lockState.LocksInfoPerObject {
lockedResource := path.Join(lockInfo.Bucket, lockInfo.Object)
for _, lockDetails := range lockInfo.LockDetailsOnObject {
if lockDetails.Duration < olderThan {
continue
}
console.Println(lockDetails.Duration, server,
lockDetails.LockType, lockedResource)
}
}
}
}
// clearLockState - clears locks held for at least the given duration that match the given name (exact) or prefix (recursive).
func clearLockState(f func(bucket, object string), lkStateRep map[string]SystemLockState, olderThan time.Duration, match string, recursive bool) {
console.Println("Status Duration Server LockType Resource")
for server, lockState := range lkStateRep {
for _, lockInfo := range lockState.LocksInfoPerObject {
lockedResource := path.Join(lockInfo.Bucket, lockInfo.Object)
for _, lockDetails := range lockInfo.LockDetailsOnObject {
if lockDetails.Duration < olderThan {
continue
}
if match != "" {
if recursive {
if !strings.HasPrefix(lockedResource, match) {
continue
}
} else if lockedResource != match {
continue
}
}
f(lockInfo.Bucket, lockInfo.Object)
console.Println("CLEARED", lockDetails.Duration, server,
lockDetails.LockType, lockedResource)
}
}
}
}
// "minio control lock" entry point.
func lockControl(c *cli.Context) {
if !c.Args().Present() && len(c.Args()) != 2 {
cli.ShowCommandHelpAndExit(c, "lock", 1)
}
parsedURL, err := url.Parse(c.Args().Get(1))
fatalIf(err, "Unable to parse URL.")
accessKey := serverConfig.GetCredential().AccessKeyID
secretKey := serverConfig.GetCredential().SecretAccessKey
// Username and password specified in URL will override prior configuration
if parsedURL.User != nil {
accessKey = parsedURL.User.Username()
if key, set := parsedURL.User.Password(); set {
secretKey = key
}
}
// Parse older than string.
olderThanStr := c.String("older-than")
olderThan, err := time.ParseDuration(olderThanStr)
fatalIf(err, "Unable to parse older-than time duration.")
// Verbose flag.
verbose := c.Bool("verbose")
// Recursive flag.
recursive := c.Bool("recursive")
authCfg := &authConfig{
accessKey: accessKey,
secretKey: secretKey,
secureConn: parsedURL.Scheme == "https",
address: parsedURL.Host,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Control.LoginHandler",
}
client := newAuthClient(authCfg)
args := &GenericArgs{
// This is necessary so that the remotes
// don't end up sending requests back and forth.
Remote: true,
}
subCommand := c.Args().Get(0)
switch subCommand {
case "list":
lkStateRep := make(map[string]SystemLockState)
// Request lock info, fetches from all the nodes in the cluster.
err = client.Call("Control.LockInfo", args, &lkStateRep)
fatalIf(err, "Unable to fetch system lockInfo.")
if !verbose {
printLockState(lkStateRep, olderThan)
} else {
printLockStateVerbose(lkStateRep, olderThan)
}
case "clear":
path := parsedURL.Path
if strings.HasPrefix(path, "/") {
path = path[1:] // Strip leading slash
}
if path == "" && c.NumFlags() == 0 {
fatalIf(errors.New("Bad arguments"), "Need to either pass a path or older-than argument")
}
if !c.IsSet("older-than") { // If not set explicitly, change default to 0 instead of 24h
olderThan = 0
}
lkStateRep := make(map[string]SystemLockState)
// Request lock info, fetches from all the nodes in the cluster.
err = client.Call("Control.LockInfo", args, &lkStateRep)
fatalIf(err, "Unable to fetch system lockInfo.")
// Helper function to call server for actual removal of lock
f := func(bucket, object string) {
args := LockClearArgs{
Bucket: bucket,
Object: object,
}
reply := GenericReply{}
// Call server to clear the lock based on the name of the object.
err := client.Call("Control.LockClear", &args, &reply)
fatalIf(err, "Unable to clear lock.")
}
// Loop over all locks and determine whether to clear or not.
clearLockState(f, lkStateRep, olderThan, path, recursive)
default:
fatalIf(errInvalidArgument, "Unsupported lock control operation %s", c.Args().Get(0))
}
}
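clearLockState above combines an age threshold with exact-or-prefix name matching before force-unlocking a resource. A self-contained sketch of that filter, assuming a hypothetical lockEntry type in place of the real lock state structures:

package main

import (
	"fmt"
	"strings"
	"time"
)

type lockEntry struct {
	Resource string
	Age      time.Duration
}

// shouldClear mirrors the filter in clearLockState: an age threshold plus
// exact-or-prefix name matching.
func shouldClear(l lockEntry, olderThan time.Duration, match string, recursive bool) bool {
	if l.Age < olderThan {
		return false
	}
	if match == "" {
		return true
	}
	if recursive {
		return strings.HasPrefix(l.Resource, match)
	}
	return l.Resource == match
}

func main() {
	locks := []lockEntry{
		{"bucket/object", 2 * time.Hour},
		{"bucket/prefix/a.txt", 30 * time.Minute},
	}
	for _, l := range locks {
		fmt.Println(l.Resource, shouldClear(l, time.Hour, "bucket/", true))
	}
}

With --recursive the match is a plain strings.HasPrefix, which is why example 4 in the help text clears everything under 'bucket/prefix'.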

@@ -1,181 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"testing"
"time"
)
// Test print systemState.
func TestPrintLockState(t *testing.T) {
testLock := nsMutex.NewNSLock("testbucket", "1.txt")
testLock.Lock()
sysLockState, err := getSystemLockState()
if err != nil {
t.Fatal(err)
}
testLock.Unlock()
sysLockStateMap := map[string]SystemLockState{}
sysLockStateMap["bucket"] = sysLockState
// Print lock state.
printLockState(sysLockStateMap, 0)
// Print lock state verbose.
printLockStateVerbose(sysLockStateMap, 0)
// Does not print any lock state in normal print mode.
printLockState(sysLockStateMap, 10*time.Second)
// Does not print any lock state in debug print mode.
printLockStateVerbose(sysLockStateMap, 10*time.Second)
}
// Helper function to test equality of locks (without taking timing info into account)
func testLockStateEquality(vliLeft, vliRight VolumeLockInfo) bool {
if vliLeft.Bucket != vliRight.Bucket ||
vliLeft.Object != vliRight.Object ||
vliLeft.LocksOnObject != vliRight.LocksOnObject ||
vliLeft.LocksAcquiredOnObject != vliRight.LocksAcquiredOnObject ||
vliLeft.TotalBlockedLocks != vliRight.TotalBlockedLocks {
return false
}
return true
}
// Test clearing of locks.
func TestLockStateClear(t *testing.T) {
// Helper function to circumvent the RPC call to LockClear and call nsMutex.ForceUnlock directly.
f := func(bucket, object string) {
nsMutex.ForceUnlock(bucket, object)
}
testLock := nsMutex.NewNSLock("testbucket", "1.txt")
testLock.Lock()
sysLockState, err := getSystemLockState()
if err != nil {
t.Fatal(err)
}
expectedVli := VolumeLockInfo{
Bucket: "testbucket",
Object: "1.txt",
LocksOnObject: 1,
LocksAcquiredOnObject: 1,
TotalBlockedLocks: 0,
}
// Test initial condition.
if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
}
sysLockStateMap := map[string]SystemLockState{}
sysLockStateMap["testnode1"] = sysLockState
// Clear locks that are 10 seconds old (which is a no-op in this case)
clearLockState(f, sysLockStateMap, 10*time.Second, "", false)
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
}
// Clear all locks (older than 0 seconds)
clearLockState(f, sysLockStateMap, 0, "", false)
// Verify that there are no locks
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
if len(sysLockState.LocksInfoPerObject) != 0 {
t.Errorf("Expected no locks, got %#v", sysLockState.LocksInfoPerObject)
}
// Create another lock
blobLock := nsMutex.NewNSLock("testbucket", "blob.txt")
blobLock.RLock()
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
sysLockStateMap["testnode1"] = sysLockState
// Correct wildcard match but bad age.
clearLockState(f, sysLockStateMap, 10*time.Second, "testbucket/blob", true)
// Ensure lock is still there.
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
expectedVli.Object = "blob.txt"
if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
}
// Clear lock based on wildcard match.
clearLockState(f, sysLockStateMap, 0, "testbucket/blob", true)
// Verify that there are no locks
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
if len(sysLockState.LocksInfoPerObject) != 0 {
t.Errorf("Expected no locks, got %#v", sysLockState.LocksInfoPerObject)
}
// Create yet another lock
exactLock := nsMutex.NewNSLock("testbucket", "exact.txt")
exactLock.RLock()
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
sysLockStateMap["testnode1"] = sysLockState
// Make sure that exact match can fail.
clearLockState(f, sysLockStateMap, 0, "testbucket/exact.txT", false)
// Ensure lock is still there.
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
expectedVli.Object = "exact.txt"
if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
}
// Clear lock based on exact match.
clearLockState(f, sysLockStateMap, 0, "testbucket/exact.txt", false)
// Verify that there are no locks
if sysLockState, err = getSystemLockState(); err != nil {
t.Fatal(err)
}
if len(sysLockState.LocksInfoPerObject) != 0 {
t.Errorf("Expected no locks, got %#v", sysLockState.LocksInfoPerObject)
}
// reset lock states for further tests
initNSLock(false)
}

@@ -1,49 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "github.com/minio/cli"
// "minio control" command.
var controlCmd = cli.Command{
Name: "control",
Usage: "Control and manage minio server.",
Flags: globalFlags,
Action: mainControl,
Subcommands: []cli.Command{
lockCmd,
healCmd,
serviceCmd,
},
CustomHelpTemplate: `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} [FLAGS] COMMAND
FLAGS:
{{range .Flags}}{{.}}
{{end}}
COMMANDS:
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}
`,
}
func mainControl(ctx *cli.Context) {
cli.ShowAppHelp(ctx)
}
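controlCmd above is a parent command whose Action only prints help, with the real work living in its Subcommands. A minimal sketch of that wiring, assuming the github.com/minio/cli calls already used in this commit (cli.NewApp, cli.Command, cli.ShowAppHelp, app.Run) and hypothetical 'tool'/'ping' commands:

package main

import (
	"fmt"
	"os"

	"github.com/minio/cli"
)

var pingCmd = cli.Command{
	Name:   "ping",
	Usage:  "Replies with pong.",
	Action: func(ctx *cli.Context) { fmt.Println("pong") },
}

// toolCmd follows the controlCmd pattern: no work of its own, it only prints
// help and dispatches to its subcommands.
var toolCmd = cli.Command{
	Name:        "tool",
	Usage:       "Example parent command.",
	Subcommands: []cli.Command{pingCmd},
	Action:      func(ctx *cli.Context) { cli.ShowAppHelp(ctx) },
}

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{toolCmd}
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}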

@@ -1,249 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"crypto/rand"
"os"
"path"
"testing"
"github.com/minio/cli"
)
// Test to call healControl() in control-heal-main.go
func TestControlHealMain(t *testing.T) {
// create cli app for testing
app := cli.NewApp()
app.Commands = []cli.Command{controlCmd}
// start test server
testServer := StartTestServer(t, "XL")
// schedule cleanup at the end
defer testServer.Stop()
// fetch http server endpoint
url := testServer.Server.URL
// create args to call
args := []string{"./minio", "control", "heal", url}
// run app
err := app.Run(args)
if err != nil {
t.Errorf("Control-Heal-Format-Main test failed with - %s", err.Error())
}
obj := newObjectLayerFn()
// Create "bucket"
err = obj.MakeBucket("bucket")
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
data := make([]byte, 1*1024*1024)
length := int64(len(data))
_, err = rand.Read(data)
if err != nil {
t.Fatal(err)
}
_, err = obj.PutObject(bucket, object, length, bytes.NewReader(data), nil, "")
if err != nil {
t.Fatal(err)
}
// Remove the object - to simulate the case where the disk was down when the object was created.
err = os.RemoveAll(path.Join(testServer.Disks[0].Path, bucket, object))
if err != nil {
t.Fatal(err)
}
args = []string{"./minio", "control", "heal", url + "/bucket"}
// run app
err = app.Run(args)
if err != nil {
t.Errorf("Control-Heal-Bucket-Main test failed with - %s", err.Error())
}
args = []string{"./minio", "control", "heal", url + "/bucket/object"}
// run app
err = app.Run(args)
if err != nil {
t.Errorf("Control-Heal-Bucket-With-Prefix-Main test failed with - %s", err.Error())
}
}
// Test to call lockControl() in control-lock-main.go
func TestControlLockMain(t *testing.T) {
// Create cli app for testing
app := cli.NewApp()
app.Commands = []cli.Command{controlCmd}
// Start test server
testServer := StartTestServer(t, "XL")
// Schedule cleanup at the end
defer testServer.Stop()
// Fetch http server endpoint
url := testServer.Server.URL
// Create args to call
args := []string{"./minio", "control", "lock", "list", url}
// Run app
err := app.Run(args)
if err != nil {
t.Errorf("Control-Lock-Main test failed with - %s", err.Error())
}
}
// Test to call serviceControl(stop) in control-service-main.go
func TestControlServiceStopMain(t *testing.T) {
// create cli app for testing
app := cli.NewApp()
app.Commands = []cli.Command{controlCmd}
// Initialize done channel specifically for each test.
globalServiceDoneCh = make(chan struct{}, 1)
// Initialize signal channel specifically for each test.
globalServiceSignalCh = make(chan serviceSignal, 1)
// start test server
testServer := StartTestServer(t, "XL")
// schedule cleanup at the end
defer testServer.Stop()
// fetch http server endpoint
url := testServer.Server.URL
// create args to call
args := []string{"./minio", "control", "service", "stop", url}
// run app
err := app.Run(args)
if err != nil {
t.Errorf("Control-Service-Stop-Main test failed with - %s", err)
}
}
// Test to call serviceControl(status) in control-service-main.go
func TestControlServiceStatusMain(t *testing.T) {
// create cli app for testing
app := cli.NewApp()
app.Commands = []cli.Command{controlCmd}
// Initialize done channel specifically for each test.
globalServiceDoneCh = make(chan struct{}, 1)
// Initialize signal channel specifically for each test.
globalServiceSignalCh = make(chan serviceSignal, 1)
// start test server
testServer := StartTestServer(t, "XL")
// schedule cleanup at the end
defer testServer.Stop()
// fetch http server endpoint
url := testServer.Server.URL
// Create args to call
args := []string{"./minio", "control", "service", "status", url}
// run app
err := app.Run(args)
if err != nil {
t.Errorf("Control-Service-Status-Main test failed with - %s", err)
}
// Create args to call
args = []string{"./minio", "control", "service", "stop", url}
// run app
err = app.Run(args)
if err != nil {
t.Errorf("Control-Service-Stop-Main test failed with - %s", err)
}
}
// Test to call serviceControl(restart) in control-service-main.go
func TestControlServiceRestartMain(t *testing.T) {
// create cli app for testing
app := cli.NewApp()
app.Commands = []cli.Command{controlCmd}
// Initialize done channel specifically for each test.
globalServiceDoneCh = make(chan struct{}, 1)
// Initialize signal channel specifically for each test.
globalServiceSignalCh = make(chan serviceSignal, 1)
// start test server
testServer := StartTestServer(t, "XL")
// schedule cleanup at the end
defer testServer.Stop()
// fetch http server endpoint
url := testServer.Server.URL
// Create args to call
args := []string{"./minio", "control", "service", "restart", url}
// run app
err := app.Run(args)
if err != nil {
t.Errorf("Control-Service-Restart-Main test failed with - %s", err)
}
// Initialize done channel specifically for each test.
globalServiceDoneCh = make(chan struct{}, 1)
// Initialize signal channel specifically for each test.
globalServiceSignalCh = make(chan serviceSignal, 1)
// Create args to call
args = []string{"./minio", "control", "service", "stop", url}
// run app
err = app.Run(args)
if err != nil {
t.Errorf("Control-Service-Stop-Main test failed with - %s", err)
}
}
// NOTE: This test practically always passes, but it's the only way to
// execute mainControl in a test situation
func TestControlMain(t *testing.T) {
// create cli app for testing
app := cli.NewApp()
app.Commands = []cli.Command{controlCmd}
// create args to call
args := []string{"./minio", "control"}
// run app
err := app.Run(args)
if err != nil {
t.Errorf("Control-Main test failed with - %s", err)
}
}

@@ -1,90 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/rpc"
"path"
router "github.com/gorilla/mux"
)
// Routes paths for "minio control" commands.
const (
controlPath = "/control"
)
// Initializes remote control clients for making remote requests.
func initRemoteControlClients(srvCmdConfig serverCmdConfig) []*AuthRPCClient {
if !globalIsDistXL {
return nil
}
// Initialize auth rpc clients.
var remoteControlClnts []*AuthRPCClient
localMap := make(map[string]int)
for _, ep := range srvCmdConfig.endpoints {
// Skip endpoints that point to local storage.
if isLocalStorage(ep) {
continue
}
if localMap[ep.Host] == 1 {
continue
}
localMap[ep.Host]++
remoteControlClnts = append(remoteControlClnts, newAuthClient(&authConfig{
accessKey: serverConfig.GetCredential().AccessKeyID,
secretKey: serverConfig.GetCredential().SecretAccessKey,
secureConn: isSSL(),
address: ep.Host,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Control.LoginHandler",
}))
}
return remoteControlClnts
}
// Represents control object which provides handlers for control
// operations on server.
type controlAPIHandlers struct {
ObjectAPI func() ObjectLayer
IsXL bool
RemoteControls []*AuthRPCClient
LocalNode string
StorageDisks []StorageAPI
}
// Register control RPC handlers.
func registerControlRPCRouter(mux *router.Router, srvCmdConfig serverCmdConfig) (err error) {
// Initialize Control.
ctrlHandlers := &controlAPIHandlers{
ObjectAPI: newObjectLayerFn,
IsXL: globalIsDistXL || len(srvCmdConfig.storageDisks) > 1,
RemoteControls: initRemoteControlClients(srvCmdConfig),
LocalNode: getLocalAddress(srvCmdConfig),
StorageDisks: srvCmdConfig.storageDisks,
}
ctrlRPCServer := rpc.NewServer()
err = ctrlRPCServer.RegisterName("Control", ctrlHandlers)
if err != nil {
return traceError(err)
}
ctrlRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter()
ctrlRouter.Path(controlPath).Handler(ctrlRPCServer)
return nil
}
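registerControlRPCRouter above registers a receiver under the name "Control" and exposes it on a route, so clients can invoke methods as "Control.<Method>". A self-contained sketch of that net/rpc register-and-call flow over a plain TCP listener instead of the gorilla/mux route; ControlHandlers and Control.Status are hypothetical:

package main

import (
	"fmt"
	"log"
	"net"
	"net/rpc"
)

type StatusArgs struct{ Token string }

// ControlHandlers plays the role of controlAPIHandlers: its exported methods
// become RPC endpoints under the registered name "Control".
type ControlHandlers struct{}

func (c *ControlHandlers) Status(args *StatusArgs, reply *string) error {
	*reply = "ok, token=" + args.Token
	return nil
}

func main() {
	srv := rpc.NewServer()
	if err := srv.RegisterName("Control", new(ControlHandlers)); err != nil {
		log.Fatal(err)
	}
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	go srv.Accept(l) // serve RPC connections in the background

	clnt, err := rpc.Dial("tcp", l.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer clnt.Close()
	var reply string
	if err := clnt.Call("Control.Status", &StatusArgs{Token: "t"}, &reply); err != nil {
		log.Fatal(err)
	}
	fmt.Println(reply)
}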

@@ -1,94 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/url"
"testing"
)
// Tests initialization of remote controller clients.
func TestInitRemoteControlClients(t *testing.T) {
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatal("Unable to initialize config", err)
}
defer removeAll(rootPath)
testCases := []struct {
isDistXL bool
srvCmdConfig serverCmdConfig
totalClients int
}{
// Test - 1 no allocation if server config is not distributed XL.
{
isDistXL: false,
srvCmdConfig: serverCmdConfig{},
totalClients: 0,
},
// Test - 2 two clients allocated with 4 disks with 2 disks on same node each.
{
isDistXL: true,
srvCmdConfig: serverCmdConfig{
endpoints: []*url.URL{{
Scheme: "http",
Host: "10.1.10.1:9000",
Path: "/mnt/disk1",
}, {
Scheme: "http",
Host: "10.1.10.1:9000", Path: "/mnt/disk2",
}, {
Scheme: "http",
Host: "10.1.10.2:9000", Path: "/mnt/disk1",
}, {
Scheme: "http",
Host: "10.1.10.2:9000", Path: "/mnt/disk2"},
},
},
totalClients: 2,
},
// Test - 3 4 clients allocated with 4 disks with 1 disk on each node.
{
isDistXL: true,
srvCmdConfig: serverCmdConfig{
endpoints: []*url.URL{{
Scheme: "http",
Host: "10.1.10.1:9000", Path: "/mnt/disk1",
}, {
Scheme: "http",
Host: "10.1.10.2:9000", Path: "/mnt/disk2",
}, {
Scheme: "http",
Host: "10.1.10.3:9000", Path: "/mnt/disk1",
}, {
Scheme: "http",
Host: "10.1.10.4:9000", Path: "/mnt/disk2"},
},
},
totalClients: 4,
},
}
// Evaluate and validate all test cases.
for i, testCase := range testCases {
globalIsDistXL = testCase.isDistXL
rclients := initRemoteControlClients(testCase.srvCmdConfig)
if len(rclients) != testCase.totalClients {
t.Errorf("Test %d, Expected %d, got %d RPC clients.", i+1, testCase.totalClients, len(rclients))
}
}
}
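initRemoteControlClients, and the test above, allocate one control client per remote host even when several disks share that host. A self-contained sketch of that per-host deduplication; the endpoint URLs are hypothetical:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	endpoints := []string{
		"http://10.1.10.1:9000/mnt/disk1",
		"http://10.1.10.1:9000/mnt/disk2",
		"http://10.1.10.2:9000/mnt/disk1",
	}
	seen := make(map[string]bool)
	var hosts []string
	for _, e := range endpoints {
		u, err := url.Parse(e)
		if err != nil {
			panic(err)
		}
		if seen[u.Host] {
			continue // one control client per host, not per disk
		}
		seen[u.Host] = true
		hosts = append(hosts, u.Host)
	}
	fmt.Println(hosts) // [10.1.10.1:9000 10.1.10.2:9000]
}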

@@ -1,106 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/url"
"path"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
var serviceCmd = cli.Command{
Name: "service",
Usage: "Service command line to manage Minio server.",
Action: serviceControl,
Flags: globalFlags,
CustomHelpTemplate: `NAME:
minio control {{.Name}} - {{.Usage}}
USAGE:
minio control {{.Name}} [status|restart|stop] http[s]://[access_key[:secret_key]@]server_ip:port/
FLAGS:
{{range .Flags}}{{.}}
{{end}}
EXAMPLES:
1. Prints current status information of the cluster.
$ minio control service status http://10.1.10.92:9000/
2. Restarts the server at the given URL and all the servers in the cluster.
$ minio control service restart http://localhost:9000/
3. Shuts down the server at the given URL and all the servers in the cluster.
$ minio control service stop http://localhost:9000/
`,
}
// "minio control service" entry point.
func serviceControl(c *cli.Context) {
if !c.Args().Present() && len(c.Args()) != 2 {
cli.ShowCommandHelpAndExit(c, "service", 1)
}
var signal serviceSignal
switch c.Args().Get(0) {
case "status":
signal = serviceStatus
case "restart":
signal = serviceRestart
case "stop":
signal = serviceStop
default:
fatalIf(errInvalidArgument, "Unrecognized service %s", c.Args().Get(0))
}
parsedURL, err := url.Parse(c.Args().Get(1))
fatalIf(err, "Unable to parse URL %s", c.Args().Get(1))
accessKey := serverConfig.GetCredential().AccessKeyID
secretKey := serverConfig.GetCredential().SecretAccessKey
// Username and password specified in URL will override prior configuration
if parsedURL.User != nil {
accessKey = parsedURL.User.Username()
if key, set := parsedURL.User.Password(); set {
secretKey = key
}
}
authCfg := &authConfig{
accessKey: accessKey,
secretKey: secretKey,
secureConn: parsedURL.Scheme == "https",
address: parsedURL.Host,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Control.LoginHandler",
}
client := newAuthClient(authCfg)
args := &ServiceArgs{
Signal: signal,
}
// This is necessary so that the remotes
// don't end up sending requests back and forth.
args.Remote = true
reply := &ServiceReply{}
err = client.Call("Control.ServiceHandler", args, reply)
fatalIf(err, "Service command %s failed for %s", c.Args().Get(0), parsedURL.Host)
if signal == serviceStatus {
console.Println(getStorageInfoMsg(reply.StorageInfo))
}
}
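serviceControl, like healControl and lockControl above, lets credentials embedded in the URL override the configured ones. A self-contained sketch of that net/url handling; the URL and the default keys are hypothetical:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	raw := "http://AKIA123:secret@localhost:9000/"
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	accessKey, secretKey := "default-access", "default-secret"
	// Username and password specified in the URL override prior configuration.
	if u.User != nil {
		accessKey = u.User.Username()
		if k, set := u.User.Password(); set {
			secretKey = k
		}
	}
	fmt.Println(u.Scheme == "https", u.Host, accessKey, secretKey)
}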

@@ -1,388 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"path"
"strconv"
"strings"
"sync"
"testing"
"time"
)
// API suite container common to both FS and XL.
type TestRPCControlSuite struct {
serverType string
testServer TestServer
testAuthConf *authConfig
}
// Setting up the test suite.
// Starting the Test server with temporary FS backend.
func (s *TestRPCControlSuite) SetUpSuite(c *testing.T) {
s.testServer = StartTestControlRPCServer(c, s.serverType)
s.testAuthConf = &authConfig{
address: s.testServer.Server.Listener.Addr().String(),
accessKey: s.testServer.AccessKey,
secretKey: s.testServer.SecretKey,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Control.LoginHandler",
}
}
// No longer called implicitly by "gopkg.in/check.v1" after all tests are run;
// instead it is invoked as explicit teardown code in each test function.
func (s *TestRPCControlSuite) TearDownSuite(c *testing.T) {
s.testServer.Stop()
}
func TestRPCControlLock(t *testing.T) {
//setup code
s := &TestRPCControlSuite{serverType: "XL"}
s.SetUpSuite(t)
//run test
s.testRPCControlLock(t)
//teardown code
s.TearDownSuite(t)
}
// Tests to validate the correctness of lock instrumentation control RPC end point.
func (s *TestRPCControlSuite) testRPCControlLock(c *testing.T) {
expectedResult := []lockStateCase{
// Test case - 1.
// Case where 10 read locks are held.
// Entry for any of the 10 reads locks has to be found.
// Since they held in a loop, Lock origin for first 10 read locks (opsID 0-9) should be the same.
{
volume: "my-bucket",
path: "my-object",
opsID: "0",
readLock: true,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 10,
expectedRunningLockCount: 10,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 10,
expectedVolPathRunningCount: 10,
expectedVolPathBlockCount: 0,
},
// Test case 2.
// Testing the existence of entry for the last read lock (read lock with opsID "9").
{
volume: "my-bucket",
path: "my-object",
opsID: "9",
readLock: true,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 10,
expectedRunningLockCount: 10,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 10,
expectedVolPathRunningCount: 10,
expectedVolPathBlockCount: 0,
},
// Test case 3.
// Hold a write lock, and it should block since 10 read locks
// on <"my-bucket", "my-object"> are still held.
{
volume: "my-bucket",
path: "my-object",
opsID: "10",
readLock: false,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 11,
expectedRunningLockCount: 10,
expectedBlockedLockCount: 1,
expectedVolPathLockCount: 11,
expectedVolPathRunningCount: 10,
expectedVolPathBlockCount: 1,
},
// Test case 4.
// Expected result when all the read locks are released and the blocked write lock acquires the lock.
{
volume: "my-bucket",
path: "my-object",
opsID: "10",
readLock: false,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 1,
expectedRunningLockCount: 1,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 1,
expectedVolPathRunningCount: 1,
expectedVolPathBlockCount: 0,
},
// Test case - 5.
// At the end after locks are released, its verified whether the counters are set to 0.
{
volume: "my-bucket",
path: "my-object",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 0,
expectedRunningLockCount: 0,
expectedBlockedLockCount: 0,
},
}
// used to make sure that the tests don't end till locks held in other go routines are released.
var wg sync.WaitGroup
// Hold 10 read locks; we should find the info about these in the RPC response.
// Then call the RPC control end point for obtaining lock instrumentation info.
for i := 0; i < 10; i++ {
nsMutex.RLock("my-bucket", "my-object", strconv.Itoa(i))
}
client := newAuthClient(s.testAuthConf)
defer client.Close()
args := &GenericArgs{}
reply := make(map[string]*SystemLockState)
// Call the lock instrumentation RPC end point.
err := client.Call("Control.LockInfo", args, &reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
// expected lock info.
expectedLockStats := expectedResult[0]
// verify the actual lock info with the expected one.
// verify the existence entry for first read lock (read lock with opsID "0").
verifyRPCLockInfoResponse(expectedLockStats, reply, c, 1)
expectedLockStats = expectedResult[1]
// verify the actual lock info with the expected one.
// verify the existence entry for last read lock (read lock with opsID "9").
verifyRPCLockInfoResponse(expectedLockStats, reply, c, 2)
// now hold a write lock in a different go routine and it should block since 10 read locks are
// still held.
wg.Add(1)
go func() {
defer wg.Done()
// blocks till all read locks are released.
nsMutex.Lock("my-bucket", "my-object", strconv.Itoa(10))
// Once the above attempt to lock is unblocked/acquired, we verify the stats and release the lock.
expectedWLockStats := expectedResult[3]
// Since the write lock acquired here, the number of blocked locks should reduce by 1 and
// count of running locks should increase by 1.
// Call the RPC control handle to fetch the lock instrumentation info.
reply = make(map[string]*SystemLockState)
// Call the lock instrumentation RPC end point.
err = client.Call("Control.LockInfo", args, &reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
verifyRPCLockInfoResponse(expectedWLockStats, reply, c, 4)
// release the write lock.
nsMutex.Unlock("my-bucket", "my-object", strconv.Itoa(10))
}()
// waiting for a second so that the attempt to acquire the write lock in
// the above go routines gets blocked.
time.Sleep(1 * time.Second)
// The write lock should have got blocked by now,
// check whether the entry for one blocked lock exists.
expectedLockStats = expectedResult[2]
// Call the RPC control handle to fetch the lock instrumentation info.
reply = make(map[string]*SystemLockState)
// Call the lock instrumentation RPC end point.
err = client.Call("Control.LockInfo", args, &reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
verifyRPCLockInfoResponse(expectedLockStats, reply, c, 3)
// Release all the read locks held.
// the blocked write lock in the above go routines should get unblocked.
for i := 0; i < 10; i++ {
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i))
}
wg.Wait()
// Since all the locks are released. There should not be any entry in the lock info.
// and all the counters should be set to 0.
reply = make(map[string]*SystemLockState)
// Call the lock instrumentation RPC end point.
err = client.Call("Control.LockInfo", args, &reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
for _, rpcLockInfo := range reply {
if rpcLockInfo.TotalAcquiredLocks != 0 && rpcLockInfo.TotalLocks != 0 && rpcLockInfo.TotalBlockedLocks != 0 {
c.Fatalf("The counters are not reset properly after all locks are released")
}
if len(rpcLockInfo.LocksInfoPerObject) != 0 {
c.Fatalf("Since all locks are released there shouldn't have been any lock info entry, but found %d", len(rpcLockInfo.LocksInfoPerObject))
}
}
}
func TestControlHealDiskMetadataH(t *testing.T) {
// Setup code
s := &TestRPCControlSuite{serverType: "XL"}
s.SetUpSuite(t)
// Run test
s.testControlHealFormatH(t)
// Teardown code
s.TearDownSuite(t)
}
// testControlHealFormatH - registers and calls the `HealFormatHandler`, asserting that the call succeeds.
func (s *TestRPCControlSuite) testControlHealFormatH(c *testing.T) {
// The suite has already started the test RPC server, just send RPC calls.
client := newAuthClient(s.testAuthConf)
defer client.Close()
args := &GenericArgs{}
reply := &GenericReply{}
err := client.Call("Control.HealFormatHandler", args, reply)
if err != nil {
c.Errorf("Test failed with <ERROR> %s", err)
}
}
func TestControlHealObjectH(t *testing.T) {
// Setup code
s := &TestRPCControlSuite{serverType: "XL"}
s.SetUpSuite(t)
// Run test
s.testControlHealObjectsH(t)
// Teardown code
s.TearDownSuite(t)
}
func (s *TestRPCControlSuite) testControlHealObjectsH(t *testing.T) {
client := newAuthClient(s.testAuthConf)
defer client.Close()
objAPI := newObjectLayerFn()
err := objAPI.MakeBucket("testbucket")
if err != nil {
t.Fatalf("Create bucket failed with <ERROR> %s", err)
}
datum := strings.NewReader("a")
_, err = objAPI.PutObject("testbucket", "testobject1", 1, datum, nil, "")
if err != nil {
t.Fatalf("Put object failed with <ERROR> %s", err)
}
datum = strings.NewReader("a")
_, err = objAPI.PutObject("testbucket", "testobject2", 1, datum, nil, "")
if err != nil {
t.Fatalf("Put object failed with <ERROR> %s", err)
}
args := &HealObjectArgs{
Bucket: "testbucket",
Objects: []ObjectInfo{
{
Name: "testobject1",
}, {
Name: "testobject2",
},
},
}
reply := &HealObjectReply{}
err = client.Call("Control.HealObjectsHandler", args, reply)
if err != nil {
t.Errorf("Test failed with <ERROR> %s", err)
}
}
func TestControlListObjectsHealH(t *testing.T) {
// Setup code
s := &TestRPCControlSuite{serverType: "XL"}
s.SetUpSuite(t)
// Run test
s.testControlListObjectsHealH(t)
// Teardown code
s.TearDownSuite(t)
}
func (s *TestRPCControlSuite) testControlListObjectsHealH(t *testing.T) {
client := newAuthClient(s.testAuthConf)
defer client.Close()
objAPI := newObjectLayerFn()
// Create a bucket
err := objAPI.MakeBucket("testbucket")
if err != nil {
t.Fatalf("Create bucket failed - %s", err)
}
r := strings.NewReader("0")
_, err = objAPI.PutObject("testbucket", "testObj-0", 1, r, nil, "")
if err != nil {
t.Fatalf("Object creation failed - %s", err)
}
args := &HealListArgs{
GenericArgs{}, "testbucket", "testObj-",
"", "", 100,
}
reply := &GenericReply{}
err = client.Call("Control.ListObjectsHealHandler", args, reply)
if err != nil {
t.Errorf("Test failed - %s", err)
}
}

@@ -16,10 +16,7 @@
package cmd
import (
"sync"
"time"
)
import "time"
// SystemLockState - Structure to fill the lock state of entire object storage.
// That is the total locks held, total calls blocked on locks and state of all the locks for the entire system.
@@ -94,93 +91,3 @@ func getSystemLockState() (SystemLockState, error) {
}
return lockState, nil
}
// Remote procedure call, calls LockInfo handler with given input args.
func (c *controlAPIHandlers) remoteLockInfoCall(args *GenericArgs, replies []SystemLockState) error {
var wg sync.WaitGroup
var errs = make([]error, len(c.RemoteControls))
// Send remote call to all neighboring peers fetch control lock info.
for index, clnt := range c.RemoteControls {
wg.Add(1)
go func(index int, client *AuthRPCClient) {
defer wg.Done()
errs[index] = client.Call("Control.RemoteLockInfo", args, &replies[index])
errorIf(errs[index], "Unable to initiate control lockInfo request to remote node %s", client.Node())
}(index, clnt)
}
wg.Wait()
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
// RemoteLockInfo - RPC control handler for `minio control lock`, used internally by LockInfo to
// make calls to neighboring peers.
func (c *controlAPIHandlers) RemoteLockInfo(args *GenericArgs, reply *SystemLockState) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
// Obtain the lock state information of the local system.
lockState, err := getSystemLockState()
// In case of error, return err to the RPC client.
if err != nil {
return err
}
*reply = lockState
return nil
}
// LockInfo - RPC control handler for `minio control lock list`. Returns the info of the locks held in the cluster.
func (c *controlAPIHandlers) LockInfo(args *GenericArgs, reply *map[string]SystemLockState) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
var replies = make([]SystemLockState, len(c.RemoteControls))
if args.Remote {
// Fetch lock states from all the remote peers.
args.Remote = false
if err := c.remoteLockInfoCall(args, replies); err != nil {
return err
}
}
rep := make(map[string]SystemLockState)
// The response containing the lock info.
for index, client := range c.RemoteControls {
rep[client.Node()] = replies[index]
}
// Obtain the lock state information of the local system.
lockState, err := getSystemLockState()
// In case of error, return err to the RPC client.
if err != nil {
return err
}
// Save the local node lock state.
rep[c.LocalNode] = lockState
// Set the reply.
*reply = rep
// Success.
return nil
}
// LockClearArgs - arguments for LockClear handler
type LockClearArgs struct {
GenericArgs
Bucket string
Object string
}
// LockClear - RPC control handler for `minio control lock clear`.
func (c *controlAPIHandlers) LockClear(args *LockClearArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
nsMutex.ForceUnlock(args.Bucket, args.Object)
*reply = GenericReply{}
return nil
}
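The removed LockInfo handler merges the local lock state with one entry per remote peer into a map keyed by node address. A minimal sketch of that aggregation; lockState and fetchRemote are hypothetical stand-ins for SystemLockState and the Control.RemoteLockInfo call:

package main

import "fmt"

type lockState struct {
	TotalLocks int64
}

// fetchRemote stands in for the per-peer Control.RemoteLockInfo RPC call.
func fetchRemote(node string) lockState {
	return lockState{TotalLocks: 1}
}

func main() {
	peers := []string{"10.1.10.2:9000", "10.1.10.3:9000"}
	rep := make(map[string]lockState)
	for _, p := range peers {
		rep[p] = fetchRemote(p)
	}
	// The local node's state is stored under its own address, just as the
	// removed handler does with rep[c.LocalNode] = lockState.
	rep["10.1.10.1:9000"] = lockState{TotalLocks: 3}
	for node, st := range rep {
		fmt.Printf("%s: %d locks\n", node, st.TotalLocks)
	}
}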

@@ -114,7 +114,6 @@ func registerApp() *cli.App {
registerCommand(serverCmd)
registerCommand(versionCmd)
registerCommand(updateCmd)
registerCommand(controlCmd)
// Set up app.
app := cli.NewApp()

@@ -101,12 +101,6 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) (http.Handler, error)
return nil, err
}
// Register controller rpc router.
err = registerControlRPCRouter(mux, srvCmdConfig)
if err != nil {
return nil, err
}
// Register RPC router for web related calls.
if err = registerBrowserPeerRPCRouter(mux); err != nil {
return nil, err

@@ -420,62 +420,6 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer {
return testRPCServer
}
// Initializes control RPC endpoints.
// The object layer will be a temp backend used for testing purposes.
func initTestControlRPCEndPoint(srvCmdConfig serverCmdConfig) http.Handler {
// Initialize router.
muxRouter := router.NewRouter()
registerControlRPCRouter(muxRouter, srvCmdConfig)
return muxRouter
}
// StartTestControlRPCServer - Creates a temp XL/FS backend and initializes control RPC end points,
// then starts a test server with those control RPC end points registered.
func StartTestControlRPCServer(t TestErrHandler, instanceType string) TestServer {
// create temporary backend for the test server.
nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal("Failed to create disks for the backend")
}
endpoints, err := parseStorageEndpoints(disks)
if err != nil {
t.Fatalf("%s", err)
}
root, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("%s", err)
}
// create an instance of TestServer.
testRPCServer := TestServer{}
// Get credential.
credentials := serverConfig.GetCredential()
testRPCServer.Root = root
testRPCServer.Disks = endpoints
testRPCServer.AccessKey = credentials.AccessKeyID
testRPCServer.SecretKey = credentials.SecretAccessKey
// create temporary backend for the test server.
objLayer, storageDisks, err := initObjectLayer(endpoints)
if err != nil {
t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
// Run TestServer.
testRPCServer.Server = httptest.NewServer(initTestControlRPCEndPoint(serverCmdConfig{
storageDisks: storageDisks,
}))
return testRPCServer
}
// Configure the server for the test run.
func newTestConfig(bucketLocation string) (rootPath string, err error) {
// Get test root.

@@ -41,3 +41,12 @@ var errDataTooLarge = errors.New("Object size larger than allowed limit")
// When upload object size is less than what was expected.
var errDataTooSmall = errors.New("Object size smaller than expected")
// errServerNotInitialized - server not initialized.
var errServerNotInitialized = errors.New("Server not initialized, please try again.")
// errServerVersionMismatch - server versions do not match.
var errServerVersionMismatch = errors.New("Server versions do not match.")
// errServerTimeMismatch - server times are too far apart.
var errServerTimeMismatch = errors.New("Server times are too far apart.")
