Introduce staticcheck for stricter builds (#7035)

master
Harshavardhana authored 6 years ago; committed by Nitish Tiwari
parent 4ba77a916d
commit df35d7db9d
71 changed files (changed lines per file):
  1. Makefile (32)
  2. cmd/admin-handlers.go (11)
  3. cmd/admin-handlers_test.go (99)
  4. cmd/benchmark-utils_test.go (7)
  5. cmd/bitrot-streaming.go (2)
  6. cmd/bucket-handlers_test.go (6)
  7. cmd/config-versions.go (9)
  8. cmd/config.go (23)
  9. cmd/dummy-object-layer_test.go (167)
  10. cmd/encryption-v1.go (2)
  11. cmd/endpoint.go (2)
  12. cmd/erasure-decode_test.go (2)
  13. cmd/format-fs.go (52)
  14. cmd/format-xl_test.go (2)
  15. cmd/fs-v1-helpers.go (2)
  16. cmd/fs-v1.go (3)
  17. cmd/gateway/azure/gateway-azure.go (3)
  18. cmd/gateway/gcs/gateway-gcs.go (1)
  19. cmd/gateway/oss/gateway-oss_test.go (3)
  20. cmd/handler-utils.go (2)
  21. cmd/healthcheck-handler_test.go (2)
  22. cmd/http/close.go (8)
  23. cmd/iam.go (25)
  24. cmd/local-locker.go (14)
  25. cmd/lock-rpc-server-common_test.go (10)
  26. cmd/lock-rpc-server_test.go (13)
  27. cmd/lock-stat.go (55)
  28. cmd/logger/logonce.go (11)
  29. cmd/logger/target/http/http.go (3)
  30. cmd/namespace-lock.go (11)
  31. cmd/namespace-lock_test.go (1)
  32. cmd/notification.go (26)
  33. cmd/object-handlers-common.go (5)
  34. cmd/object-handlers.go (8)
  35. cmd/object-handlers_test.go (6)
  36. cmd/object_api_suite_test.go (2)
  37. cmd/policy.go (25)
  38. cmd/posix.go (2)
  39. cmd/posix_test.go (6)
  40. cmd/rpc/pool.go (7)
  41. cmd/rpc/server_test.go (5)
  42. cmd/rpc_test.go (6)
  43. cmd/signature-v4-parser.go (3)
  44. cmd/test-utils_test.go (72)
  45. cmd/utils.go (15)
  46. cmd/web-handlers.go (7)
  47. cmd/web-handlers_test.go (22)
  48. cmd/xl-sets.go (25)
  49. cmd/xl-v1-bucket.go (11)
  50. cmd/xl-v1-healing-common_test.go (4)
  51. cmd/xl-v1.go (22)
  52. pkg/certs/certs_test.go (2)
  53. pkg/disk/disk_test.go (12)
  54. pkg/ellipses/ellipses.go (6)
  55. pkg/event/config.go (2)
  56. pkg/event/config_test.go (18)
  57. pkg/handlers/http-tracer_test.go (4)
  58. pkg/hash/reader_test.go (3)
  59. pkg/madmin/api.go (79)
  60. pkg/mimedb/db_test.go (3)
  61. pkg/quick/errorutil.go (11)
  62. pkg/s3select/message.go (1)
  63. pkg/s3select/select.go (4)
  64. pkg/s3select/sql/aggregation.go (4)
  65. pkg/s3select/sql/funceval.go (16)
  66. pkg/s3select/sql/parser.go (2)
  67. pkg/s3select/sql/timestampfuncs.go (1)
  68. pkg/s3select/sql/value.go (10)
  69. pkg/trie/trie_test.go (9)
  70. pkg/words/damerau-levenshtein_test.go (1)
  71. staticcheck.conf (1)

@@ -14,43 +14,32 @@ checks:
 getdeps:
 @echo "Installing golint" && go get -u golang.org/x/lint/golint
-@echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo
+@echo "Installing staticcheck" && go get -u honnef.co/go/tools/...
-@echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode
 @echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
-@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign
 crosscompile:
 @(env bash $(PWD)/buildscripts/cross-compile.sh)
-verifiers: getdeps vet fmt lint cyclo deadcode spelling
+verifiers: getdeps vet fmt lint staticcheck spelling
 vet:
 @echo "Running $@"
-@go tool vet cmd
-@go tool vet pkg
+@go vet github.com/minio/minio/...
 fmt:
 @echo "Running $@"
-@gofmt -d cmd
-@gofmt -d pkg
+@gofmt -d cmd/
+@gofmt -d pkg/
 lint:
 @echo "Running $@"
-@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd...
-@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg...
+@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
+@${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...
-ineffassign:
+staticcheck:
 @echo "Running $@"
-@${GOPATH}/bin/ineffassign .
+@${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
+@${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...
-cyclo:
-@echo "Running $@"
-@${GOPATH}/bin/gocyclo -over 200 cmd
-@${GOPATH}/bin/gocyclo -over 200 pkg
-deadcode:
-@echo "Running $@"
-@${GOPATH}/bin/deadcode -test $(shell go list ./...) || true
 spelling:
 @${GOPATH}/bin/misspell -locale US -error `find cmd/`
@@ -105,6 +94,7 @@ install: build
 clean:
 @echo "Cleaning up all the generated files"
 @find . -name '*.test' | xargs rm -fv
+@find . -name '*~' | xargs rm -fv
 @rm -rvf minio
 @rm -rvf build
 @rm -rvf release

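As context for the Makefile change above: staticcheck catches whole classes of problems that go vet and gofmt do not. A minimal, hypothetical Go snippet (not from the MinIO tree; all identifiers are made up) that compiles and passes go vet but that staticcheck reports:

package main

import "fmt"

func contains(names []string, want string) bool {
	found := false
	for _, name := range names {
		if name == want {
			found = true
		}
	}
	if found == true { // staticcheck S1002: omit comparison with the boolean constant true
		return true
	}
	return false // staticcheck S1008: the if/return pair can simply be `return found`
}

func main() {
	fmt.Println(contains([]string{"a", "b"}, "b"))
}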
@@ -381,7 +381,6 @@ func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request
 } else {
 writeErrorResponseJSON(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
 }
-return
 }
 func newLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEntry {
@@ -437,12 +436,20 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
 return
 }
-// Method only allowed in XL mode.
+// Method only allowed in Distributed XL mode.
 if !globalIsDistXL {
 writeErrorResponseJSON(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
 return
 }
+// Authenticate request
+// Setting the region as empty so as the mc server info command is irrespective to the region.
+adminAPIErr := checkAdminRequestAuthType(ctx, r, "")
+if adminAPIErr != ErrNone {
+writeErrorResponseJSON(w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
+return
+}
 thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
 if err != nil {
 writeErrorResponseJSON(w, toAdminAPIErr(ctx, err), r.URL)

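The PerfInfoHandler hunk above drops a bare return at the end of the handler body, the kind of redundant control flow staticcheck reports (check S1023). A small, hypothetical sketch of the same pattern:

package main

import "net/http"

// pingHandler is a made-up handler: the early return inside the if is useful,
// but a bare `return` as the last statement of the function would be redundant
// (staticcheck S1023) and can simply be dropped.
func pingHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/ping", pingHandler)
}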
@@ -29,7 +29,6 @@ import (
 "strings"
 "sync"
 "testing"
-"time"
 "github.com/gorilla/mux"
 "github.com/minio/minio/pkg/auth"
@@ -231,10 +230,9 @@ var (
 // adminXLTestBed - encapsulates subsystems that need to be setup for
 // admin-handler unit tests.
 type adminXLTestBed struct {
-configPath string
 xlDirs []string
 objLayer ObjectLayer
 router *mux.Router
 }
 // prepareAdminXLTestBed - helper function that setups a single-node
@@ -773,7 +771,7 @@ func TestSetConfigHandler(t *testing.T) {
 rec := httptest.NewRecorder()
 adminTestBed.router.ServeHTTP(rec, req)
-respBody := string(rec.Body.Bytes())
+respBody := rec.Body.String()
 if rec.Code != http.StatusBadRequest ||
 !strings.Contains(respBody, "Configuration data provided exceeds the allowed maximum of") {
 t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
@@ -792,7 +790,7 @@ func TestSetConfigHandler(t *testing.T) {
 rec := httptest.NewRecorder()
 adminTestBed.router.ServeHTTP(rec, req)
-respBody := string(rec.Body.Bytes())
+respBody := rec.Body.String()
 if rec.Code != http.StatusBadRequest ||
 !strings.Contains(respBody, "JSON configuration provided is of incorrect format") {
 t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
@@ -879,90 +877,3 @@ func TestToAdminAPIErrCode(t *testing.T) {
 }
 }
 }
func mkHealStartReq(t *testing.T, bucket, prefix string,
opts madmin.HealOpts) *http.Request {
body, err := json.Marshal(opts)
if err != nil {
t.Fatalf("Unable marshal heal opts")
}
path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket)
if bucket != "" && prefix != "" {
path += "/" + prefix
}
req, err := newTestRequest("POST", path,
int64(len(body)), bytes.NewReader(body))
if err != nil {
t.Fatalf("Failed to construct request - %v", err)
}
cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign request - %v", err)
}
return req
}
func mkHealStatusReq(t *testing.T, bucket, prefix,
clientToken string) *http.Request {
path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket)
if bucket != "" && prefix != "" {
path += "/" + prefix
}
path += fmt.Sprintf("?clientToken=%s", clientToken)
req, err := newTestRequest("POST", path, 0, nil)
if err != nil {
t.Fatalf("Failed to construct request - %v", err)
}
cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
t.Fatalf("Failed to sign request - %v", err)
}
return req
}
func collectHealResults(t *testing.T, adminTestBed *adminXLTestBed, bucket,
prefix, clientToken string, timeLimitSecs int) madmin.HealTaskStatus {
var res, cur madmin.HealTaskStatus
// loop and fetch heal status. have a time-limit to loop over
// all statuses.
timeLimit := UTCNow().Add(time.Second * time.Duration(timeLimitSecs))
for cur.Summary != healStoppedStatus && cur.Summary != healFinishedStatus {
if UTCNow().After(timeLimit) {
t.Fatalf("heal-status loop took too long - clientToken: %s", clientToken)
}
req := mkHealStatusReq(t, bucket, prefix, clientToken)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if http.StatusOK != rec.Code {
t.Errorf("Unexpected status code - got %d but expected %d",
rec.Code, http.StatusOK)
break
}
err := json.NewDecoder(rec.Body).Decode(&cur)
if err != nil {
t.Errorf("unable to unmarshal resp: %v", err)
break
}
// all results are accumulated into a slice
// and returned to caller in the end
allItems := append(res.Items, cur.Items...)
res = cur
res.Items = allItems
time.Sleep(time.Millisecond * 200)
}
return res
}

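The test changes above swap string(rec.Body.Bytes()) for rec.Body.String(), the bytes.Buffer simplification staticcheck suggests (check S1030). A stand-alone sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	buf.WriteString("hello")

	// Both produce the same string, but the second avoids the extra
	// conversion and is what staticcheck's S1030 check suggests.
	s1 := string(buf.Bytes())
	s2 := buf.String()
	fmt.Println(s1 == s2) // true
}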
@@ -100,7 +100,6 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 b.Fatal(err)
 }
-md5hex := getMD5Hash(textData)
 sha256hex := ""
 var textPartData []byte
@@ -117,7 +116,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 } else {
 textPartData = textData[j*partSize:]
 }
-md5hex = getMD5Hash([]byte(textPartData))
+md5hex := getMD5Hash([]byte(textPartData))
 var partInfo PartInfo
 partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
 mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
@@ -230,10 +229,8 @@ func getRandomByte() []byte {
 const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
 // seeding the random number generator.
 rand.Seed(UTCNow().UnixNano())
-var b byte
 // pick a character randomly.
-b = letterBytes[rand.Intn(len(letterBytes))]
-return []byte{b}
+return []byte{letterBytes[rand.Intn(len(letterBytes))]}
 }
 // picks a random byte and repeats it to size bytes.

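In the benchmark above, the md5hex value computed before the loop was never read before being overwritten inside the loop, which staticcheck reports as a value that is never used (SA4006). A hypothetical illustration of the fix:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func md5Hex(data []byte) string {
	sum := md5.Sum(data)
	return hex.EncodeToString(sum[:])
}

func main() {
	parts := [][]byte{[]byte("part-1"), []byte("part-2")}

	// Bad: a value computed here and never read before the loop overwrites
	// it is exactly what SA4006 flags, e.g.:
	//   digest := md5Hex([]byte("whole-object"))

	// Good: declare the value where it is actually produced and used.
	for _, part := range parts {
		digest := md5Hex(part)
		fmt.Println(digest)
	}
}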
@@ -146,7 +146,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
 }
 b.h.Write(buf)
-if bytes.Compare(b.h.Sum(nil), b.hashBytes) != 0 {
+if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
 err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))}
 logger.LogIf(context.Background(), err)
 return 0, err

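The bitrot check above is the classic staticcheck S1004 case: bytes.Compare(a, b) != 0 reads better, and is documented, as !bytes.Equal(a, b). For example:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	computed := []byte{0xde, 0xad}
	expected := []byte{0xde, 0xad}

	// Instead of bytes.Compare(computed, expected) != 0 ...
	if !bytes.Equal(computed, expected) {
		fmt.Println("hash mismatch")
		return
	}
	fmt.Println("hash ok")
}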
@@ -95,12 +95,12 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
 }
 if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass {
-t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), string(rec.Body.Bytes()))
+t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), rec.Body.String())
 }
 errorResponse := APIErrorResponse{}
 err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
 if err != nil && !testCase.shouldPass {
-t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(rec.Body.Bytes()))
+t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
 }
 if errorResponse.Resource != testCase.errorResponse.Resource {
 t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
@@ -131,7 +131,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 errorResponse = APIErrorResponse{}
 err = xml.Unmarshal(recV2.Body.Bytes(), &errorResponse)
 if err != nil && !testCase.shouldPass {
-t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(recV2.Body.Bytes()))
+t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, recV2.Body.String())
 }
 if errorResponse.Resource != testCase.errorResponse.Resource {
 t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)

@@ -263,9 +263,6 @@ type serverConfigV7 struct {
 // Notification queue configuration.
 Notify notifierV1 `json:"notify"`
-// Read Write mutex.
-rwMutex *sync.RWMutex
 }
 // serverConfigV8 server configuration version '8'. Adds NATS notifier
@@ -282,9 +279,6 @@ type serverConfigV8 struct {
 // Notification queue configuration.
 Notify notifierV1 `json:"notify"`
-// Read Write mutex.
-rwMutex *sync.RWMutex
 }
 // serverConfigV9 server configuration version '9'. Adds PostgreSQL
@@ -301,9 +295,6 @@ type serverConfigV9 struct {
 // Notification queue configuration.
 Notify notifierV1 `json:"notify"`
-// Read Write mutex.
-rwMutex *sync.RWMutex
 }
 type loggerV7 struct {

@@ -127,23 +127,18 @@ func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
 // of the object layer.
 // - Write quorum not met when upgrading configuration
 // version is needed.
-retryTimerCh := newRetryTimerSimple(doneCh)
-for {
-select {
-case _ = <-retryTimerCh:
-err := initConfig(objAPI)
-if err != nil {
-if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
-strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
-logger.Info("Waiting for configuration to be initialized..")
-continue
-}
-return err
-}
-return nil
-}
-break
-}
-return nil
+for range newRetryTimerSimple(doneCh) {
+if err := initConfig(objAPI); err != nil {
+if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
+strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
+logger.Info("Waiting for configuration to be initialized..")
+continue
+}
+return err
+}
+return nil
+}
+return nil
 }
 // NewConfigSys - creates new config system object.

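The ConfigSys.Init rewrite above, and the matching rewrites in format-fs.go, iam.go, notification.go and policy.go below, replace a for { select { case <-ch: } } loop with a plain range over the retry channel, which is what staticcheck's S1000 check (use a plain channel receive instead of a single-case select) points at. A self-contained sketch of the same shape, with made-up names:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotReady = errors.New("not ready") // hypothetical transient error

// initWithRetry retries initFn on every tick of retryCh; ranging over the
// channel replaces the old `for { select { case <-retryCh: ... } }` loop.
func initWithRetry(retryCh <-chan int, initFn func() error) error {
	for range retryCh {
		if err := initFn(); err != nil {
			if err == errNotReady {
				fmt.Println("waiting for subsystem to be initialized..")
				continue
			}
			return err
		}
		return nil
	}
	return nil
}

func main() {
	retryCh := make(chan int)
	go func() {
		for i := 0; i < 3; i++ {
			retryCh <- i
			time.Sleep(10 * time.Millisecond)
		}
		close(retryCh)
	}()

	attempts := 0
	err := initWithRetry(retryCh, func() error {
		attempts++
		if attempts < 2 {
			return errNotReady
		}
		return nil
	})
	fmt.Println("init error:", err)
}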
@@ -1,167 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"io"
"net/http"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/policy"
)
type DummyObjectLayer struct{}
func (api *DummyObjectLayer) Shutdown(context.Context) (err error) {
return
}
func (api *DummyObjectLayer) StorageInfo(context.Context) (si StorageInfo) {
return
}
func (api *DummyObjectLayer) MakeBucketWithLocation(ctx context.Context, bucket string, location string) (err error) {
return
}
func (api *DummyObjectLayer) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
return
}
func (api *DummyObjectLayer) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
return
}
func (api *DummyObjectLayer) DeleteBucket(ctx context.Context, bucket string) (err error) {
return
}
func (api *DummyObjectLayer) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
return
}
func (api *DummyObjectLayer) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return
}
func (api *DummyObjectLayer) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lock LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
return
}
func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
return
}
func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) DeleteObject(ctx context.Context, bucket, object string) (err error) {
return
}
func (api *DummyObjectLayer) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
return
}
func (api *DummyObjectLayer) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
return
}
func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error) {
return
}
func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
return
}
func (api *DummyObjectLayer) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
return
}
func (api *DummyObjectLayer) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
return
}
func (api *DummyObjectLayer) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return
}
func (api *DummyObjectLayer) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
return
}
func (api *DummyObjectLayer) HealFormat(ctx context.Context, dryRun bool) (item madmin.HealResultItem, err error) {
return
}
func (api *DummyObjectLayer) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (items madmin.HealResultItem, err error) {
return
}
func (api *DummyObjectLayer) HealObject(ctx context.Context, bucket, object string, dryRun, remove bool) (item madmin.HealResultItem, err error) {
return
}
func (api *DummyObjectLayer) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) {
return
}
func (api *DummyObjectLayer) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (info ListObjectsInfo, err error) {
return
}
func (api *DummyObjectLayer) SetBucketPolicy(context.Context, string, *policy.Policy) (err error) {
return
}
func (api *DummyObjectLayer) GetBucketPolicy(context.Context, string) (bucketPolicy *policy.Policy, err error) {
return
}
func (api *DummyObjectLayer) RefreshBucketPolicy(context.Context, string) (err error) {
return
}
func (api *DummyObjectLayer) DeleteBucketPolicy(context.Context, string) (err error) {
return
}
func (api *DummyObjectLayer) IsNotificationSupported() (b bool) {
return
}
func (api *DummyObjectLayer) IsListenBucketSupported() (b bool) {
return
}
func (api *DummyObjectLayer) IsEncryptionSupported() (b bool) {
return
}
func (api *DummyObjectLayer) IsCompressionSupported() (b bool) {
return
}

@@ -742,7 +742,7 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
 return writer, encStartOffset, encLength, nil
 }
-seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
+_, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
 var partStartIndex int
 var partStartOffset = startOffset

@@ -94,7 +94,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
 // - Scheme field must contain "http" or "https"
 // - All field should be empty except Host and Path.
 if !((u.Scheme == "http" || u.Scheme == "https") &&
-u.User == nil && u.Opaque == "" && u.ForceQuery == false && u.RawQuery == "" && u.Fragment == "") {
+u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
 return ep, fmt.Errorf("invalid URL endpoint format")
 }

@@ -28,7 +28,7 @@ import (
 humanize "github.com/dustin/go-humanize"
 )
-func (d badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
+func (a badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
 return 0, errFaultyDisk
 }

@@ -341,35 +341,35 @@ func formatFSFixDeploymentID(fsFormatPath string) error {
 doneCh := make(chan struct{})
 defer close(doneCh)
-retryTimerCh := newRetryTimerSimple(doneCh)
-for {
-select {
-case <-retryTimerCh:
-wlk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0)
-if err == lock.ErrAlreadyLocked {
-// Lock already present, sleep and attempt again
-logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime())
-continue
-}
-if err != nil {
-return err
-}
-defer wlk.Close()
-err = jsonLoad(wlk, format)
-if err != nil {
-return err
-}
-// Check if it needs to be updated
-if format.ID != "" {
-return nil
-}
-format.ID = mustGetUUID()
-return jsonSave(wlk, format)
-}
-}
+var wlk *lock.LockedFile
+for range newRetryTimerSimple(doneCh) {
+wlk, err = lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0)
+if err == lock.ErrAlreadyLocked {
+// Lock already present, sleep and attempt again
+logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime())
+continue
+}
+if err != nil {
+break
+}
+if err = jsonLoad(wlk, format); err != nil {
+break
+}
+// Check if format needs to be updated
+if format.ID != "" {
+err = nil
+break
+}
+format.ID = mustGetUUID()
+if err = jsonSave(wlk, format); err != nil {
+break
+}
+}
+if wlk != nil {
+wlk.Close()
+}
+return err
 }

@@ -522,7 +522,7 @@ func TestGetXLID(t *testing.T) {
 }
 formats[2].ID = "bad-id"
-if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat {
+if _, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat {
 t.Fatal("Unexpected Success")
 }
 }

@@ -294,7 +294,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos
 // Seek to the requested offset.
 if offset > 0 {
-_, err = fr.Seek(offset, os.SEEK_SET)
+_, err = fr.Seek(offset, io.SeekStart)
 if err != nil {
 logger.LogIf(ctx, err)
 return nil, 0, err

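fsOpenFile above replaces the deprecated os.SEEK_SET constant with io.SeekStart, the kind of use staticcheck's deprecation check (SA1019) reports. A small sketch:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

func main() {
	f, err := ioutil.TempFile("", "seek-demo")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString("0123456789"); err != nil {
		fmt.Println(err)
		return
	}

	// os.SEEK_SET is deprecated; io.SeekStart is the replacement constant.
	if _, err := f.Seek(4, io.SeekStart); err != nil {
		fmt.Println(err)
		return
	}

	buf := make([]byte, 3)
	n, _ := f.Read(buf)
	fmt.Println(string(buf[:n])) // "456"
}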
@@ -532,8 +532,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 nsUnlocker()
 return nil, toObjectErr(err, bucket, object)
 }
-var reader io.Reader
-reader = io.LimitReader(readCloser, length)
+reader := io.LimitReader(readCloser, length)
 closeFn := func() {
 readCloser.Close()
 }

@@ -740,6 +740,9 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *
 if data.Size() < azureBlockSize/10 {
 blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(ctx, opts.UserDefined)
+if err != nil {
+return objInfo, azureToObjectError(err, bucket, object)
+}
 if err = blob.CreateBlockBlobFromReader(data, nil); err != nil {
 return objInfo, azureToObjectError(err, bucket, object)
 }

@@ -888,6 +888,7 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r
 object := l.client.Bucket(bucket).Object(key)
 w := object.NewWriter(ctx)
+// Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below
 // the current chunk-size of the writer. This avoids an unnecessary memory allocation.
 if data.Size() < int64(w.ChunkSize) {

@@ -111,9 +111,8 @@ func TestOSSToObjectError(t *testing.T) {
 func TestS3MetaToOSSOptions(t *testing.T) {
 var err error
-var headers map[string]string
-headers = map[string]string{
+headers := map[string]string{
 "x-amz-meta-invalid_meta": "value",
 }
 _, err = appendS3MetaToOSSOptions(context.Background(), nil, headers)

@@ -367,11 +367,9 @@ func getResource(path string, host string, domain string) (string, error) {
 // If none of the http routes match respond with MethodNotAllowed, in JSON
 func notFoundHandlerJSON(w http.ResponseWriter, r *http.Request) {
 writeErrorResponseJSON(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-return
 }
 // If none of the http routes match respond with MethodNotAllowed
 func notFoundHandler(w http.ResponseWriter, r *http.Request) {
 writeErrorResponse(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-return
 }

@@ -34,7 +34,7 @@ func TestGoroutineCountCheck(t *testing.T) {
 // Make goroutines -- to make sure number of go-routines is higher than threshold
 if tt.threshold == 5 || tt.threshold == 6 {
 for i := 0; i < 6; i++ {
-go time.Sleep(5)
+go time.Sleep(5 * time.Nanosecond)
 }
 }
 if err := goroutineCountCheck(tt.threshold); (err != nil) != tt.wantErr {

@@ -19,16 +19,8 @@ package http
 import (
 "io"
 "io/ioutil"
-"sync"
 )
-var b512pool = sync.Pool{
-New: func() interface{} {
-buf := make([]byte, 512)
-return &buf
-},
-}
 // DrainBody close non nil response with any response Body.
 // convenient wrapper to drain any remaining data on response body.
 //

@@ -94,23 +94,20 @@ func (sys *IAMSys) Init(objAPI ObjectLayer) error {
 // the following reasons:
 // - Read quorum is lost just after the initialization
 // of the object layer.
-retryTimerCh := newRetryTimerSimple(doneCh)
-for {
-select {
-case _ = <-retryTimerCh:
-// Load IAMSys once during boot.
-if err := sys.refresh(objAPI); err != nil {
-if err == errDiskNotFound ||
-strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
-strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
-logger.Info("Waiting for IAM subsystem to be initialized..")
-continue
-}
-return err
-}
-return nil
-}
-break
-}
-return nil
+for range newRetryTimerSimple(doneCh) {
+// Load IAMSys once during boot.
+if err := sys.refresh(objAPI); err != nil {
+if err == errDiskNotFound ||
+strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
+strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
+logger.Info("Waiting for IAM subsystem to be initialized..")
+continue
+}
+return err
+}
+return nil
+}
+return nil
 }
 // DeleteCannedPolicy - deletes a canned policy.

@@ -145,11 +145,9 @@ func (l *localLocker) ForceUnlock(args dsync.LockArgs) (reply bool, err error) {
 if len(args.UID) != 0 {
 return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
 }
-if _, ok := l.lockMap[args.Resource]; ok {
-// Only clear lock when it is taken
-// Remove the lock (irrespective of write or read lock)
-delete(l.lockMap, args.Resource)
-}
+// Only clear lock when it is taken
+// Remove the lock (irrespective of write or read lock)
+delete(l.lockMap, args.Resource)
 return true, nil
 }
@@ -159,11 +157,7 @@ func (l *localLocker) DupLockMap() map[string][]lockRequesterInfo {
 lockCopy := make(map[string][]lockRequesterInfo)
 for k, v := range l.lockMap {
-var lockSlice []lockRequesterInfo
-for _, lockInfo := range v {
-lockSlice = append(lockSlice, lockInfo)
-}
-lockCopy[k] = lockSlice
+lockCopy[k] = append(lockCopy[k], v...)
 }
 return lockCopy
 }

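DupLockMap above now copies each slice with a single append(dst, src...) instead of an element-by-element loop (staticcheck S1011, use a single append to concatenate two slices). For example:

package main

import "fmt"

func main() {
	src := map[string][]string{
		"bucket/object": {"lock-1", "lock-2"},
	}

	dst := make(map[string][]string, len(src))
	for k, v := range src {
		// One append with `v...` replaces the per-element copy loop.
		dst[k] = append(dst[k], v...)
	}
	fmt.Println(dst)
}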
@@ -41,7 +41,7 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
 // first test by simulating item has already been deleted
 locker.ll.removeEntryIfExists(nlrip)
 {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo(nil)
 if !reflect.DeepEqual(expectedLri, gotLri) {
 t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -52,7 +52,7 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
 locker.ll.lockMap["name"] = []lockRequesterInfo{lri} // add item
 locker.ll.removeEntryIfExists(nlrip)
 {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo(nil)
 if !reflect.DeepEqual(expectedLri, gotLri) {
 t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -87,7 +87,7 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
 lockRequesterInfo2,
 }
-lri, _ := locker.ll.lockMap["name"]
+lri := locker.ll.lockMap["name"]
 // test unknown uid
 if locker.ll.removeEntry("name", "unknown-uid", &lri) {
@@ -97,7 +97,7 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
 if !locker.ll.removeEntry("name", "0123-4567", &lri) {
 t.Errorf("Expected %#v, got %#v", true, false)
 } else {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo{lockRequesterInfo2}
 if !reflect.DeepEqual(expectedLri, gotLri) {
 t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -107,7 +107,7 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
 if !locker.ll.removeEntry("name", "89ab-cdef", &lri) {
 t.Errorf("Expected %#v, got %#v", true, false)
 } else {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo(nil)
 if !reflect.DeepEqual(expectedLri, gotLri) {
 t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)

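The lock map lookups above drop the unused second return value: v, _ := m[k] is the same as v := m[k], and staticcheck reports the unnecessary blank identifier (S1005). A tiny illustration:

package main

import "fmt"

func main() {
	locks := map[string][]string{"name": {"0123-4567"}}

	// The blank identifier adds nothing here (staticcheck S1005), e.g.:
	//   got, _ := locks["name"]

	// A plain index expression already yields the zero value for missing keys.
	got := locks["name"]
	missing := locks["other"] // nil slice, no panic

	fmt.Println(got, missing == nil)
}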
@@ -94,7 +94,7 @@ func TestLockRpcServerLock(t *testing.T) {
 if !result {
 t.Errorf("Expected %#v, got %#v", true, result)
 } else {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo{
 {
 Writer: true,
@@ -174,7 +174,7 @@ func TestLockRpcServerUnlock(t *testing.T) {
 if !result {
 t.Errorf("Expected %#v, got %#v", true, result)
 } else {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo(nil)
 if !testLockEquality(expectedLri, gotLri) {
 t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -210,7 +210,7 @@ func TestLockRpcServerRLock(t *testing.T) {
 if !result {
 t.Errorf("Expected %#v, got %#v", true, result)
 } else {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo{
 {
 Writer: false,
@@ -312,7 +312,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
 if !result {
 t.Errorf("Expected %#v, got %#v", true, result)
 } else {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo{
 {
 Writer: false,
@@ -336,7 +336,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
 if !result {
 t.Errorf("Expected %#v, got %#v", true, result)
 } else {
-gotLri, _ := locker.ll.lockMap["name"]
+gotLri := locker.ll.lockMap["name"]
 expectedLri := []lockRequesterInfo(nil)
 if !testLockEquality(expectedLri, gotLri) {
 t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -531,9 +531,6 @@ func TestLockServerInit(t *testing.T) {
 globalIsDistXL = testCase.isDistXL
 globalLockServer = nil
 _, _ = newDsyncNodes(testCase.endpoints)
-if err != nil {
-t.Fatalf("Got unexpected error initializing lock servers: %v", err)
-}
 if globalLockServer == nil && testCase.isDistXL {
 t.Errorf("Test %d: Expected initialized lock RPC receiver, but got uninitialized", i+1)
 }

@@ -1,55 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// lockStat - encapsulates total, blocked and granted lock counts.
type lockStat struct {
total int64
blocked int64
granted int64
}
// lockWaiting - updates lock stat when a lock becomes blocked.
func (ls *lockStat) lockWaiting() {
ls.blocked++
ls.total++
}
// lockGranted - updates lock stat when a lock is granted.
func (ls *lockStat) lockGranted() {
ls.blocked--
ls.granted++
}
// lockTimedOut - updates lock stat when a lock is timed out.
func (ls *lockStat) lockTimedOut() {
ls.blocked--
ls.total--
}
// lockRemoved - updates lock stat when a lock is removed, by Unlock
// or ForceUnlock.
func (ls *lockStat) lockRemoved(granted bool) {
if granted {
ls.granted--
ls.total--
} else {
ls.blocked--
ls.total--
}
}

@@ -56,12 +56,11 @@ func (l *logOnceType) logOnceIf(ctx context.Context, err error, id interface{})
 // Cleanup the map every 30 minutes so that the log message is printed again for the user to notice.
 func (l *logOnceType) cleanupRoutine() {
 for {
-select {
-case <-time.After(time.Minute * 30):
-l.Lock()
-l.IDMap = make(map[interface{}]error)
-l.Unlock()
-}
+l.Lock()
+l.IDMap = make(map[interface{}]error)
+l.Unlock()
+time.Sleep(30 * time.Minute)
 }
 }

@@ -50,6 +50,9 @@ func (h *Target) startHTTPLogger() {
 }
 req, err := gohttp.NewRequest("POST", h.endpoint, bytes.NewBuffer(logJSON))
+if err != nil {
+continue
+}
 req.Header.Set("Content-Type", "application/json")
 resp, err := h.client.Do(req)

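The logger target change above adds a check on the error returned by http.NewRequest; previously that error was assigned but never read before being overwritten by client.Do, which staticcheck flags. A minimal sketch of the guarded pattern (the endpoint URL here is made up):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func post(client *http.Client, endpoint string, payload []byte) error {
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		// Without this check the error from NewRequest is assigned but never
		// read before being overwritten by client.Do (staticcheck SA4006).
		return err
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	fmt.Println(post(http.DefaultClient, "http://127.0.0.1:0/log", []byte(`{}`)))
}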
@@ -102,7 +102,6 @@ func newNSLock(isDistXL bool) *nsLockMap {
 nsMutex := nsLockMap{
 isDistXL: isDistXL,
 lockMap: make(map[nsParam]*nsLock),
-counters: &lockStat{},
 }
 return &nsMutex
 }
@@ -127,9 +126,6 @@ type nsLock struct {
 // nsLockMap - namespace lock map, provides primitives to Lock,
 // Unlock, RLock and RUnlock.
 type nsLockMap struct {
-// Lock counter used for lock debugging.
-counters *lockStat
 // Indicates if namespace is part of a distributed setup.
 isDistXL bool
 lockMap map[nsParam]*nsLock
@@ -259,11 +255,8 @@ func (n *nsLockMap) ForceUnlock(volume, path string) {
 dsync.NewDRWMutex(pathJoin(volume, path), globalDsync).ForceUnlock()
 }
-param := nsParam{volume, path}
-if _, found := n.lockMap[param]; found {
-// Remove lock from the map.
-delete(n.lockMap, param)
-}
+// Remove lock from the map.
+delete(n.lockMap, nsParam{volume, path})
 }
 // lockInstance - frontend/top-level interface for namespace locks.

@@ -47,7 +47,6 @@ func TestNamespaceLockTest(t *testing.T) {
 unlk func(s1, s2, s3 string)
 rlk func(s1, s2, s3 string, t time.Duration) bool
 runlk func(s1, s2, s3 string)
-lkCount int
 lockedRefCount uint
 unlockedRefCount uint
 shouldPass bool

@@ -271,6 +271,9 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
 isDir: false,
 sys: nil,
 })
+if zerr != nil {
+return profilingDataFound
+}
 zwriter, zerr := zipWriter.CreateHeader(header)
 if zerr != nil {
@@ -602,22 +605,19 @@ func (sys *NotificationSys) Init(objAPI ObjectLayer) error {
 // the following reasons:
 // - Read quorum is lost just after the initialization
 // of the object layer.
-retryTimerCh := newRetryTimerSimple(doneCh)
-for {
-select {
-case _ = <-retryTimerCh:
-if err := sys.refresh(objAPI); err != nil {
-if err == errDiskNotFound ||
-strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
-strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
-logger.Info("Waiting for notification subsystem to be initialized..")
-continue
-}
-return err
-}
-return nil
-}
-break
-}
-return nil
+for range newRetryTimerSimple(doneCh) {
+if err := sys.refresh(objAPI); err != nil {
+if err == errDiskNotFound ||
+strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
+strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
+logger.Info("Waiting for notification subsystem to be initialized..")
+continue
+}
+return err
+}
+return nil
+}
+return nil
 }
 // AddRulesMap - adds rules map for bucket name.

@@ -211,10 +211,7 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectIn
 func ifModifiedSince(objTime time.Time, givenTime time.Time) bool {
 // The Date-Modified header truncates sub-second precision, so
 // use mtime < t+1s instead of mtime <= t to check for unmodified.
-if objTime.After(givenTime.Add(1 * time.Second)) {
-return true
-}
-return false
+return objTime.After(givenTime.Add(1 * time.Second))
 }
 // canonicalizeETag returns ETag with leading and trailing double-quotes removed,

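ifModifiedSince above collapses if cond { return true }; return false into return cond, staticcheck's S1008 simplification. The same idea in isolation:

package main

import (
	"fmt"
	"time"
)

// modifiedSince reports whether objTime is strictly newer than givenTime plus
// one second, returning the condition directly instead of via if/return.
func modifiedSince(objTime, givenTime time.Time) bool {
	return objTime.After(givenTime.Add(1 * time.Second))
}

func main() {
	now := time.Now()
	fmt.Println(modifiedSince(now.Add(2*time.Second), now)) // true
	fmt.Println(modifiedSince(now, now))                    // false
}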
@@ -838,7 +838,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 reader = pipeReader
 length = -1
-snappyWriter := snappy.NewWriter(pipeWriter)
+snappyWriter := snappy.NewBufferedWriter(pipeWriter)
 go func() {
 // Compress the decompressed source object.
@@ -1217,7 +1217,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
 pipeReader, pipeWriter := io.Pipe()
-snappyWriter := snappy.NewWriter(pipeWriter)
+snappyWriter := snappy.NewBufferedWriter(pipeWriter)
 var actualReader *hash.Reader
 actualReader, err = hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
@@ -1660,7 +1660,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 reader = pipeReader
 length = -1
-snappyWriter := snappy.NewWriter(pipeWriter)
+snappyWriter := snappy.NewBufferedWriter(pipeWriter)
 go func() {
 // Compress the decompressed source object.
@@ -1902,7 +1902,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 isCompressed := false
 if objectAPI.IsCompressionSupported() && compressPart {
 pipeReader, pipeWriter = io.Pipe()
-snappyWriter := snappy.NewWriter(pipeWriter)
+snappyWriter := snappy.NewBufferedWriter(pipeWriter)
 var actualReader *hash.Reader
 actualReader, err = hash.NewReader(reader, size, md5hex, sha256hex, actualSize)

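The four handler hunks above switch from snappy.NewWriter to snappy.NewBufferedWriter; the unbuffered constructor appears to be deprecated in github.com/golang/snappy, which is the sort of use staticcheck's deprecation check reports. A rough, self-contained sketch of the streaming setup, assuming that package:

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	pipeReader, pipeWriter := io.Pipe()

	// NewBufferedWriter is the recommended constructor for the snappy stream
	// format; everything written here appears compressed on pipeReader.
	snappyWriter := snappy.NewBufferedWriter(pipeWriter)

	go func() {
		_, err := snappyWriter.Write([]byte("object payload"))
		snappyWriter.Close()
		pipeWriter.CloseWithError(err)
	}()

	compressed, err := ioutil.ReadAll(pipeReader)
	fmt.Println(len(compressed) > 0, err)
}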
@@ -855,6 +855,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 shouldPass: true,
+fault: None,
 },
 // Test case - 2
 // Small chunk size.
@@ -869,6 +870,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 shouldPass: true,
+fault: None,
 },
 // Test case - 3
 // Empty data
@@ -897,6 +899,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 accessKey: "",
 secretKey: "",
 shouldPass: false,
+fault: None,
 },
 // Test case - 5
 // Wrong auth header returns as bad request.
@@ -912,6 +915,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 secretKey: credentials.SecretKey,
 shouldPass: false,
 removeAuthHeader: true,
+fault: None,
 },
 // Test case - 6
 // Large chunk size.. also passes.
@@ -926,6 +930,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 shouldPass: true,
+fault: None,
 },
 // Test case - 7
 // Chunk with malformed encoding.
@@ -1017,6 +1022,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 secretKey: credentials.SecretKey,
 shouldPass: true,
 contentEncoding: "aws-chunked,gzip",
+fault: None,
 },
 }
 // Iterating over the cases, fetching the object validating the response.

@@ -465,7 +465,7 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan
 if err != nil {
 t.Fatalf("%s: <ERROR> %s", instanceType, err)
 }
-if string(bytesBuffer.Bytes()) != "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." {
+if bytesBuffer.String() != "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." {
 t.Errorf("%s: Invalid upload ID error mismatch.", instanceType)
 }
 }

@@ -158,23 +158,20 @@ func (sys *PolicySys) Init(objAPI ObjectLayer) error {
 // the following reasons:
 // - Read quorum is lost just after the initialization
 // of the object layer.
-retryTimerCh := newRetryTimerSimple(doneCh)
-for {
-select {
-case _ = <-retryTimerCh:
-// Load PolicySys once during boot.
-if err := sys.refresh(objAPI); err != nil {
-if err == errDiskNotFound ||
-strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
-strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
-logger.Info("Waiting for policy subsystem to be initialized..")
-continue
-}
-return err
-}
-return nil
-}
-break
-}
-return nil
+for range newRetryTimerSimple(doneCh) {
+// Load PolicySys once during boot.
+if err := sys.refresh(objAPI); err != nil {
+if err == errDiskNotFound ||
+strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) ||
+strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) {
+logger.Info("Waiting for policy subsystem to be initialized..")
+continue
+}
+return err
+}
+return nil
+}
+return nil
 }
 // NewPolicySys - creates new policy system.

@@ -860,7 +860,7 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif
 return 0, err
 }
-if bytes.Compare(h.Sum(nil), verifier.sum) != 0 {
+if !bytes.Equal(h.Sum(nil), verifier.sum) {
 return 0, hashMismatchError{hex.EncodeToString(verifier.sum), hex.EncodeToString(h.Sum(nil))}
 }

@@ -188,7 +188,7 @@ func TestPosixIsDirEmpty(t *testing.T) {
 // Should give false on non-existent directory.
 dir1 := slashpath.Join(tmp, "non-existent-directory")
-if isDirEmpty(dir1) != false {
+if isDirEmpty(dir1) {
 t.Error("expected false for non-existent directory, got true")
 }
@@ -199,7 +199,7 @@ func TestPosixIsDirEmpty(t *testing.T) {
 t.Fatal(err)
 }
-if isDirEmpty(dir2) != false {
+if isDirEmpty(dir2) {
 t.Error("expected false for a file, got true")
 }
@@ -210,7 +210,7 @@ func TestPosixIsDirEmpty(t *testing.T) {
 t.Fatal(err)
 }
-if isDirEmpty(dir3) != true {
+if !isDirEmpty(dir3) {
 t.Error("expected true for empty dir, got false")
 }
 }

@@ -21,13 +21,6 @@ import (
 "sync"
 )
-var b512pool = sync.Pool{
-New: func() interface{} {
-buf := make([]byte, 512)
-return &buf
-},
-}
 // A Pool is a type-safe wrapper around a sync.Pool.
 type Pool struct {
 p *sync.Pool

@@ -77,11 +77,6 @@ func (t mytype) Foo(a *Auth, b *int) error {
 return nil
 }
-// incompatible method because of unexported method.
-func (t mytype) foo(a *Auth, b *int) error {
-return nil
-}
 // incompatible method because of first argument is not Authenticator.
 func (t *mytype) Bar(a, b *int) error {
 return nil

@@ -285,10 +285,8 @@ func TestRPCClientCall(t *testing.T) {
 case1ExpectedResult := 19 * 8
 testCases := []struct {
 serviceMethod string
-args interface {
-SetAuthArgs(args AuthArgs)
-}
+args *Args
 result interface{}
 changeConfig bool
 expectedResult interface{}

@@ -186,9 +186,8 @@ func doesV4PresignParamsExist(query url.Values) APIErrorCode {
 // Parses all the presigned signature values into separate elements.
 func parsePreSignV4(query url.Values, region string) (psv preSignValues, aec APIErrorCode) {
-var err APIErrorCode
 // verify whether the required query params exist.
-err = doesV4PresignParamsExist(query)
+err := doesV4PresignParamsExist(query)
 if err != ErrNone {
 return psv, err
 }

@@ -306,7 +306,6 @@ type TestServer struct {
 SecretKey string
 Server *httptest.Server
 Obj ObjectLayer
-endpoints EndpointList
 }
 // UnstartedTestServer - Configures a temp FS/XL backend,
@@ -949,7 +948,7 @@ func preSignV2(req *http.Request, accessKeyID, secretAccessKey string, expires i
 // Sign given request using Signature V2.
 func signRequestV2(req *http.Request, accessKey, secretKey string) error {
-req = s3signer.SignV2(*req, accessKey, secretKey, false)
+s3signer.SignV2(*req, accessKey, secretKey, false)
 return nil
 }
@@ -1305,36 +1304,6 @@ func getRandomBucketName() string {
 }
// TruncateWriter - Writes `n` bytes, then returns with number of bytes written.
// differs from iotest.TruncateWriter, the difference is commented in the Write method.
func TruncateWriter(w io.Writer, n int64) io.Writer {
return &truncateWriter{w, n}
}
type truncateWriter struct {
w io.Writer
n int64
}
func (t *truncateWriter) Write(p []byte) (n int, err error) {
if t.n <= 0 {
return len(p), nil
}
// real write
n = len(p)
if int64(n) > t.n {
n = int(t.n)
}
n, err = t.w.Write(p[0:n])
t.n -= int64(n)
// Removed from iotest.TruncateWriter.
// Need the Write method to return truncated number of bytes written, not the size of the buffer requested to be written.
// if err == nil {
// n = len(p)
// }
return
}
// NewEOFWriter returns a Writer that writes to w, // NewEOFWriter returns a Writer that writes to w,
// but returns EOF error after writing n bytes. // but returns EOF error after writing n bytes.
func NewEOFWriter(w io.Writer, n int64) io.Writer { func NewEOFWriter(w io.Writer, n int64) io.Writer {
@ -1696,11 +1665,10 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl
// Register the API end points with XL object layer. // Register the API end points with XL object layer.
// Registering only the GetObject handler. // Registering only the GetObject handler.
apiRouter := initTestAPIEndPoints(obj, endpoints) apiRouter := initTestAPIEndPoints(obj, endpoints)
var f http.HandlerFunc f := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
f = func(w http.ResponseWriter, r *http.Request) {
r.RequestURI = r.URL.RequestURI() r.RequestURI = r.URL.RequestURI()
apiRouter.ServeHTTP(w, r) apiRouter.ServeHTTP(w, r)
} })
return bucketName, f, nil return bucketName, f, nil
} }
@ -2173,40 +2141,6 @@ func initTestWebRPCEndPoint(objLayer ObjectLayer) http.Handler {
return muxRouter return muxRouter
} }
func StartTestS3PeerRPCServer(t TestErrHandler) (TestServer, []string) {
// init disks
objLayer, fsDirs, err := prepareXL16()
if err != nil {
t.Fatalf("%s", err)
}
// Create an instance of TestServer.
testRPCServer := TestServer{}
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("%s", err)
}
// Fetch credentials for the test server.
credentials := globalServerConfig.GetCredential()
testRPCServer.AccessKey = credentials.AccessKey
testRPCServer.SecretKey = credentials.SecretKey
// set object layer
testRPCServer.Obj = objLayer
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
// Register router on a new mux
muxRouter := mux.NewRouter().SkipClean(true)
registerPeerRPCRouter(muxRouter)
// Initialize and run the TestServer.
testRPCServer.Server = httptest.NewServer(muxRouter)
return testRPCServer, fsDirs
}
// generateTLSCertKey creates valid key/cert with registered DNS or IP address // generateTLSCertKey creates valid key/cert with registered DNS or IP address
// depending on the passed parameter. That way, we can use tls config without // depending on the passed parameter. That way, we can use tls config without
// passing InsecureSkipVerify flag. This code is a simplified version of // passing InsecureSkipVerify flag. This code is a simplified version of

@ -228,11 +228,7 @@ func getProfileData() ([]byte, error) {
} }
// Starts a profiler returns nil if profiler is not enabled, caller needs to handle this. // Starts a profiler returns nil if profiler is not enabled, caller needs to handle this.
func startProfiler(profilerType, dirPath string) (interface { func startProfiler(profilerType, dirPath string) (minioProfiler, error) {
Stop()
Path() string
}, error) {
var err error var err error
if dirPath == "" { if dirPath == "" {
dirPath, err = ioutil.TempDir("", "profile") dirPath, err = ioutil.TempDir("", "profile")
@ -277,14 +273,17 @@ func startProfiler(profilerType, dirPath string) (interface {
}, nil }, nil
} }
// Global profiler to be used by service go-routine. // minioProfiler - minio profiler interface.
var globalProfiler interface { type minioProfiler interface {
// Stop the profiler // Stop the profiler
Stop() Stop()
// Return the path of the profiling file // Return the path of the profiling file
Path() string Path() string
} }
// Global profiler to be used by service go-routine.
var globalProfiler minioProfiler
// dump the request into a string in JSON format. // dump the request into a string in JSON format.
func dumpRequest(r *http.Request) string { func dumpRequest(r *http.Request) string {
header := cloneHeader(r.Header) header := cloneHeader(r.Header)
@ -307,7 +306,7 @@ func dumpRequest(r *http.Request) string {
} }
// Formatted string. // Formatted string.
return strings.TrimSpace(string(buffer.Bytes())) return strings.TrimSpace(buffer.String())
} }
// isFile - returns whether given path is a file or not. // isFile - returns whether given path is a file or not.
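Two idioms show up in the utils.go hunks above: the anonymous interface repeated at several sites is replaced by one named interface (minioProfiler), and string(buffer.Bytes()) becomes buffer.String(), which staticcheck's S1030 check suggests and which avoids an extra copy. A small sketch of both, with hypothetical names:

package main

import (
	"bytes"
	"fmt"
)

// profiler names the behaviour once instead of repeating an
// anonymous interface type at every use site.
type profiler interface {
	Stop()
	Path() string
}

type noopProfiler struct{}

func (noopProfiler) Stop()        {}
func (noopProfiler) Path() string { return "/tmp/profile" }

// The global can now be declared with the named type.
var active profiler = noopProfiler{}

func main() {
	var buf bytes.Buffer
	buf.WriteString("hello")

	// Preferred: buf.String() instead of string(buf.Bytes()).
	fmt.Println(buf.String(), active.Path())
}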

@ -913,7 +913,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10) metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
pipeReader, pipeWriter := io.Pipe() pipeReader, pipeWriter := io.Pipe()
snappyWriter := snappy.NewWriter(pipeWriter) snappyWriter := snappy.NewBufferedWriter(pipeWriter)
var actualReader *hash.Reader var actualReader *hash.Reader
actualReader, err = hash.NewReader(reader, size, "", "", actualSize) actualReader, err = hash.NewReader(reader, size, "", "", actualSize)
@ -1313,14 +1313,15 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
// Response writer should be limited early on for decryption up to required length, // Response writer should be limited early on for decryption up to required length,
// Response writer should be limited early on for decryption up to required length, // Response writer should be limited early on for decryption up to required length,
// additionally also skipping mod(offset)64KiB boundaries. // additionally also skipping mod(offset)64KiB boundaries.
writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length) writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length)
writer, startOffset, length, err = DecryptBlocksRequest(writer, r, args.BucketName, objectName, startOffset, length, info, false) writer, startOffset, length, err = DecryptBlocksRequest(writer, r,
args.BucketName, objectName, startOffset, length, info, false)
if err != nil { if err != nil {
writeWebErrorResponse(w, err) writeWebErrorResponse(w, err)
return err return err
} }
} }
httpWriter := ioutil.WriteOnClose(writer) httpWriter := ioutil.WriteOnClose(writer)
if err = getObject(ctx, args.BucketName, objectName, 0, length, httpWriter, "", opts); err != nil { if err = getObject(ctx, args.BucketName, objectName, startOffset, length, httpWriter, "", opts); err != nil {
httpWriter.Close() httpWriter.Close()
if info.IsCompressed() { if info.IsCompressed() {
// Wait for decompression go-routine to retire. // Wait for decompression go-routine to retire.
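The Upload hunk above swaps snappy.NewWriter for snappy.NewBufferedWriter. Assuming github.com/golang/snappy is the package in use here, the buffered variant batches small writes into full frames of the streaming format; whatever motivated the change, the thing to remember with it is that the writer must be flushed or closed so the final frame reaches the underlying writer. A minimal sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	var compressed bytes.Buffer

	// NewBufferedWriter batches small writes into complete frames.
	w := snappy.NewBufferedWriter(&compressed)
	if _, err := w.Write([]byte("hello, snappy")); err != nil {
		panic(err)
	}
	// Close (or Flush) is required, otherwise buffered data never
	// reaches the underlying writer.
	if err := w.Close(); err != nil {
		panic(err)
	}

	fmt.Println(compressed.Len() > 0) // true
}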

@ -120,7 +120,7 @@ func TestWriteWebErrorResponse(t *testing.T) {
recvDesc := buffer.Bytes() recvDesc := buffer.Bytes()
// Check if the written desc is same as the one expected. // Check if the written desc is same as the one expected.
if !bytes.Equal(recvDesc, []byte(desc)) { if !bytes.Equal(recvDesc, []byte(desc)) {
t.Errorf("Test %d: Unexpected response, expecting %s, got %s", i+1, desc, string(buffer.Bytes())) t.Errorf("Test %d: Unexpected response, expecting %s, got %s", i+1, desc, buffer.String())
} }
buffer.Reset() buffer.Reset()
} }
@ -491,23 +491,23 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
t.Fatalf("Was not able to upload an object, %v", err) t.Fatalf("Was not able to upload an object, %v", err)
} }
test := func(token string) (error, *ListObjectsRep) { test := func(token string) (*ListObjectsRep, error) {
listObjectsRequest := ListObjectsArgs{BucketName: bucketName, Prefix: ""} listObjectsRequest := ListObjectsArgs{BucketName: bucketName, Prefix: ""}
listObjectsReply := &ListObjectsRep{} listObjectsReply := &ListObjectsRep{}
var req *http.Request var req *http.Request
req, err = newTestWebRPCRequest("Web.ListObjects", token, listObjectsRequest) req, err = newTestWebRPCRequest("Web.ListObjects", token, listObjectsRequest)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request: <ERROR> %v", err) return nil, err
} }
apiRouter.ServeHTTP(rec, req) apiRouter.ServeHTTP(rec, req)
if rec.Code != http.StatusOK { if rec.Code != http.StatusOK {
return fmt.Errorf("Expected the response status to be 200, but instead found `%d`", rec.Code), listObjectsReply return listObjectsReply, fmt.Errorf("Expected the response status to be 200, but instead found `%d`", rec.Code)
} }
err = getTestWebRPCResponse(rec, &listObjectsReply) err = getTestWebRPCResponse(rec, &listObjectsReply)
if err != nil { if err != nil {
return err, listObjectsReply return listObjectsReply, err
} }
return nil, listObjectsReply return listObjectsReply, nil
} }
verifyReply := func(reply *ListObjectsRep) { verifyReply := func(reply *ListObjectsRep) {
if len(reply.Objects) == 0 { if len(reply.Objects) == 0 {
@ -522,14 +522,14 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
} }
// Authenticated ListObjects should succeed. // Authenticated ListObjects should succeed.
err, reply := test(authorization) reply, err := test(authorization)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
verifyReply(reply) verifyReply(reply)
// Unauthenticated ListObjects should fail. // Unauthenticated ListObjects should fail.
err, _ = test("") _, err = test("")
if err == nil { if err == nil {
t.Fatalf("Expected error `%s`", err) t.Fatalf("Expected error `%s`", err)
} }
@ -552,7 +552,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
defer globalPolicySys.Remove(bucketName) defer globalPolicySys.Remove(bucketName)
// Unauthenticated ListObjects with READ bucket policy should succeed. // Unauthenticated ListObjects with READ bucket policy should succeed.
err, reply = test("") reply, err = test("")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1559,7 +1559,7 @@ func TestWebCheckAuthorization(t *testing.T) {
if rec.Code != http.StatusForbidden { if rec.Code != http.StatusForbidden {
t.Fatalf("Expected the response status to be 403, but instead found `%d`", rec.Code) t.Fatalf("Expected the response status to be 403, but instead found `%d`", rec.Code)
} }
resp := string(rec.Body.Bytes()) resp := rec.Body.String()
if !strings.EqualFold(resp, errAuthentication.Error()) { if !strings.EqualFold(resp, errAuthentication.Error()) {
t.Fatalf("Unexpected error message, expected: %s, found: `%s`", errAuthentication, resp) t.Fatalf("Unexpected error message, expected: %s, found: `%s`", errAuthentication, resp)
} }
@ -1580,7 +1580,7 @@ func TestWebCheckAuthorization(t *testing.T) {
if rec.Code != http.StatusForbidden { if rec.Code != http.StatusForbidden {
t.Fatalf("Expected the response status to be 403, but instead found `%d`", rec.Code) t.Fatalf("Expected the response status to be 403, but instead found `%d`", rec.Code)
} }
resp = string(rec.Body.Bytes()) resp = rec.Body.String()
if !strings.EqualFold(resp, errAuthentication.Error()) { if !strings.EqualFold(resp, errAuthentication.Error()) {
t.Fatalf("Unexpected error message, expected: `%s`, found: `%s`", errAuthentication, resp) t.Fatalf("Unexpected error message, expected: `%s`, found: `%s`", errAuthentication, resp)
} }
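The test helper rewritten above now returns the error as the last value, the convention Go tooling (staticcheck's ST1008 among others) expects, and call sites read naturally as value, err := f(). A tiny sketch of the convention with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

type reply struct{ count int }

// list returns the result first and the error last, per convention.
func list(token string) (*reply, error) {
	if token == "" {
		return nil, errors.New("unauthenticated")
	}
	return &reply{count: 3}, nil
}

func main() {
	r, err := list("secret")
	if err != nil {
		panic(err)
	}
	fmt.Println(r.count)

	if _, err := list(""); err == nil {
		panic("expected an error for an empty token")
	}
}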

@ -1111,16 +1111,8 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe
res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives)) res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
// Copy "after" drive state too from before. // Copy "after" drive state too from before.
for k, v := range beforeDrives { for k, v := range beforeDrives {
res.Before.Drives[k] = madmin.HealDriveInfo{ res.Before.Drives[k] = madmin.HealDriveInfo(v)
UUID: v.UUID, res.After.Drives[k] = madmin.HealDriveInfo(v)
Endpoint: v.Endpoint,
State: v.State,
}
res.After.Drives[k] = madmin.HealDriveInfo{
UUID: v.UUID,
Endpoint: v.Endpoint,
State: v.State,
}
} }
for index, sErr := range sErrs { for index, sErr := range sErrs {
@ -1253,12 +1245,8 @@ func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove b
if err != nil { if err != nil {
return result, err return result, err
} }
for _, v := range healResult.Before.Drives { result.Before.Drives = append(result.Before.Drives, healResult.Before.Drives...)
result.Before.Drives = append(result.Before.Drives, v) result.After.Drives = append(result.After.Drives, healResult.After.Drives...)
}
for _, v := range healResult.After.Drives {
result.After.Drives = append(result.After.Drives, v)
}
} }
for _, endpoint := range s.endpoints { for _, endpoint := range s.endpoints {
@ -1326,10 +1314,7 @@ func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
return nil, err return nil, err
} }
for _, currBucket := range buckets { for _, currBucket := range buckets {
healBuckets[currBucket.Name] = BucketInfo{ healBuckets[currBucket.Name] = BucketInfo(currBucket)
Name: currBucket.Name,
Created: currBucket.Created,
}
} }
} }
for _, bucketInfo := range healBuckets { for _, bucketInfo := range healBuckets {
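The xl-sets.go hunks replace field-by-field copies with direct type conversions such as madmin.HealDriveInfo(v) and BucketInfo(currBucket). Go permits converting between two struct types whose fields have identical names, types, and order, and staticcheck's S1016 check points out sites where a manual copy can become a conversion. An illustrative sketch with made-up types:

package main

import (
	"fmt"
	"time"
)

// Two distinct types with an identical underlying structure.
type volInfo struct {
	Name    string
	Created time.Time
}

type bucketInfo struct {
	Name    string
	Created time.Time
}

func main() {
	v := volInfo{Name: "photos", Created: time.Now()}

	// Direct conversion instead of copying each field by hand.
	b := bucketInfo(v)

	fmt.Println(b.Name)
}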

@ -130,11 +130,7 @@ func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucke
} }
volInfo, serr := disk.StatVol(bucketName) volInfo, serr := disk.StatVol(bucketName)
if serr == nil { if serr == nil {
bucketInfo = BucketInfo{ return BucketInfo(volInfo), nil
Name: volInfo.Name,
Created: volInfo.Created,
}
return bucketInfo, nil
} }
err = serr err = serr
// For any reason disk went offline continue and pick the next one. // For any reason disk went offline continue and pick the next one.
@ -185,10 +181,7 @@ func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo,
if isReservedOrInvalidBucket(volInfo.Name) { if isReservedOrInvalidBucket(volInfo.Name) {
continue continue
} }
bucketsInfo = append(bucketsInfo, BucketInfo{ bucketsInfo = append(bucketsInfo, BucketInfo(volInfo))
Name: volInfo.Name,
Created: volInfo.Created,
})
} }
// For buckets info empty, loop once again to check // For buckets info empty, loop once again to check
// if we have, can happen if disks were down. // if we have, can happen if disks were down.

@ -278,7 +278,7 @@ func TestDisksWithAllParts(t *testing.T) {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
partsMetadata, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) _, errs := readAllXLMetadata(ctx, xlDisks, bucket, object)
readQuorum := len(xl.storageDisks) / 2 readQuorum := len(xl.storageDisks) / 2
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
t.Fatalf("Failed to read xl meta data %v", reducedErr) t.Fatalf("Failed to read xl meta data %v", reducedErr)
@ -286,7 +286,7 @@ func TestDisksWithAllParts(t *testing.T) {
// Test that all disks are returned without any failures with // Test that all disks are returned without any failures with
// unmodified meta data // unmodified meta data
partsMetadata, errs = readAllXLMetadata(ctx, xlDisks, bucket, object) partsMetadata, errs := readAllXLMetadata(ctx, xlDisks, bucket, object)
if err != nil { if err != nil {
t.Fatalf("Failed to read xl meta data %v", err) t.Fatalf("Failed to read xl meta data %v", err)
} }

@ -120,28 +120,18 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
return StorageInfo{} return StorageInfo{}
} }
_, sscParity := getRedundancyCount(standardStorageClass, len(disks))
_, rrscparity := getRedundancyCount(reducedRedundancyStorageClass, len(disks))
// Total number of online data drives available
// This is the number of drives we report free and total space for
availableDataDisks := uint64(onlineDisks - sscParity)
// Available data disks can be zero when onlineDisks is equal to parity,
// at that point we simply choose online disks to calculate the size.
if availableDataDisks == 0 {
availableDataDisks = uint64(onlineDisks)
}
storageInfo := StorageInfo{}
// Combine all disks to get total usage. // Combine all disks to get total usage.
var used uint64 var used uint64
for _, di := range validDisksInfo { for _, di := range validDisksInfo {
used = used + di.Used used = used + di.Used
} }
storageInfo.Used = used
_, sscParity := getRedundancyCount(standardStorageClass, len(disks))
_, rrscparity := getRedundancyCount(reducedRedundancyStorageClass, len(disks))
storageInfo := StorageInfo{
Used: used,
}
storageInfo.Backend.Type = BackendErasure storageInfo.Backend.Type = BackendErasure
storageInfo.Backend.OnlineDisks = onlineDisks storageInfo.Backend.OnlineDisks = onlineDisks
storageInfo.Backend.OfflineDisks = offlineDisks storageInfo.Backend.OfflineDisks = offlineDisks

@ -60,7 +60,7 @@ func TestCertNew(t *testing.T) {
if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) { if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) {
t.Error("certificate doesn't match expected certificate") t.Error("certificate doesn't match expected certificate")
} }
c, err = certs.New("server.crt", "server2.key", tls.LoadX509KeyPair) _, err = certs.New("server.crt", "server2.key", tls.LoadX509KeyPair)
if err == nil { if err == nil {
t.Fatal("Expected to fail but got success") t.Fatal("Expected to fail but got success")
} }

@ -38,18 +38,6 @@ func TestFree(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if di.Total <= 0 {
t.Error("Unexpected Total", di.Total)
}
if di.Free <= 0 {
t.Error("Unexpected Free", di.Free)
}
if di.Files <= 0 {
t.Error("Unexpected Files", di.Files)
}
if di.Ffree <= 0 {
t.Error("Unexpected Ffree", di.Ffree)
}
if di.FSType == "UNKNOWN" { if di.FSType == "UNKNOWN" {
t.Error("Unexpected FSType", di.FSType) t.Error("Unexpected FSType", di.FSType)
} }

@ -38,10 +38,10 @@ var (
// `{1...64}` // `{1...64}`
// `{33...64}` // `{33...64}`
func parseEllipsesRange(pattern string) (seq []string, err error) { func parseEllipsesRange(pattern string) (seq []string, err error) {
if strings.Index(pattern, openBraces) == -1 { if !strings.Contains(pattern, openBraces) {
return nil, errors.New("Invalid argument") return nil, errors.New("Invalid argument")
} }
if strings.Index(pattern, closeBraces) == -1 { if !strings.Contains(pattern, closeBraces) {
return nil, errors.New("Invalid argument") return nil, errors.New("Invalid argument")
} }
@ -145,7 +145,7 @@ func (p Pattern) Expand() []string {
case p.Suffix != "" && p.Prefix == "": case p.Suffix != "" && p.Prefix == "":
labels = append(labels, fmt.Sprintf("%s%s", p.Seq[i], p.Suffix)) labels = append(labels, fmt.Sprintf("%s%s", p.Seq[i], p.Suffix))
case p.Suffix == "" && p.Prefix == "": case p.Suffix == "" && p.Prefix == "":
labels = append(labels, fmt.Sprintf("%s", p.Seq[i])) labels = append(labels, p.Seq[i])
default: default:
labels = append(labels, fmt.Sprintf("%s%s%s", p.Prefix, p.Seq[i], p.Suffix)) labels = append(labels, fmt.Sprintf("%s%s%s", p.Prefix, p.Seq[i], p.Suffix))
} }
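Two more mechanical cleanups appear in the ellipses.go hunks: strings.Index(s, sub) == -1 becomes !strings.Contains(s, sub) (staticcheck's S1003), and fmt.Sprintf("%s", x) for a value that is already a string is just x (S1025). A short sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	pattern := "node{1...4}"

	// Preferred: strings.Contains instead of comparing strings.Index to -1.
	if !strings.Contains(pattern, "{") {
		fmt.Println("no opening brace")
	}

	label := "disk1"
	// Preferred: use the string directly instead of fmt.Sprintf("%s", label).
	labels := append([]string{}, label)
	fmt.Println(labels)
}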

@ -191,13 +191,11 @@ func (q Queue) ToRulesMap() RulesMap {
// Unused. Available for completion. // Unused. Available for completion.
type lambda struct { type lambda struct {
common
ARN string `xml:"CloudFunction"` ARN string `xml:"CloudFunction"`
} }
// Unused. Available for completion. // Unused. Available for completion.
type topic struct { type topic struct {
common
ARN string `xml:"Topic" json:"Topic"` ARN string `xml:"Topic" json:"Topic"`
} }

@ -199,8 +199,7 @@ func TestQueueUnmarshalXML(t *testing.T) {
} }
func TestQueueValidate(t *testing.T) { func TestQueueValidate(t *testing.T) {
var data []byte data := []byte(`
data = []byte(`
<QueueConfiguration> <QueueConfiguration>
<Id>1</Id> <Id>1</Id>
<Filter></Filter> <Filter></Filter>
@ -281,8 +280,7 @@ func TestQueueValidate(t *testing.T) {
} }
func TestQueueSetRegion(t *testing.T) { func TestQueueSetRegion(t *testing.T) {
var data []byte data := []byte(`
data = []byte(`
<QueueConfiguration> <QueueConfiguration>
<Id>1</Id> <Id>1</Id>
<Filter></Filter> <Filter></Filter>
@ -341,8 +339,7 @@ func TestQueueSetRegion(t *testing.T) {
} }
func TestQueueToRulesMap(t *testing.T) { func TestQueueToRulesMap(t *testing.T) {
var data []byte data := []byte(`
data = []byte(`
<QueueConfiguration> <QueueConfiguration>
<Id>1</Id> <Id>1</Id>
<Filter></Filter> <Filter></Filter>
@ -520,8 +517,7 @@ func TestConfigUnmarshalXML(t *testing.T) {
} }
func TestConfigValidate(t *testing.T) { func TestConfigValidate(t *testing.T) {
var data []byte data := []byte(`
data = []byte(`
<NotificationConfiguration> <NotificationConfiguration>
<QueueConfiguration> <QueueConfiguration>
<Id>1</Id> <Id>1</Id>
@ -628,8 +624,7 @@ func TestConfigValidate(t *testing.T) {
} }
func TestConfigSetRegion(t *testing.T) { func TestConfigSetRegion(t *testing.T) {
var data []byte data := []byte(`
data = []byte(`
<NotificationConfiguration> <NotificationConfiguration>
<QueueConfiguration> <QueueConfiguration>
<Id>1</Id> <Id>1</Id>
@ -733,8 +728,7 @@ func TestConfigSetRegion(t *testing.T) {
} }
func TestConfigToRulesMap(t *testing.T) { func TestConfigToRulesMap(t *testing.T) {
var data []byte data := []byte(`
data = []byte(`
<NotificationConfiguration> <NotificationConfiguration>
<QueueConfiguration> <QueueConfiguration>
<Id>1</Id> <Id>1</Id>

@ -142,13 +142,13 @@ Test-Header: TestHeaderValue
status, testCase.expectedStatus) status, testCase.expectedStatus)
} }
matched, err := regexp.MatchString(testCase.expectedLogRegexp, string(logOutput.Bytes())) matched, err := regexp.MatchString(testCase.expectedLogRegexp, logOutput.String())
if err != nil { if err != nil {
t.Fatalf("Test %d: Incorrect regexp: %v", i+1, err) t.Fatalf("Test %d: Incorrect regexp: %v", i+1, err)
} }
if !matched { if !matched {
t.Fatalf("Test %d: Unexpected log content, found: `%s`", i+1, string(logOutput.Bytes())) t.Fatalf("Test %d: Unexpected log content, found: `%s`", i+1, logOutput.String())
} }
} }
} }

@ -60,6 +60,9 @@ func TestHashReaderHelperMethods(t *testing.T) {
t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", hex.EncodeToString(r.MD5Current())) t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", hex.EncodeToString(r.MD5Current()))
} }
expectedSHA256, err := hex.DecodeString("88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589") expectedSHA256, err := hex.DecodeString("88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589")
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(r.SHA256(), expectedSHA256) { if !bytes.Equal(r.SHA256(), expectedSHA256) {
t.Errorf("Expected md5hex \"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589\", got %s", r.SHA256HexString()) t.Errorf("Expected md5hex \"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589\", got %s", r.SHA256HexString())
} }

@ -22,7 +22,6 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand"
"net/http" "net/http"
"net/http/httputil" "net/http/httputil"
"net/url" "net/url"
@ -61,9 +60,6 @@ type AdminClient struct {
// Advanced functionality. // Advanced functionality.
isTraceEnabled bool isTraceEnabled bool
traceOutput io.Writer traceOutput io.Writer
// Random seed.
random *rand.Rand
} }
// Global constants. // Global constants.
@ -118,21 +114,17 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Ad
} }
// SetAppInfo - add application details to user agent. // SetAppInfo - add application details to user agent.
func (c *AdminClient) SetAppInfo(appName string, appVersion string) { func (adm *AdminClient) SetAppInfo(appName string, appVersion string) {
// if app name and version is not set, we do not set a new user // if app name and version is not set, we do not set a new user
// agent. // agent.
if appName != "" && appVersion != "" { if appName != "" && appVersion != "" {
c.appInfo = struct { adm.appInfo.appName = appName
appName string adm.appInfo.appVersion = appVersion
appVersion string
}{}
c.appInfo.appName = appName
c.appInfo.appVersion = appVersion
} }
} }
// SetCustomTransport - set new custom transport. // SetCustomTransport - set new custom transport.
func (c *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) { func (adm *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport // Set this to override default transport
// ``http.DefaultTransport``. // ``http.DefaultTransport``.
// //
@ -147,28 +139,28 @@ func (c *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper)
// } // }
// api.SetTransport(tr) // api.SetTransport(tr)
// //
if c.httpClient != nil { if adm.httpClient != nil {
c.httpClient.Transport = customHTTPTransport adm.httpClient.Transport = customHTTPTransport
} }
} }
// TraceOn - enable HTTP tracing. // TraceOn - enable HTTP tracing.
func (c *AdminClient) TraceOn(outputStream io.Writer) { func (adm *AdminClient) TraceOn(outputStream io.Writer) {
// if outputStream is nil then default to os.Stdout. // if outputStream is nil then default to os.Stdout.
if outputStream == nil { if outputStream == nil {
outputStream = os.Stdout outputStream = os.Stdout
} }
// Sets a new output stream. // Sets a new output stream.
c.traceOutput = outputStream adm.traceOutput = outputStream
// Enable tracing. // Enable tracing.
c.isTraceEnabled = true adm.isTraceEnabled = true
} }
// TraceOff - disable HTTP tracing. // TraceOff - disable HTTP tracing.
func (c *AdminClient) TraceOff() { func (adm *AdminClient) TraceOff() {
// Disable tracing. // Disable tracing.
c.isTraceEnabled = false adm.isTraceEnabled = false
} }
// requestMetadata - is container for all the values to make a // requestMetadata - is container for all the values to make a
@ -181,7 +173,7 @@ type requestData struct {
} }
// Filter out signature value from Authorization header. // Filter out signature value from Authorization header.
func (c AdminClient) filterSignature(req *http.Request) { func (adm AdminClient) filterSignature(req *http.Request) {
/// Signature V4 authorization header. /// Signature V4 authorization header.
// Save the original auth. // Save the original auth.
@ -197,19 +189,18 @@ func (c AdminClient) filterSignature(req *http.Request) {
// Set a temporary redacted auth // Set a temporary redacted auth
req.Header.Set("Authorization", newAuth) req.Header.Set("Authorization", newAuth)
return
} }
// dumpHTTP - dump HTTP request and response. // dumpHTTP - dump HTTP request and response.
func (c AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error { func (adm AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump. // Starts http dump.
_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") _, err := fmt.Fprintln(adm.traceOutput, "---------START-HTTP---------")
if err != nil { if err != nil {
return err return err
} }
// Filter out Signature field from Authorization header. // Filter out Signature field from Authorization header.
c.filterSignature(req) adm.filterSignature(req)
// Only display request header. // Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false) reqTrace, err := httputil.DumpRequestOut(req, false)
@ -218,7 +209,7 @@ func (c AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error {
} }
// Write request to trace output. // Write request to trace output.
_, err = fmt.Fprint(c.traceOutput, string(reqTrace)) _, err = fmt.Fprint(adm.traceOutput, string(reqTrace))
if err != nil { if err != nil {
return err return err
} }
@ -254,24 +245,24 @@ func (c AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error {
} }
} }
// Write response to trace output. // Write response to trace output.
_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) _, err = fmt.Fprint(adm.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
if err != nil { if err != nil {
return err return err
} }
// Ends the http dump. // Ends the http dump.
_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") _, err = fmt.Fprintln(adm.traceOutput, "---------END-HTTP---------")
return err return err
} }
// do - execute http request. // do - execute http request.
func (c AdminClient) do(req *http.Request) (*http.Response, error) { func (adm AdminClient) do(req *http.Request) (*http.Response, error) {
var resp *http.Response var resp *http.Response
var err error var err error
// Do the request in a loop in case of 307 http is met since golang still doesn't // Do the request in a loop in case of 307 http is met since golang still doesn't
// handle properly this situation (https://github.com/golang/go/issues/7912) // handle properly this situation (https://github.com/golang/go/issues/7912)
for { for {
resp, err = c.httpClient.Do(req) resp, err = adm.httpClient.Do(req)
if err != nil { if err != nil {
// Handle this specifically for now until future Golang // Handle this specifically for now until future Golang
// versions fix this issue properly. // versions fix this issue properly.
@ -304,8 +295,8 @@ func (c AdminClient) do(req *http.Request) (*http.Response, error) {
} }
// If trace is enabled, dump http request and response. // If trace is enabled, dump http request and response.
if c.isTraceEnabled { if adm.isTraceEnabled {
err = c.dumpHTTP(req, resp) err = adm.dumpHTTP(req, resp)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -323,7 +314,7 @@ var successStatus = []int{
// executeMethod - instantiates a given method, and retries the // executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially // request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm. // delayed manner using a standard back off algorithm.
func (c AdminClient) executeMethod(method string, reqData requestData) (res *http.Response, err error) { func (adm AdminClient) executeMethod(method string, reqData requestData) (res *http.Response, err error) {
// Create a done channel to control 'ListObjects' go routine. // Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{}, 1) doneCh := make(chan struct{}, 1)
@ -333,13 +324,13 @@ func (c AdminClient) executeMethod(method string, reqData requestData) (res *htt
// Instantiate a new request. // Instantiate a new request.
var req *http.Request var req *http.Request
req, err = c.newRequest(method, reqData) req, err = adm.newRequest(method, reqData)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Initiate the request. // Initiate the request.
res, err = c.do(req) res, err = adm.do(req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -368,15 +359,15 @@ func (c AdminClient) executeMethod(method string, reqData requestData) (res *htt
} }
// set User agent. // set User agent.
func (c AdminClient) setUserAgent(req *http.Request) { func (adm AdminClient) setUserAgent(req *http.Request) {
req.Header.Set("User-Agent", libraryUserAgent) req.Header.Set("User-Agent", libraryUserAgent)
if c.appInfo.appName != "" && c.appInfo.appVersion != "" { if adm.appInfo.appName != "" && adm.appInfo.appVersion != "" {
req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) req.Header.Set("User-Agent", libraryUserAgent+" "+adm.appInfo.appName+"/"+adm.appInfo.appVersion)
} }
} }
// newRequest - instantiate a new HTTP request for a given method. // newRequest - instantiate a new HTTP request for a given method.
func (c AdminClient) newRequest(method string, reqData requestData) (req *http.Request, err error) { func (adm AdminClient) newRequest(method string, reqData requestData) (req *http.Request, err error) {
// If no method is supplied default to 'POST'. // If no method is supplied default to 'POST'.
if method == "" { if method == "" {
method = "POST" method = "POST"
@ -386,7 +377,7 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R
location := "" location := ""
// Construct a new target URL. // Construct a new target URL.
targetURL, err := c.makeTargetURL(reqData) targetURL, err := adm.makeTargetURL(reqData)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -397,7 +388,7 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R
return nil, err return nil, err
} }
c.setUserAgent(req) adm.setUserAgent(req)
for k, v := range reqData.customHeaders { for k, v := range reqData.customHeaders {
req.Header.Set(k, v[0]) req.Header.Set(k, v[0])
} }
@ -407,15 +398,15 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(reqData.content))) req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(reqData.content)))
req.Body = ioutil.NopCloser(bytes.NewReader(reqData.content)) req.Body = ioutil.NopCloser(bytes.NewReader(reqData.content))
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "", location) req = s3signer.SignV4(*req, adm.accessKeyID, adm.secretAccessKey, "", location)
return req, nil return req, nil
} }
// makeTargetURL make a new target url. // makeTargetURL make a new target url.
func (c AdminClient) makeTargetURL(r requestData) (*url.URL, error) { func (adm AdminClient) makeTargetURL(r requestData) (*url.URL, error) {
host := c.endpointURL.Host host := adm.endpointURL.Host
scheme := c.endpointURL.Scheme scheme := adm.endpointURL.Scheme
urlStr := scheme + "://" + host + libraryAdminURLPrefix + r.relPath urlStr := scheme + "://" + host + libraryAdminURLPrefix + r.relPath
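The madmin/api.go hunk renames the method receiver from c to adm on every AdminClient method; mixing receiver names across methods of one type is what staticcheck's ST1016 check flags, and a single consistent name keeps the code easier to read and grep. A sketch with a hypothetical type:

package main

import "fmt"

type client struct{ endpoint string }

// All methods use the same receiver name, "cl".
func (cl *client) setEndpoint(e string) { cl.endpoint = e }
func (cl *client) String() string       { return "client@" + cl.endpoint }

func main() {
	cl := &client{}
	cl.setEndpoint("localhost:9000")
	fmt.Println(cl)
}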

@ -31,9 +31,8 @@ func TestMimeLookup(t *testing.T) {
} }
func TestTypeByExtension(t *testing.T) { func TestTypeByExtension(t *testing.T) {
var contentType string
// Test TypeByExtension. // Test TypeByExtension.
contentType = TypeByExtension(".txt") contentType := TypeByExtension(".txt")
if contentType != "text/plain" { if contentType != "text/plain" {
t.Fatalf("Invalid content type are found expected \"text/plain\", got %s", contentType) t.Fatalf("Invalid content type are found expected \"text/plain\", got %s", contentType)
} }

@ -62,17 +62,16 @@ func FormatJSONSyntaxError(data io.Reader, offset int64) (highlight string) {
if readBytes > offset { if readBytes > offset {
break break
} }
switch b { if b == '\n' {
case '\n':
readLine.Reset() readLine.Reset()
errLine++ errLine++
case '\t': continue
} else if b == '\t' {
readLine.WriteByte(' ') readLine.WriteByte(' ')
case '\r': } else if b == '\r' {
break break
default:
readLine.WriteByte(b)
} }
readLine.WriteByte(b)
} }
lineLen := readLine.Len() lineLen := readLine.Len()

@ -307,7 +307,6 @@ func (writer *messageWriter) start() {
case <-recordStagingTicker.C: case <-recordStagingTicker.C:
if !writer.flushRecords() { if !writer.flushRecords() {
quitFlag = true quitFlag = true
break
} }
case <-keepAliveTicker.C: case <-keepAliveTicker.C:
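Both the quick/errorutil.go rewrite and the s3select/message.go hunk above deal with break statements inside switch or select cases. A bare break there only leaves the case, not the enclosing loop, so the break removed from message.go was a no-op (the kind of finding staticcheck reports as SA4011), while the if/else rewrite in errorutil.go now breaks out of the surrounding read loop. When a loop genuinely needs to be left from inside a switch, a labelled break makes the intent explicit; a small sketch:

package main

import "fmt"

func main() {
	data := []byte("abc\rdef")

read:
	for _, b := range data {
		switch b {
		case '\r':
			// A bare "break" would only end the switch; the label
			// makes it leave the loop as intended.
			break read
		default:
			fmt.Printf("%c", b)
		}
	}
	fmt.Println() // prints "abc"
}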

@ -408,9 +408,7 @@ func (s3Select *S3Select) Evaluate(w http.ResponseWriter) {
} }
if err != nil { if err != nil {
if serr := writer.FinishWithError("InternalError", err.Error()); serr != nil { _ = writer.FinishWithError("InternalError", err.Error())
// FIXME: log errors.
}
} }
} }

@ -274,10 +274,6 @@ func (e *FuncExpr) aggregateRow(r Record) error {
// called after calling aggregateRow() on each input row, to calculate // called after calling aggregateRow() on each input row, to calculate
// the final aggregate result. // the final aggregate result.
func (e *Expression) getAggregate() (*Value, error) {
return e.evalNode(nil)
}
func (e *FuncExpr) getAggregate() (*Value, error) { func (e *FuncExpr) getAggregate() (*Value, error) {
switch e.getFunctionName() { switch e.getFunctionName() {
case aggFnCount: case aggFnCount:

@ -234,7 +234,9 @@ func handleDateAdd(r Record, d *DateAddFunc) (*Value, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
inferTypeAsTimestamp(ts) if err = inferTypeAsTimestamp(ts); err != nil {
return nil, err
}
t, ok := ts.ToTimestamp() t, ok := ts.ToTimestamp()
if !ok { if !ok {
return nil, fmt.Errorf("%s() expects a timestamp argument", sqlFnDateAdd) return nil, fmt.Errorf("%s() expects a timestamp argument", sqlFnDateAdd)
@ -248,7 +250,9 @@ func handleDateDiff(r Record, d *DateDiffFunc) (*Value, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
inferTypeAsTimestamp(tval1) if err = inferTypeAsTimestamp(tval1); err != nil {
return nil, err
}
ts1, ok := tval1.ToTimestamp() ts1, ok := tval1.ToTimestamp()
if !ok { if !ok {
return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff) return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff)
@ -258,7 +262,9 @@ func handleDateDiff(r Record, d *DateDiffFunc) (*Value, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
inferTypeAsTimestamp(tval2) if err = inferTypeAsTimestamp(tval2); err != nil {
return nil, err
}
ts2, ok := tval2.ToTimestamp() ts2, ok := tval2.ToTimestamp()
if !ok { if !ok {
return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff) return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff)
@ -363,7 +369,9 @@ func handleSQLExtract(r Record, e *ExtractFunc) (res *Value, err error) {
return nil, verr return nil, verr
} }
inferTypeAsTimestamp(timeVal) if err = inferTypeAsTimestamp(timeVal); err != nil {
return nil, err
}
t, ok := timeVal.ToTimestamp() t, ok := timeVal.ToTimestamp()
if !ok { if !ok {

@ -83,8 +83,6 @@ type Select struct {
type SelectExpression struct { type SelectExpression struct {
All bool `parser:" @\"*\""` All bool `parser:" @\"*\""`
Expressions []*AliasedExpression `parser:"| @@ { \",\" @@ }"` Expressions []*AliasedExpression `parser:"| @@ { \",\" @@ }"`
prop qProp
} }
// TableExpression represents the FROM clause // TableExpression represents the FROM clause

@ -38,7 +38,6 @@ var (
layoutSecond, layoutSecond,
layoutNanosecond, layoutNanosecond,
} }
oneNanoSecond = 1
) )
func parseSQLTimestamp(s string) (t time.Time, err error) { func parseSQLTimestamp(s string) (t time.Time, err error) {

@ -248,7 +248,7 @@ func (v *Value) CSVString() string {
case typeBool: case typeBool:
return fmt.Sprintf("%v", v.value.(bool)) return fmt.Sprintf("%v", v.value.(bool))
case typeString: case typeString:
return fmt.Sprintf("%s", v.value.(string)) return v.value.(string)
case typeInt: case typeInt:
return fmt.Sprintf("%v", v.value.(int64)) return fmt.Sprintf("%v", v.value.(int64))
case typeFloat: case typeFloat:
@ -610,22 +610,22 @@ func (v *Value) minmax(a *Value, isMax, isFirstRow bool) error {
return nil return nil
} }
func inferTypeAsTimestamp(v *Value) { func inferTypeAsTimestamp(v *Value) error {
if s, ok := v.ToString(); ok { if s, ok := v.ToString(); ok {
t, err := parseSQLTimestamp(s) t, err := parseSQLTimestamp(s)
if err != nil { if err != nil {
return return err
} }
v.setTimestamp(t) v.setTimestamp(t)
} else if b, ok := v.ToBytes(); ok { } else if b, ok := v.ToBytes(); ok {
s := string(b) s := string(b)
t, err := parseSQLTimestamp(s) t, err := parseSQLTimestamp(s)
if err != nil { if err != nil {
return return err
} }
v.setTimestamp(t) v.setTimestamp(t)
} }
return return nil
} }
// inferTypeAsString is used to convert untyped values to string - it // inferTypeAsString is used to convert untyped values to string - it
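With the change above, inferTypeAsTimestamp reports the parse failure to its callers instead of silently leaving the value untyped, and every call site in funceval.go now checks the returned error; silently dropped errors and dead assignments are a common class of staticcheck and ineffassign findings. A condensed sketch of the pattern, with simplified types that are not the package's real ones:

package main

import (
	"fmt"
	"time"
)

// inferTimestamp parses s and reports a failure to the caller
// instead of silently leaving the value unchanged.
func inferTimestamp(s string) (time.Time, error) {
	t, err := time.Parse("2006-01-02", s)
	if err != nil {
		return time.Time{}, err
	}
	return t, nil
}

func main() {
	ts, err := inferTimestamp("2019-01-15")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Year()) // 2019

	if _, err := inferTimestamp("not-a-date"); err == nil {
		panic("expected a parse error")
	}
}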

@ -22,8 +22,7 @@ import (
// Simply make sure creating a new tree works. // Simply make sure creating a new tree works.
func TestNewTrie(t *testing.T) { func TestNewTrie(t *testing.T) {
var trie *Trie trie := NewTrie()
trie = NewTrie()
if trie.size != 0 { if trie.size != 0 {
t.Errorf("expected size 0, got: %d", trie.size) t.Errorf("expected size 0, got: %d", trie.size)
@ -32,8 +31,7 @@ func TestNewTrie(t *testing.T) {
// Ensure that we can insert new keys into the tree, then check the size. // Ensure that we can insert new keys into the tree, then check the size.
func TestInsert(t *testing.T) { func TestInsert(t *testing.T) {
var trie *Trie trie := NewTrie()
trie = NewTrie()
// We need to have an empty tree to begin with. // We need to have an empty tree to begin with.
if trie.size != 0 { if trie.size != 0 {
@ -51,8 +49,7 @@ func TestInsert(t *testing.T) {
// Ensure that PrefixMatch gives us the correct two keys in the tree. // Ensure that PrefixMatch gives us the correct two keys in the tree.
func TestPrefixMatch(t *testing.T) { func TestPrefixMatch(t *testing.T) {
var trie *Trie trie := NewTrie()
trie = NewTrie()
// Feed it some fodder: only 'minio' and 'miny-os' should trip the matcher. // Feed it some fodder: only 'minio' and 'miny-os' should trip the matcher.
trie.Insert("minio") trie.Insert("minio")

@ -26,7 +26,6 @@ func TestMinimum(t *testing.T) {
type testCase struct { type testCase struct {
listval []int listval []int
expected int expected int
pass bool
} }
testCases := []testCase{ testCases := []testCase{
{listval: []int{3, 4, 15}, expected: 3}, {listval: []int{3, 4, 15}, expected: 3},

@ -0,0 +1 @@
checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019"]
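The new staticcheck.conf enables every check and then opts out of a handful. As a rough guide (worth verifying against the staticcheck documentation for the version installed), ST1005 covers error-string style, ST1000 package comments, SA4000 identical operands around a binary operator, SA9004 constant groups where only the first value has an explicit type, and SA1019 use of deprecated identifiers. Disabling ST1005, for instance, lets existing capitalized error strings, like the "Invalid argument" ones kept in ellipses.go above, pass the linter unchanged; code of this shape is what that check would otherwise flag:

package main

import (
	"errors"
	"fmt"
)

func parse(pattern string) error {
	if pattern == "" {
		// ST1005 would normally ask for a lower-case, unpunctuated
		// error string ("invalid argument"); the config suppresses it.
		return errors.New("Invalid argument")
	}
	return nil
}

func main() {
	fmt.Println(parse(""))
}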