From df35d7db9d43d0bf73cb22b83577505f947d0e2e Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 13 Feb 2019 04:59:36 -0800 Subject: [PATCH] Introduce staticcheck for stricter builds (#7035) --- Makefile | 32 ++--- cmd/admin-handlers.go | 11 +- cmd/admin-handlers_test.go | 99 +-------------- cmd/benchmark-utils_test.go | 7 +- cmd/bitrot-streaming.go | 2 +- cmd/bucket-handlers_test.go | 6 +- cmd/config-versions.go | 9 -- cmd/config.go | 23 ++-- cmd/dummy-object-layer_test.go | 167 -------------------------- cmd/encryption-v1.go | 2 +- cmd/endpoint.go | 2 +- cmd/erasure-decode_test.go | 2 +- cmd/format-fs.go | 52 ++++---- cmd/format-xl_test.go | 2 +- cmd/fs-v1-helpers.go | 2 +- cmd/fs-v1.go | 3 +- cmd/gateway/azure/gateway-azure.go | 3 + cmd/gateway/gcs/gateway-gcs.go | 1 + cmd/gateway/oss/gateway-oss_test.go | 3 +- cmd/handler-utils.go | 2 - cmd/healthcheck-handler_test.go | 2 +- cmd/http/close.go | 8 -- cmd/iam.go | 25 ++-- cmd/local-locker.go | 14 +-- cmd/lock-rpc-server-common_test.go | 10 +- cmd/lock-rpc-server_test.go | 13 +- cmd/lock-stat.go | 55 --------- cmd/logger/logonce.go | 11 +- cmd/logger/target/http/http.go | 3 + cmd/namespace-lock.go | 11 +- cmd/namespace-lock_test.go | 1 - cmd/notification.go | 26 ++-- cmd/object-handlers-common.go | 5 +- cmd/object-handlers.go | 8 +- cmd/object-handlers_test.go | 6 + cmd/object_api_suite_test.go | 2 +- cmd/policy.go | 25 ++-- cmd/posix.go | 2 +- cmd/posix_test.go | 6 +- cmd/rpc/pool.go | 7 -- cmd/rpc/server_test.go | 5 - cmd/rpc_test.go | 6 +- cmd/signature-v4-parser.go | 3 +- cmd/test-utils_test.go | 72 +---------- cmd/utils.go | 15 ++- cmd/web-handlers.go | 7 +- cmd/web-handlers_test.go | 22 ++-- cmd/xl-sets.go | 25 +--- cmd/xl-v1-bucket.go | 11 +- cmd/xl-v1-healing-common_test.go | 4 +- cmd/xl-v1.go | 22 +--- pkg/certs/certs_test.go | 2 +- pkg/disk/disk_test.go | 12 -- pkg/ellipses/ellipses.go | 6 +- pkg/event/config.go | 2 - pkg/event/config_test.go | 18 +-- pkg/handlers/http-tracer_test.go | 4 +- pkg/hash/reader_test.go | 3 + pkg/madmin/api.go | 79 ++++++------ pkg/mimedb/db_test.go | 3 +- pkg/quick/errorutil.go | 11 +- pkg/s3select/message.go | 1 - pkg/s3select/select.go | 4 +- pkg/s3select/sql/aggregation.go | 4 - pkg/s3select/sql/funceval.go | 16 ++- pkg/s3select/sql/parser.go | 2 - pkg/s3select/sql/timestampfuncs.go | 1 - pkg/s3select/sql/value.go | 10 +- pkg/trie/trie_test.go | 9 +- pkg/words/damerau-levenshtein_test.go | 1 - staticcheck.conf | 1 + 71 files changed, 274 insertions(+), 777 deletions(-) delete mode 100644 cmd/dummy-object-layer_test.go delete mode 100644 cmd/lock-stat.go create mode 100644 staticcheck.conf diff --git a/Makefile b/Makefile index 685b86977..f28240c8a 100644 --- a/Makefile +++ b/Makefile @@ -14,43 +14,32 @@ checks: getdeps: @echo "Installing golint" && go get -u golang.org/x/lint/golint - @echo "Installing gocyclo" && go get -u github.com/fzipp/gocyclo - @echo "Installing deadcode" && go get -u github.com/remyoudompheng/go-misc/deadcode + @echo "Installing staticcheck" && go get -u honnef.co/go/tools/... @echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell - @echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign crosscompile: @(env bash $(PWD)/buildscripts/cross-compile.sh) -verifiers: getdeps vet fmt lint cyclo deadcode spelling +verifiers: getdeps vet fmt lint staticcheck spelling vet: @echo "Running $@" - @go tool vet cmd - @go tool vet pkg + @go vet github.com/minio/minio/... 
fmt: @echo "Running $@" - @gofmt -d cmd - @gofmt -d pkg + @gofmt -d cmd/ + @gofmt -d pkg/ lint: @echo "Running $@" - @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd... - @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg... + @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/... + @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/... -ineffassign: +staticcheck: @echo "Running $@" - @${GOPATH}/bin/ineffassign . - -cyclo: - @echo "Running $@" - @${GOPATH}/bin/gocyclo -over 200 cmd - @${GOPATH}/bin/gocyclo -over 200 pkg - -deadcode: - @echo "Running $@" - @${GOPATH}/bin/deadcode -test $(shell go list ./...) || true + @${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/... + @${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/... spelling: @${GOPATH}/bin/misspell -locale US -error `find cmd/` @@ -105,6 +94,7 @@ install: build clean: @echo "Cleaning up all the generated files" @find . -name '*.test' | xargs rm -fv + @find . -name '*~' | xargs rm -fv @rm -rvf minio @rm -rvf build @rm -rvf release diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 5b770bf74..b8133bf2a 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -381,7 +381,6 @@ func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request } else { writeErrorResponseJSON(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL) } - return } func newLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEntry { @@ -437,12 +436,20 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request return } - // Method only allowed in XL mode. + // Method only allowed in Distributed XL mode. if !globalIsDistXL { writeErrorResponseJSON(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL) return } + // Authenticate request + // Setting the region as empty so as the mc server info command is irrespective to the region. + adminAPIErr := checkAdminRequestAuthType(ctx, r, "") + if adminAPIErr != ErrNone { + writeErrorResponseJSON(w, errorCodes.ToAPIErr(adminAPIErr), r.URL) + return + } + thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints)) if err != nil { writeErrorResponseJSON(w, toAdminAPIErr(ctx, err), r.URL) diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 9a60459ab..cee82dffe 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -29,7 +29,6 @@ import ( "strings" "sync" "testing" - "time" "github.com/gorilla/mux" "github.com/minio/minio/pkg/auth" @@ -231,10 +230,9 @@ var ( // adminXLTestBed - encapsulates subsystems that need to be setup for // admin-handler unit tests. 
type adminXLTestBed struct { - configPath string - xlDirs []string - objLayer ObjectLayer - router *mux.Router + xlDirs []string + objLayer ObjectLayer + router *mux.Router } // prepareAdminXLTestBed - helper function that setups a single-node @@ -773,7 +771,7 @@ func TestSetConfigHandler(t *testing.T) { rec := httptest.NewRecorder() adminTestBed.router.ServeHTTP(rec, req) - respBody := string(rec.Body.Bytes()) + respBody := rec.Body.String() if rec.Code != http.StatusBadRequest || !strings.Contains(respBody, "Configuration data provided exceeds the allowed maximum of") { t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody) @@ -792,7 +790,7 @@ func TestSetConfigHandler(t *testing.T) { rec := httptest.NewRecorder() adminTestBed.router.ServeHTTP(rec, req) - respBody := string(rec.Body.Bytes()) + respBody := rec.Body.String() if rec.Code != http.StatusBadRequest || !strings.Contains(respBody, "JSON configuration provided is of incorrect format") { t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody) @@ -879,90 +877,3 @@ func TestToAdminAPIErrCode(t *testing.T) { } } } - -func mkHealStartReq(t *testing.T, bucket, prefix string, - opts madmin.HealOpts) *http.Request { - - body, err := json.Marshal(opts) - if err != nil { - t.Fatalf("Unable marshal heal opts") - } - - path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket) - if bucket != "" && prefix != "" { - path += "/" + prefix - } - - req, err := newTestRequest("POST", path, - int64(len(body)), bytes.NewReader(body)) - if err != nil { - t.Fatalf("Failed to construct request - %v", err) - } - cred := globalServerConfig.GetCredential() - err = signRequestV4(req, cred.AccessKey, cred.SecretKey) - if err != nil { - t.Fatalf("Failed to sign request - %v", err) - } - - return req -} - -func mkHealStatusReq(t *testing.T, bucket, prefix, - clientToken string) *http.Request { - - path := fmt.Sprintf("/minio/admin/v1/heal/%s", bucket) - if bucket != "" && prefix != "" { - path += "/" + prefix - } - path += fmt.Sprintf("?clientToken=%s", clientToken) - - req, err := newTestRequest("POST", path, 0, nil) - if err != nil { - t.Fatalf("Failed to construct request - %v", err) - } - cred := globalServerConfig.GetCredential() - err = signRequestV4(req, cred.AccessKey, cred.SecretKey) - if err != nil { - t.Fatalf("Failed to sign request - %v", err) - } - - return req -} - -func collectHealResults(t *testing.T, adminTestBed *adminXLTestBed, bucket, - prefix, clientToken string, timeLimitSecs int) madmin.HealTaskStatus { - - var res, cur madmin.HealTaskStatus - - // loop and fetch heal status. have a time-limit to loop over - // all statuses. - timeLimit := UTCNow().Add(time.Second * time.Duration(timeLimitSecs)) - for cur.Summary != healStoppedStatus && cur.Summary != healFinishedStatus { - if UTCNow().After(timeLimit) { - t.Fatalf("heal-status loop took too long - clientToken: %s", clientToken) - } - req := mkHealStatusReq(t, bucket, prefix, clientToken) - rec := httptest.NewRecorder() - adminTestBed.router.ServeHTTP(rec, req) - if http.StatusOK != rec.Code { - t.Errorf("Unexpected status code - got %d but expected %d", - rec.Code, http.StatusOK) - break - } - err := json.NewDecoder(rec.Body).Decode(&cur) - if err != nil { - t.Errorf("unable to unmarshal resp: %v", err) - break - } - - // all results are accumulated into a slice - // and returned to caller in the end - allItems := append(res.Items, cur.Items...) 
- res = cur - res.Items = allItems - - time.Sleep(time.Millisecond * 200) - } - - return res -} diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go index 95c78b636..60be81dc1 100644 --- a/cmd/benchmark-utils_test.go +++ b/cmd/benchmark-utils_test.go @@ -100,7 +100,6 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { b.Fatal(err) } - md5hex := getMD5Hash(textData) sha256hex := "" var textPartData []byte @@ -117,7 +116,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { } else { textPartData = textData[j*partSize:] } - md5hex = getMD5Hash([]byte(textPartData)) + md5hex := getMD5Hash([]byte(textPartData)) var partInfo PartInfo partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j, mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{}) @@ -230,10 +229,8 @@ func getRandomByte() []byte { const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" // seeding the random number generator. rand.Seed(UTCNow().UnixNano()) - var b byte // pick a character randomly. - b = letterBytes[rand.Intn(len(letterBytes))] - return []byte{b} + return []byte{letterBytes[rand.Intn(len(letterBytes))]} } // picks a random byte and repeats it to size bytes. diff --git a/cmd/bitrot-streaming.go b/cmd/bitrot-streaming.go index 63f1d9ace..17125e3ef 100644 --- a/cmd/bitrot-streaming.go +++ b/cmd/bitrot-streaming.go @@ -146,7 +146,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) { } b.h.Write(buf) - if bytes.Compare(b.h.Sum(nil), b.hashBytes) != 0 { + if !bytes.Equal(b.h.Sum(nil), b.hashBytes) { err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))} logger.LogIf(context.Background(), err) return 0, err diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go index 765dce76b..29580bfc4 100644 --- a/cmd/bucket-handlers_test.go +++ b/cmd/bucket-handlers_test.go @@ -95,12 +95,12 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code) } if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass { - t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), string(rec.Body.Bytes())) + t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), rec.Body.String()) } errorResponse := APIErrorResponse{} err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse) if err != nil && !testCase.shouldPass { - t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(rec.Body.Bytes())) + t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String()) } if errorResponse.Resource != testCase.errorResponse.Resource { t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource) @@ -131,7 +131,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri errorResponse = APIErrorResponse{} err = xml.Unmarshal(recV2.Body.Bytes(), &errorResponse) if err != nil && !testCase.shouldPass { - t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, 
instanceType, string(recV2.Body.Bytes())) + t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, recV2.Body.String()) } if errorResponse.Resource != testCase.errorResponse.Resource { t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource) diff --git a/cmd/config-versions.go b/cmd/config-versions.go index 1ae5d6b8c..f69975cfb 100644 --- a/cmd/config-versions.go +++ b/cmd/config-versions.go @@ -263,9 +263,6 @@ type serverConfigV7 struct { // Notification queue configuration. Notify notifierV1 `json:"notify"` - - // Read Write mutex. - rwMutex *sync.RWMutex } // serverConfigV8 server configuration version '8'. Adds NATS notifier @@ -282,9 +279,6 @@ type serverConfigV8 struct { // Notification queue configuration. Notify notifierV1 `json:"notify"` - - // Read Write mutex. - rwMutex *sync.RWMutex } // serverConfigV9 server configuration version '9'. Adds PostgreSQL @@ -301,9 +295,6 @@ type serverConfigV9 struct { // Notification queue configuration. Notify notifierV1 `json:"notify"` - - // Read Write mutex. - rwMutex *sync.RWMutex } type loggerV7 struct { diff --git a/cmd/config.go b/cmd/config.go index bfc92c0ca..adb73f2ee 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -127,23 +127,18 @@ func (sys *ConfigSys) Init(objAPI ObjectLayer) error { // of the object layer. // - Write quorum not met when upgrading configuration // version is needed. - retryTimerCh := newRetryTimerSimple(doneCh) - for { - select { - case _ = <-retryTimerCh: - err := initConfig(objAPI) - if err != nil { - if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || - strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { - logger.Info("Waiting for configuration to be initialized..") - continue - } - return err + for range newRetryTimerSimple(doneCh) { + if err := initConfig(objAPI); err != nil { + if strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || + strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { + logger.Info("Waiting for configuration to be initialized..") + continue } - - return nil + return err } + break } + return nil } // NewConfigSys - creates new config system object. diff --git a/cmd/dummy-object-layer_test.go b/cmd/dummy-object-layer_test.go deleted file mode 100644 index 6ad5f1ae3..000000000 --- a/cmd/dummy-object-layer_test.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "context" - "io" - "net/http" - - "github.com/minio/minio/pkg/madmin" - "github.com/minio/minio/pkg/policy" -) - -type DummyObjectLayer struct{} - -func (api *DummyObjectLayer) Shutdown(context.Context) (err error) { - return -} - -func (api *DummyObjectLayer) StorageInfo(context.Context) (si StorageInfo) { - return -} - -func (api *DummyObjectLayer) MakeBucketWithLocation(ctx context.Context, bucket string, location string) (err error) { - return -} - -func (api *DummyObjectLayer) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { - return -} - -func (api *DummyObjectLayer) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { - return -} - -func (api *DummyObjectLayer) DeleteBucket(ctx context.Context, bucket string) (err error) { - return -} - -func (api *DummyObjectLayer) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) { - return -} - -func (api *DummyObjectLayer) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { - return -} - -func (api *DummyObjectLayer) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lock LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { - return -} - -func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) { - return -} - -func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { - return -} - -func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { - return -} - -func (api *DummyObjectLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { - return -} - -func (api *DummyObjectLayer) DeleteObject(ctx context.Context, bucket, object string) (err error) { - return -} - -func (api *DummyObjectLayer) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) { - return -} - -func (api *DummyObjectLayer) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) { - return -} - -func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error) { - return -} - -func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) { - return -} - -func (api *DummyObjectLayer) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) { - return -} - -func (api *DummyObjectLayer) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) { - return -} - -func (api *DummyObjectLayer) CompleteMultipartUpload(ctx context.Context, bucket, object, 
uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { - return -} - -func (api *DummyObjectLayer) ReloadFormat(ctx context.Context, dryRun bool) (err error) { - return -} - -func (api *DummyObjectLayer) HealFormat(ctx context.Context, dryRun bool) (item madmin.HealResultItem, err error) { - return -} - -func (api *DummyObjectLayer) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (items madmin.HealResultItem, err error) { - return -} - -func (api *DummyObjectLayer) HealObject(ctx context.Context, bucket, object string, dryRun, remove bool) (item madmin.HealResultItem, err error) { - return -} - -func (api *DummyObjectLayer) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) { - return -} - -func (api *DummyObjectLayer) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (info ListObjectsInfo, err error) { - return -} - -func (api *DummyObjectLayer) SetBucketPolicy(context.Context, string, *policy.Policy) (err error) { - return -} - -func (api *DummyObjectLayer) GetBucketPolicy(context.Context, string) (bucketPolicy *policy.Policy, err error) { - return -} - -func (api *DummyObjectLayer) RefreshBucketPolicy(context.Context, string) (err error) { - return -} - -func (api *DummyObjectLayer) DeleteBucketPolicy(context.Context, string) (err error) { - return -} - -func (api *DummyObjectLayer) IsNotificationSupported() (b bool) { - return -} -func (api *DummyObjectLayer) IsListenBucketSupported() (b bool) { - return -} - -func (api *DummyObjectLayer) IsEncryptionSupported() (b bool) { - return -} - -func (api *DummyObjectLayer) IsCompressionSupported() (b bool) { - return -} diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index ac0199c54..ef018ec49 100644 --- a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -742,7 +742,7 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri return writer, encStartOffset, encLength, nil } - seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo) + _, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo) var partStartIndex int var partStartOffset = startOffset diff --git a/cmd/endpoint.go b/cmd/endpoint.go index aaed13b7f..6e04b1e98 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -94,7 +94,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) { // - Scheme field must contain "http" or "https" // - All field should be empty except Host and Path. 
if !((u.Scheme == "http" || u.Scheme == "https") && - u.User == nil && u.Opaque == "" && u.ForceQuery == false && u.RawQuery == "" && u.Fragment == "") { + u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") { return ep, fmt.Errorf("invalid URL endpoint format") } diff --git a/cmd/erasure-decode_test.go b/cmd/erasure-decode_test.go index 58bffa0f5..77f849c81 100644 --- a/cmd/erasure-decode_test.go +++ b/cmd/erasure-decode_test.go @@ -28,7 +28,7 @@ import ( humanize "github.com/dustin/go-humanize" ) -func (d badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) { +func (a badDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) { return 0, errFaultyDisk } diff --git a/cmd/format-fs.go b/cmd/format-fs.go index 00f0bfdb8..b2527ed6d 100644 --- a/cmd/format-fs.go +++ b/cmd/format-fs.go @@ -341,35 +341,35 @@ func formatFSFixDeploymentID(fsFormatPath string) error { doneCh := make(chan struct{}) defer close(doneCh) - retryTimerCh := newRetryTimerSimple(doneCh) - for { - select { - case <-retryTimerCh: - - wlk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0) - if err == lock.ErrAlreadyLocked { - // Lock already present, sleep and attempt again + var wlk *lock.LockedFile + for range newRetryTimerSimple(doneCh) { + wlk, err = lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0) + if err == lock.ErrAlreadyLocked { + // Lock already present, sleep and attempt again + logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime()) + continue + } + if err != nil { + break + } - logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime()) - continue - } - if err != nil { - return err - } - defer wlk.Close() + if err = jsonLoad(wlk, format); err != nil { + break + } - err = jsonLoad(wlk, format) - if err != nil { - return err - } + // Check if format needs to be updated + if format.ID != "" { + err = nil + break + } - // Check if it needs to be updated - if format.ID != "" { - return nil - } - format.ID = mustGetUUID() - return jsonSave(wlk, format) + format.ID = mustGetUUID() + if err = jsonSave(wlk, format); err != nil { + break } } - + if wlk != nil { + wlk.Close() + } + return err } diff --git a/cmd/format-xl_test.go b/cmd/format-xl_test.go index 4f648e38c..c64712f54 100644 --- a/cmd/format-xl_test.go +++ b/cmd/format-xl_test.go @@ -522,7 +522,7 @@ func TestGetXLID(t *testing.T) { } formats[2].ID = "bad-id" - if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat { + if _, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat { t.Fatal("Unexpected Success") } } diff --git a/cmd/fs-v1-helpers.go b/cmd/fs-v1-helpers.go index 84708cab2..d718a6cdc 100644 --- a/cmd/fs-v1-helpers.go +++ b/cmd/fs-v1-helpers.go @@ -294,7 +294,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos // Seek to the requested offset. 
if offset > 0 { - _, err = fr.Seek(offset, os.SEEK_SET) + _, err = fr.Seek(offset, io.SeekStart) if err != nil { logger.LogIf(ctx, err) return nil, 0, err diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 98e9cc602..6361f5870 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -532,8 +532,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, nsUnlocker() return nil, toObjectErr(err, bucket, object) } - var reader io.Reader - reader = io.LimitReader(readCloser, length) + reader := io.LimitReader(readCloser, length) closeFn := func() { readCloser.Close() } diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go index 925bfb1de..879b352af 100644 --- a/cmd/gateway/azure/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -740,6 +740,9 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r * if data.Size() < azureBlockSize/10 { blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(ctx, opts.UserDefined) + if err != nil { + return objInfo, azureToObjectError(err, bucket, object) + } if err = blob.CreateBlockBlobFromReader(data, nil); err != nil { return objInfo, azureToObjectError(err, bucket, object) } diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go index dd1adf38d..914b9f4db 100644 --- a/cmd/gateway/gcs/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -888,6 +888,7 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r object := l.client.Bucket(bucket).Object(key) w := object.NewWriter(ctx) + // Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below // the current chunk-size of the writer. This avoids an unnecessary memory allocation. 
if data.Size() < int64(w.ChunkSize) { diff --git a/cmd/gateway/oss/gateway-oss_test.go b/cmd/gateway/oss/gateway-oss_test.go index 4c95dda3f..36193f5a3 100644 --- a/cmd/gateway/oss/gateway-oss_test.go +++ b/cmd/gateway/oss/gateway-oss_test.go @@ -111,9 +111,8 @@ func TestOSSToObjectError(t *testing.T) { func TestS3MetaToOSSOptions(t *testing.T) { var err error - var headers map[string]string - headers = map[string]string{ + headers := map[string]string{ "x-amz-meta-invalid_meta": "value", } _, err = appendS3MetaToOSSOptions(context.Background(), nil, headers) diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index a466af641..02d7b76a9 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -367,11 +367,9 @@ func getResource(path string, host string, domain string) (string, error) { // If none of the http routes match respond with MethodNotAllowed, in JSON func notFoundHandlerJSON(w http.ResponseWriter, r *http.Request) { writeErrorResponseJSON(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL) - return } // If none of the http routes match respond with MethodNotAllowed func notFoundHandler(w http.ResponseWriter, r *http.Request) { writeErrorResponse(w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r)) - return } diff --git a/cmd/healthcheck-handler_test.go b/cmd/healthcheck-handler_test.go index 2def81403..ad5d6d1fd 100644 --- a/cmd/healthcheck-handler_test.go +++ b/cmd/healthcheck-handler_test.go @@ -34,7 +34,7 @@ func TestGoroutineCountCheck(t *testing.T) { // Make goroutines -- to make sure number of go-routines is higher than threshold if tt.threshold == 5 || tt.threshold == 6 { for i := 0; i < 6; i++ { - go time.Sleep(5) + go time.Sleep(5 * time.Nanosecond) } } if err := goroutineCountCheck(tt.threshold); (err != nil) != tt.wantErr { diff --git a/cmd/http/close.go b/cmd/http/close.go index f8f0e10a9..bf3586eba 100644 --- a/cmd/http/close.go +++ b/cmd/http/close.go @@ -19,16 +19,8 @@ package http import ( "io" "io/ioutil" - "sync" ) -var b512pool = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 512) - return &buf - }, -} - // DrainBody close non nil response with any response Body. // convenient wrapper to drain any remaining data on response body. // diff --git a/cmd/iam.go b/cmd/iam.go index 9ff27032f..8535d1c0e 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -94,23 +94,20 @@ func (sys *IAMSys) Init(objAPI ObjectLayer) error { // the following reasons: // - Read quorum is lost just after the initialization // of the object layer. - retryTimerCh := newRetryTimerSimple(doneCh) - for { - select { - case _ = <-retryTimerCh: - // Load IAMSys once during boot. - if err := sys.refresh(objAPI); err != nil { - if err == errDiskNotFound || - strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || - strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { - logger.Info("Waiting for IAM subsystem to be initialized..") - continue - } - return err + for range newRetryTimerSimple(doneCh) { + // Load IAMSys once during boot. + if err := sys.refresh(objAPI); err != nil { + if err == errDiskNotFound || + strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || + strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { + logger.Info("Waiting for IAM subsystem to be initialized..") + continue } - return nil + return err } + break } + return nil } // DeleteCannedPolicy - deletes a canned policy. 
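The hunks above and below keep applying the same handful of rewrites that staticcheck's simplification checks ask for: comparing byte slices with bytes.Equal instead of testing bytes.Compare(...) against zero, reading a bytes.Buffer through its String method instead of string(buf.Bytes()), and collapsing a for loop whose body is a single-case select into a plain range over the channel. The snippet below is a minimal standalone sketch of those idioms, written for this note rather than taken from the MinIO tree; the names and values in it are illustrative only.

package main

import (
	"bytes"
	"fmt"
	"time"
)

func main() {
	// S1004: prefer bytes.Equal over comparing bytes.Compare to zero.
	sum := []byte{0x01, 0x02}
	expected := []byte{0x01, 0x02}
	if !bytes.Equal(sum, expected) {
		fmt.Println("hash mismatch")
	}

	// Prefer buf.String() over string(buf.Bytes()), as staticcheck suggests.
	var buf bytes.Buffer
	buf.WriteString("response body")
	fmt.Println(buf.String())

	// S1000: a for loop wrapping a single-case select is just a range over
	// the channel, which also terminates cleanly once the channel is closed.
	retries := make(chan struct{})
	go func() {
		for i := 0; i < 3; i++ {
			retries <- struct{}{}
			time.Sleep(10 * time.Millisecond)
		}
		close(retries)
	}()
	for range retries {
		fmt.Println("retry attempt")
	}
}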
diff --git a/cmd/local-locker.go b/cmd/local-locker.go index 6b9470a30..f5004c7fe 100644 --- a/cmd/local-locker.go +++ b/cmd/local-locker.go @@ -145,11 +145,9 @@ func (l *localLocker) ForceUnlock(args dsync.LockArgs) (reply bool, err error) { if len(args.UID) != 0 { return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID) } - if _, ok := l.lockMap[args.Resource]; ok { - // Only clear lock when it is taken - // Remove the lock (irrespective of write or read lock) - delete(l.lockMap, args.Resource) - } + // Only clear lock when it is taken + // Remove the lock (irrespective of write or read lock) + delete(l.lockMap, args.Resource) return true, nil } @@ -159,11 +157,7 @@ func (l *localLocker) DupLockMap() map[string][]lockRequesterInfo { lockCopy := make(map[string][]lockRequesterInfo) for k, v := range l.lockMap { - var lockSlice []lockRequesterInfo - for _, lockInfo := range v { - lockSlice = append(lockSlice, lockInfo) - } - lockCopy[k] = lockSlice + lockCopy[k] = append(lockCopy[k], v...) } return lockCopy } diff --git a/cmd/lock-rpc-server-common_test.go b/cmd/lock-rpc-server-common_test.go index 55e34d17a..019fd2388 100644 --- a/cmd/lock-rpc-server-common_test.go +++ b/cmd/lock-rpc-server-common_test.go @@ -41,7 +41,7 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) { // first test by simulating item has already been deleted locker.ll.removeEntryIfExists(nlrip) { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo(nil) if !reflect.DeepEqual(expectedLri, gotLri) { t.Errorf("Expected %#v, got %#v", expectedLri, gotLri) @@ -52,7 +52,7 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) { locker.ll.lockMap["name"] = []lockRequesterInfo{lri} // add item locker.ll.removeEntryIfExists(nlrip) { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo(nil) if !reflect.DeepEqual(expectedLri, gotLri) { t.Errorf("Expected %#v, got %#v", expectedLri, gotLri) @@ -87,7 +87,7 @@ func TestLockRpcServerRemoveEntry(t *testing.T) { lockRequesterInfo2, } - lri, _ := locker.ll.lockMap["name"] + lri := locker.ll.lockMap["name"] // test unknown uid if locker.ll.removeEntry("name", "unknown-uid", &lri) { @@ -97,7 +97,7 @@ func TestLockRpcServerRemoveEntry(t *testing.T) { if !locker.ll.removeEntry("name", "0123-4567", &lri) { t.Errorf("Expected %#v, got %#v", true, false) } else { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo{lockRequesterInfo2} if !reflect.DeepEqual(expectedLri, gotLri) { t.Errorf("Expected %#v, got %#v", expectedLri, gotLri) @@ -107,7 +107,7 @@ func TestLockRpcServerRemoveEntry(t *testing.T) { if !locker.ll.removeEntry("name", "89ab-cdef", &lri) { t.Errorf("Expected %#v, got %#v", true, false) } else { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo(nil) if !reflect.DeepEqual(expectedLri, gotLri) { t.Errorf("Expected %#v, got %#v", expectedLri, gotLri) diff --git a/cmd/lock-rpc-server_test.go b/cmd/lock-rpc-server_test.go index b5d2cce59..ef8ae67ee 100644 --- a/cmd/lock-rpc-server_test.go +++ b/cmd/lock-rpc-server_test.go @@ -94,7 +94,7 @@ func TestLockRpcServerLock(t *testing.T) { if !result { t.Errorf("Expected %#v, got %#v", true, result) } else { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo{ { Writer: true, @@ -174,7 +174,7 @@ func 
TestLockRpcServerUnlock(t *testing.T) { if !result { t.Errorf("Expected %#v, got %#v", true, result) } else { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo(nil) if !testLockEquality(expectedLri, gotLri) { t.Errorf("Expected %#v, got %#v", expectedLri, gotLri) @@ -210,7 +210,7 @@ func TestLockRpcServerRLock(t *testing.T) { if !result { t.Errorf("Expected %#v, got %#v", true, result) } else { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo{ { Writer: false, @@ -312,7 +312,7 @@ func TestLockRpcServerRUnlock(t *testing.T) { if !result { t.Errorf("Expected %#v, got %#v", true, result) } else { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo{ { Writer: false, @@ -336,7 +336,7 @@ func TestLockRpcServerRUnlock(t *testing.T) { if !result { t.Errorf("Expected %#v, got %#v", true, result) } else { - gotLri, _ := locker.ll.lockMap["name"] + gotLri := locker.ll.lockMap["name"] expectedLri := []lockRequesterInfo(nil) if !testLockEquality(expectedLri, gotLri) { t.Errorf("Expected %#v, got %#v", expectedLri, gotLri) @@ -531,9 +531,6 @@ func TestLockServerInit(t *testing.T) { globalIsDistXL = testCase.isDistXL globalLockServer = nil _, _ = newDsyncNodes(testCase.endpoints) - if err != nil { - t.Fatalf("Got unexpected error initializing lock servers: %v", err) - } if globalLockServer == nil && testCase.isDistXL { t.Errorf("Test %d: Expected initialized lock RPC receiver, but got uninitialized", i+1) } diff --git a/cmd/lock-stat.go b/cmd/lock-stat.go deleted file mode 100644 index f72b30e67..000000000 --- a/cmd/lock-stat.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -// lockStat - encapsulates total, blocked and granted lock counts. -type lockStat struct { - total int64 - blocked int64 - granted int64 -} - -// lockWaiting - updates lock stat when a lock becomes blocked. -func (ls *lockStat) lockWaiting() { - ls.blocked++ - ls.total++ -} - -// lockGranted - updates lock stat when a lock is granted. -func (ls *lockStat) lockGranted() { - ls.blocked-- - ls.granted++ -} - -// lockTimedOut - updates lock stat when a lock is timed out. -func (ls *lockStat) lockTimedOut() { - ls.blocked-- - ls.total-- -} - -// lockRemoved - updates lock stat when a lock is removed, by Unlock -// or ForceUnlock. -func (ls *lockStat) lockRemoved(granted bool) { - if granted { - ls.granted-- - ls.total-- - - } else { - ls.blocked-- - ls.total-- - } -} diff --git a/cmd/logger/logonce.go b/cmd/logger/logonce.go index cfa16951b..861d7cd5b 100644 --- a/cmd/logger/logonce.go +++ b/cmd/logger/logonce.go @@ -56,12 +56,11 @@ func (l *logOnceType) logOnceIf(ctx context.Context, err error, id interface{}) // Cleanup the map every 30 minutes so that the log message is printed again for the user to notice. 
func (l *logOnceType) cleanupRoutine() { for { - select { - case <-time.After(time.Minute * 30): - l.Lock() - l.IDMap = make(map[interface{}]error) - l.Unlock() - } + l.Lock() + l.IDMap = make(map[interface{}]error) + l.Unlock() + + time.Sleep(30 * time.Minute) } } diff --git a/cmd/logger/target/http/http.go b/cmd/logger/target/http/http.go index d169cf18c..d87bb3111 100644 --- a/cmd/logger/target/http/http.go +++ b/cmd/logger/target/http/http.go @@ -50,6 +50,9 @@ func (h *Target) startHTTPLogger() { } req, err := gohttp.NewRequest("POST", h.endpoint, bytes.NewBuffer(logJSON)) + if err != nil { + continue + } req.Header.Set("Content-Type", "application/json") resp, err := h.client.Do(req) diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index d17d4cd02..eb188c2a4 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -102,7 +102,6 @@ func newNSLock(isDistXL bool) *nsLockMap { nsMutex := nsLockMap{ isDistXL: isDistXL, lockMap: make(map[nsParam]*nsLock), - counters: &lockStat{}, } return &nsMutex } @@ -127,9 +126,6 @@ type nsLock struct { // nsLockMap - namespace lock map, provides primitives to Lock, // Unlock, RLock and RUnlock. type nsLockMap struct { - // Lock counter used for lock debugging. - counters *lockStat - // Indicates if namespace is part of a distributed setup. isDistXL bool lockMap map[nsParam]*nsLock @@ -259,11 +255,8 @@ func (n *nsLockMap) ForceUnlock(volume, path string) { dsync.NewDRWMutex(pathJoin(volume, path), globalDsync).ForceUnlock() } - param := nsParam{volume, path} - if _, found := n.lockMap[param]; found { - // Remove lock from the map. - delete(n.lockMap, param) - } + // Remove lock from the map. + delete(n.lockMap, nsParam{volume, path}) } // lockInstance - frontend/top-level interface for namespace locks. diff --git a/cmd/namespace-lock_test.go b/cmd/namespace-lock_test.go index 6bd8c2eb3..70fbae9db 100644 --- a/cmd/namespace-lock_test.go +++ b/cmd/namespace-lock_test.go @@ -47,7 +47,6 @@ func TestNamespaceLockTest(t *testing.T) { unlk func(s1, s2, s3 string) rlk func(s1, s2, s3 string, t time.Duration) bool runlk func(s1, s2, s3 string) - lkCount int lockedRefCount uint unlockedRefCount uint shouldPass bool diff --git a/cmd/notification.go b/cmd/notification.go index 3430024ae..666bfbee1 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -271,6 +271,9 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io isDir: false, sys: nil, }) + if zerr != nil { + return profilingDataFound + } zwriter, zerr := zipWriter.CreateHeader(header) if zerr != nil { @@ -602,22 +605,19 @@ func (sys *NotificationSys) Init(objAPI ObjectLayer) error { // the following reasons: // - Read quorum is lost just after the initialization // of the object layer. 
- retryTimerCh := newRetryTimerSimple(doneCh) - for { - select { - case _ = <-retryTimerCh: - if err := sys.refresh(objAPI); err != nil { - if err == errDiskNotFound || - strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || - strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { - logger.Info("Waiting for notification subsystem to be initialized..") - continue - } - return err + for range newRetryTimerSimple(doneCh) { + if err := sys.refresh(objAPI); err != nil { + if err == errDiskNotFound || + strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || + strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { + logger.Info("Waiting for notification subsystem to be initialized..") + continue } - return nil + return err } + break } + return nil } // AddRulesMap - adds rules map for bucket name. diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 3cf7b8236..a122fcbbe 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -211,10 +211,7 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectIn func ifModifiedSince(objTime time.Time, givenTime time.Time) bool { // The Date-Modified header truncates sub-second precision, so // use mtime < t+1s instead of mtime <= t to check for unmodified. - if objTime.After(givenTime.Add(1 * time.Second)) { - return true - } - return false + return objTime.After(givenTime.Add(1 * time.Second)) } // canonicalizeETag returns ETag with leading and trailing double-quotes removed, diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index d0d6f0508..c0d7514c2 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -838,7 +838,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re reader = pipeReader length = -1 - snappyWriter := snappy.NewWriter(pipeWriter) + snappyWriter := snappy.NewBufferedWriter(pipeWriter) go func() { // Compress the decompressed source object. @@ -1217,7 +1217,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10) pipeReader, pipeWriter := io.Pipe() - snappyWriter := snappy.NewWriter(pipeWriter) + snappyWriter := snappy.NewBufferedWriter(pipeWriter) var actualReader *hash.Reader actualReader, err = hash.NewReader(reader, size, md5hex, sha256hex, actualSize) @@ -1660,7 +1660,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt reader = pipeReader length = -1 - snappyWriter := snappy.NewWriter(pipeWriter) + snappyWriter := snappy.NewBufferedWriter(pipeWriter) go func() { // Compress the decompressed source object. 
@@ -1902,7 +1902,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http isCompressed := false if objectAPI.IsCompressionSupported() && compressPart { pipeReader, pipeWriter = io.Pipe() - snappyWriter := snappy.NewWriter(pipeWriter) + snappyWriter := snappy.NewBufferedWriter(pipeWriter) var actualReader *hash.Reader actualReader, err = hash.NewReader(reader, size, md5hex, sha256hex, actualSize) diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 38858757b..fddf94858 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -855,6 +855,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, shouldPass: true, + fault: None, }, // Test case - 2 // Small chunk size. @@ -869,6 +870,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, shouldPass: true, + fault: None, }, // Test case - 3 // Empty data @@ -897,6 +899,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam accessKey: "", secretKey: "", shouldPass: false, + fault: None, }, // Test case - 5 // Wrong auth header returns as bad request. @@ -912,6 +915,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam secretKey: credentials.SecretKey, shouldPass: false, removeAuthHeader: true, + fault: None, }, // Test case - 6 // Large chunk size.. also passes. @@ -926,6 +930,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, shouldPass: true, + fault: None, }, // Test case - 7 // Chunk with malformed encoding. @@ -1017,6 +1022,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam secretKey: credentials.SecretKey, shouldPass: true, contentEncoding: "aws-chunked,gzip", + fault: None, }, } // Iterating over the cases, fetching the object validating the response. diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index dd4383ae5..19a14dd93 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -465,7 +465,7 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan if err != nil { t.Fatalf("%s: %s", instanceType, err) } - if string(bytesBuffer.Bytes()) != "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." { + if bytesBuffer.String() != "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." { t.Errorf("%s: Invalid upload ID error mismatch.", instanceType) } } diff --git a/cmd/policy.go b/cmd/policy.go index 3c57ad122..51d641a02 100644 --- a/cmd/policy.go +++ b/cmd/policy.go @@ -158,23 +158,20 @@ func (sys *PolicySys) Init(objAPI ObjectLayer) error { // the following reasons: // - Read quorum is lost just after the initialization // of the object layer. - retryTimerCh := newRetryTimerSimple(doneCh) - for { - select { - case _ = <-retryTimerCh: - // Load PolicySys once during boot. 
- if err := sys.refresh(objAPI); err != nil { - if err == errDiskNotFound || - strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || - strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { - logger.Info("Waiting for policy subsystem to be initialized..") - continue - } - return err + for range newRetryTimerSimple(doneCh) { + // Load PolicySys once during boot. + if err := sys.refresh(objAPI); err != nil { + if err == errDiskNotFound || + strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || + strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { + logger.Info("Waiting for policy subsystem to be initialized..") + continue } - return nil + return err } + break } + return nil } // NewPolicySys - creates new policy system. diff --git a/cmd/posix.go b/cmd/posix.go index 4f391ea5d..fb6a664d1 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -860,7 +860,7 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif return 0, err } - if bytes.Compare(h.Sum(nil), verifier.sum) != 0 { + if !bytes.Equal(h.Sum(nil), verifier.sum) { return 0, hashMismatchError{hex.EncodeToString(verifier.sum), hex.EncodeToString(h.Sum(nil))} } diff --git a/cmd/posix_test.go b/cmd/posix_test.go index cf58a3e69..25e380eca 100644 --- a/cmd/posix_test.go +++ b/cmd/posix_test.go @@ -188,7 +188,7 @@ func TestPosixIsDirEmpty(t *testing.T) { // Should give false on non-existent directory. dir1 := slashpath.Join(tmp, "non-existent-directory") - if isDirEmpty(dir1) != false { + if isDirEmpty(dir1) { t.Error("expected false for non-existent directory, got true") } @@ -199,7 +199,7 @@ func TestPosixIsDirEmpty(t *testing.T) { t.Fatal(err) } - if isDirEmpty(dir2) != false { + if isDirEmpty(dir2) { t.Error("expected false for a file, got true") } @@ -210,7 +210,7 @@ func TestPosixIsDirEmpty(t *testing.T) { t.Fatal(err) } - if isDirEmpty(dir3) != true { + if !isDirEmpty(dir3) { t.Error("expected true for empty dir, got false") } } diff --git a/cmd/rpc/pool.go b/cmd/rpc/pool.go index 72f627316..84dcbf6e1 100644 --- a/cmd/rpc/pool.go +++ b/cmd/rpc/pool.go @@ -21,13 +21,6 @@ import ( "sync" ) -var b512pool = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 512) - return &buf - }, -} - // A Pool is a type-safe wrapper around a sync.Pool. type Pool struct { p *sync.Pool diff --git a/cmd/rpc/server_test.go b/cmd/rpc/server_test.go index 49b6c4396..f2912d104 100644 --- a/cmd/rpc/server_test.go +++ b/cmd/rpc/server_test.go @@ -77,11 +77,6 @@ func (t mytype) Foo(a *Auth, b *int) error { return nil } -// incompatible method because of unexported method. -func (t mytype) foo(a *Auth, b *int) error { - return nil -} - // incompatible method because of first argument is not Authenticator. func (t *mytype) Bar(a, b *int) error { return nil diff --git a/cmd/rpc_test.go b/cmd/rpc_test.go index a8b9cca73..0f6d03fab 100644 --- a/cmd/rpc_test.go +++ b/cmd/rpc_test.go @@ -285,10 +285,8 @@ func TestRPCClientCall(t *testing.T) { case1ExpectedResult := 19 * 8 testCases := []struct { - serviceMethod string - args interface { - SetAuthArgs(args AuthArgs) - } + serviceMethod string + args *Args result interface{} changeConfig bool expectedResult interface{} diff --git a/cmd/signature-v4-parser.go b/cmd/signature-v4-parser.go index 3f4326181..00668e846 100644 --- a/cmd/signature-v4-parser.go +++ b/cmd/signature-v4-parser.go @@ -186,9 +186,8 @@ func doesV4PresignParamsExist(query url.Values) APIErrorCode { // Parses all the presigned signature values into separate elements. 
func parsePreSignV4(query url.Values, region string) (psv preSignValues, aec APIErrorCode) { - var err APIErrorCode // verify whether the required query params exist. - err = doesV4PresignParamsExist(query) + err := doesV4PresignParamsExist(query) if err != ErrNone { return psv, err } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 9bbb2ecf7..14ad50ac5 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -306,7 +306,6 @@ type TestServer struct { SecretKey string Server *httptest.Server Obj ObjectLayer - endpoints EndpointList } // UnstartedTestServer - Configures a temp FS/XL backend, @@ -949,7 +948,7 @@ func preSignV2(req *http.Request, accessKeyID, secretAccessKey string, expires i // Sign given request using Signature V2. func signRequestV2(req *http.Request, accessKey, secretKey string) error { - req = s3signer.SignV2(*req, accessKey, secretKey, false) + s3signer.SignV2(*req, accessKey, secretKey, false) return nil } @@ -1305,36 +1304,6 @@ func getRandomBucketName() string { } -// TruncateWriter - Writes `n` bytes, then returns with number of bytes written. -// differs from iotest.TruncateWriter, the difference is commented in the Write method. -func TruncateWriter(w io.Writer, n int64) io.Writer { - return &truncateWriter{w, n} -} - -type truncateWriter struct { - w io.Writer - n int64 -} - -func (t *truncateWriter) Write(p []byte) (n int, err error) { - if t.n <= 0 { - return len(p), nil - } - // real write - n = len(p) - if int64(n) > t.n { - n = int(t.n) - } - n, err = t.w.Write(p[0:n]) - t.n -= int64(n) - // Removed from iotest.TruncateWriter. - // Need the Write method to return truncated number of bytes written, not the size of the buffer requested to be written. - // if err == nil { - // n = len(p) - // } - return -} - // NewEOFWriter returns a Writer that writes to w, // but returns EOF error after writing n bytes. func NewEOFWriter(w io.Writer, n int64) io.Writer { @@ -1696,11 +1665,10 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl // Register the API end points with XL object layer. // Registering only the GetObject handler. apiRouter := initTestAPIEndPoints(obj, endpoints) - var f http.HandlerFunc - f = func(w http.ResponseWriter, r *http.Request) { + f := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r.RequestURI = r.URL.RequestURI() apiRouter.ServeHTTP(w, r) - } + }) return bucketName, f, nil } @@ -2173,40 +2141,6 @@ func initTestWebRPCEndPoint(objLayer ObjectLayer) http.Handler { return muxRouter } -func StartTestS3PeerRPCServer(t TestErrHandler) (TestServer, []string) { - // init disks - objLayer, fsDirs, err := prepareXL16() - if err != nil { - t.Fatalf("%s", err) - } - - // Create an instance of TestServer. - testRPCServer := TestServer{} - - if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil { - t.Fatalf("%s", err) - } - - // Fetch credentials for the test server. - credentials := globalServerConfig.GetCredential() - testRPCServer.AccessKey = credentials.AccessKey - testRPCServer.SecretKey = credentials.SecretKey - - // set object layer - testRPCServer.Obj = objLayer - globalObjLayerMutex.Lock() - globalObjectAPI = objLayer - globalObjLayerMutex.Unlock() - - // Register router on a new mux - muxRouter := mux.NewRouter().SkipClean(true) - registerPeerRPCRouter(muxRouter) - - // Initialize and run the TestServer. 
- testRPCServer.Server = httptest.NewServer(muxRouter) - return testRPCServer, fsDirs -} - // generateTLSCertKey creates valid key/cert with registered DNS or IP address // depending on the passed parameter. That way, we can use tls config without // passing InsecureSkipVerify flag. This code is a simplified version of diff --git a/cmd/utils.go b/cmd/utils.go index ba69b3a69..1da72988d 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -228,11 +228,7 @@ func getProfileData() ([]byte, error) { } // Starts a profiler returns nil if profiler is not enabled, caller needs to handle this. -func startProfiler(profilerType, dirPath string) (interface { - Stop() - Path() string -}, error) { - +func startProfiler(profilerType, dirPath string) (minioProfiler, error) { var err error if dirPath == "" { dirPath, err = ioutil.TempDir("", "profile") @@ -277,14 +273,17 @@ func startProfiler(profilerType, dirPath string) (interface { }, nil } -// Global profiler to be used by service go-routine. -var globalProfiler interface { +// minioProfiler - minio profiler interface. +type minioProfiler interface { // Stop the profiler Stop() // Return the path of the profiling file Path() string } +// Global profiler to be used by service go-routine. +var globalProfiler minioProfiler + // dump the request into a string in JSON format. func dumpRequest(r *http.Request) string { header := cloneHeader(r.Header) @@ -307,7 +306,7 @@ func dumpRequest(r *http.Request) string { } // Formatted string. - return strings.TrimSpace(string(buffer.Bytes())) + return strings.TrimSpace(buffer.String()) } // isFile - returns whether given path is a file or not. diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index afec74ceb..c96aa8a80 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -913,7 +913,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10) pipeReader, pipeWriter := io.Pipe() - snappyWriter := snappy.NewWriter(pipeWriter) + snappyWriter := snappy.NewBufferedWriter(pipeWriter) var actualReader *hash.Reader actualReader, err = hash.NewReader(reader, size, "", "", actualSize) @@ -1313,14 +1313,15 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) { // Response writer should be limited early on for decryption upto required length, // additionally also skipping mod(offset)64KiB boundaries. writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length) - writer, startOffset, length, err = DecryptBlocksRequest(writer, r, args.BucketName, objectName, startOffset, length, info, false) + writer, startOffset, length, err = DecryptBlocksRequest(writer, r, + args.BucketName, objectName, startOffset, length, info, false) if err != nil { writeWebErrorResponse(w, err) return err } } httpWriter := ioutil.WriteOnClose(writer) - if err = getObject(ctx, args.BucketName, objectName, 0, length, httpWriter, "", opts); err != nil { + if err = getObject(ctx, args.BucketName, objectName, startOffset, length, httpWriter, "", opts); err != nil { httpWriter.Close() if info.IsCompressed() { // Wait for decompression go-routine to retire. diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index aa6ab1ba6..602965f9f 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -120,7 +120,7 @@ func TestWriteWebErrorResponse(t *testing.T) { recvDesc := buffer.Bytes() // Check if the written desc is same as the one expected. 
if !bytes.Equal(recvDesc, []byte(desc)) { - t.Errorf("Test %d: Unexpected response, expecting %s, got %s", i+1, desc, string(buffer.Bytes())) + t.Errorf("Test %d: Unexpected response, expecting %s, got %s", i+1, desc, buffer.String()) } buffer.Reset() } @@ -491,23 +491,23 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa t.Fatalf("Was not able to upload an object, %v", err) } - test := func(token string) (error, *ListObjectsRep) { + test := func(token string) (*ListObjectsRep, error) { listObjectsRequest := ListObjectsArgs{BucketName: bucketName, Prefix: ""} listObjectsReply := &ListObjectsRep{} var req *http.Request req, err = newTestWebRPCRequest("Web.ListObjects", token, listObjectsRequest) if err != nil { - t.Fatalf("Failed to create HTTP request: %v", err) + return nil, err } apiRouter.ServeHTTP(rec, req) if rec.Code != http.StatusOK { - return fmt.Errorf("Expected the response status to be 200, but instead found `%d`", rec.Code), listObjectsReply + return listObjectsReply, fmt.Errorf("Expected the response status to be 200, but instead found `%d`", rec.Code) } err = getTestWebRPCResponse(rec, &listObjectsReply) if err != nil { - return err, listObjectsReply + return listObjectsReply, err } - return nil, listObjectsReply + return listObjectsReply, nil } verifyReply := func(reply *ListObjectsRep) { if len(reply.Objects) == 0 { @@ -522,14 +522,14 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa } // Authenticated ListObjects should succeed. - err, reply := test(authorization) + reply, err := test(authorization) if err != nil { t.Fatal(err) } verifyReply(reply) // Unauthenticated ListObjects should fail. - err, _ = test("") + _, err = test("") if err == nil { t.Fatalf("Expected error `%s`", err) } @@ -552,7 +552,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa defer globalPolicySys.Remove(bucketName) // Unauthenticated ListObjects with READ bucket policy should succeed. - err, reply = test("") + reply, err = test("") if err != nil { t.Fatal(err) } @@ -1559,7 +1559,7 @@ func TestWebCheckAuthorization(t *testing.T) { if rec.Code != http.StatusForbidden { t.Fatalf("Expected the response status to be 403, but instead found `%d`", rec.Code) } - resp := string(rec.Body.Bytes()) + resp := rec.Body.String() if !strings.EqualFold(resp, errAuthentication.Error()) { t.Fatalf("Unexpected error message, expected: %s, found: `%s`", errAuthentication, resp) } @@ -1580,7 +1580,7 @@ func TestWebCheckAuthorization(t *testing.T) { if rec.Code != http.StatusForbidden { t.Fatalf("Expected the response status to be 403, but instead found `%d`", rec.Code) } - resp = string(rec.Body.Bytes()) + resp = rec.Body.String() if !strings.EqualFold(resp, errAuthentication.Error()) { t.Fatalf("Unexpected error message, expected: `%s`, found: `%s`", errAuthentication, resp) } diff --git a/cmd/xl-sets.go b/cmd/xl-sets.go index 895cf2507..9e7d59119 100644 --- a/cmd/xl-sets.go +++ b/cmd/xl-sets.go @@ -1111,16 +1111,8 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives)) // Copy "after" drive state too from before. 
for k, v := range beforeDrives { - res.Before.Drives[k] = madmin.HealDriveInfo{ - UUID: v.UUID, - Endpoint: v.Endpoint, - State: v.State, - } - res.After.Drives[k] = madmin.HealDriveInfo{ - UUID: v.UUID, - Endpoint: v.Endpoint, - State: v.State, - } + res.Before.Drives[k] = madmin.HealDriveInfo(v) + res.After.Drives[k] = madmin.HealDriveInfo(v) } for index, sErr := range sErrs { @@ -1253,12 +1245,8 @@ func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove b if err != nil { return result, err } - for _, v := range healResult.Before.Drives { - result.Before.Drives = append(result.Before.Drives, v) - } - for _, v := range healResult.After.Drives { - result.After.Drives = append(result.After.Drives, v) - } + result.Before.Drives = append(result.Before.Drives, healResult.Before.Drives...) + result.After.Drives = append(result.After.Drives, healResult.After.Drives...) } for _, endpoint := range s.endpoints { @@ -1326,10 +1314,7 @@ func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { return nil, err } for _, currBucket := range buckets { - healBuckets[currBucket.Name] = BucketInfo{ - Name: currBucket.Name, - Created: currBucket.Created, - } + healBuckets[currBucket.Name] = BucketInfo(currBucket) } } for _, bucketInfo := range healBuckets { diff --git a/cmd/xl-v1-bucket.go b/cmd/xl-v1-bucket.go index 2c5cef2e6..e368ca3fe 100644 --- a/cmd/xl-v1-bucket.go +++ b/cmd/xl-v1-bucket.go @@ -130,11 +130,7 @@ func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucke } volInfo, serr := disk.StatVol(bucketName) if serr == nil { - bucketInfo = BucketInfo{ - Name: volInfo.Name, - Created: volInfo.Created, - } - return bucketInfo, nil + return BucketInfo(volInfo), nil } err = serr // For any reason disk went offline continue and pick the next one. @@ -185,10 +181,7 @@ func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, if isReservedOrInvalidBucket(volInfo.Name) { continue } - bucketsInfo = append(bucketsInfo, BucketInfo{ - Name: volInfo.Name, - Created: volInfo.Created, - }) + bucketsInfo = append(bucketsInfo, BucketInfo(volInfo)) } // For buckets info empty, loop once again to check // if we have, can happen if disks were down. 
diff --git a/cmd/xl-v1-healing-common_test.go b/cmd/xl-v1-healing-common_test.go index ae685a9d6..cd8292e11 100644 --- a/cmd/xl-v1-healing-common_test.go +++ b/cmd/xl-v1-healing-common_test.go @@ -278,7 +278,7 @@ func TestDisksWithAllParts(t *testing.T) { t.Fatalf("Failed to putObject %v", err) } - partsMetadata, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) + _, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) readQuorum := len(xl.storageDisks) / 2 if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { t.Fatalf("Failed to read xl meta data %v", reducedErr) @@ -286,7 +286,7 @@ func TestDisksWithAllParts(t *testing.T) { // Test that all disks are returned without any failures with // unmodified meta data - partsMetadata, errs = readAllXLMetadata(ctx, xlDisks, bucket, object) + partsMetadata, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) if err != nil { t.Fatalf("Failed to read xl meta data %v", err) } diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go index 1d971d3cc..e6849a8a8 100644 --- a/cmd/xl-v1.go +++ b/cmd/xl-v1.go @@ -120,28 +120,18 @@ func getStorageInfo(disks []StorageAPI) StorageInfo { return StorageInfo{} } - _, sscParity := getRedundancyCount(standardStorageClass, len(disks)) - _, rrscparity := getRedundancyCount(reducedRedundancyStorageClass, len(disks)) - - // Total number of online data drives available - // This is the number of drives we report free and total space for - availableDataDisks := uint64(onlineDisks - sscParity) - - // Available data disks can be zero when onlineDisks is equal to parity, - // at that point we simply choose online disks to calculate the size. - if availableDataDisks == 0 { - availableDataDisks = uint64(onlineDisks) - } - - storageInfo := StorageInfo{} - // Combine all disks to get total usage. 
var used uint64 for _, di := range validDisksInfo { used = used + di.Used } - storageInfo.Used = used + _, sscParity := getRedundancyCount(standardStorageClass, len(disks)) + _, rrscparity := getRedundancyCount(reducedRedundancyStorageClass, len(disks)) + + storageInfo := StorageInfo{ + Used: used, + } storageInfo.Backend.Type = BackendErasure storageInfo.Backend.OnlineDisks = onlineDisks storageInfo.Backend.OfflineDisks = offlineDisks diff --git a/pkg/certs/certs_test.go b/pkg/certs/certs_test.go index c17946584..ac89e84ad 100644 --- a/pkg/certs/certs_test.go +++ b/pkg/certs/certs_test.go @@ -60,7 +60,7 @@ func TestCertNew(t *testing.T) { if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) { t.Error("certificate doesn't match expected certificate") } - c, err = certs.New("server.crt", "server2.key", tls.LoadX509KeyPair) + _, err = certs.New("server.crt", "server2.key", tls.LoadX509KeyPair) if err == nil { t.Fatal("Expected to fail but got success") } diff --git a/pkg/disk/disk_test.go b/pkg/disk/disk_test.go index 415a36242..72facc331 100644 --- a/pkg/disk/disk_test.go +++ b/pkg/disk/disk_test.go @@ -38,18 +38,6 @@ func TestFree(t *testing.T) { t.Fatal(err) } - if di.Total <= 0 { - t.Error("Unexpected Total", di.Total) - } - if di.Free <= 0 { - t.Error("Unexpected Free", di.Free) - } - if di.Files <= 0 { - t.Error("Unexpected Files", di.Files) - } - if di.Ffree <= 0 { - t.Error("Unexpected Ffree", di.Ffree) - } if di.FSType == "UNKNOWN" { t.Error("Unexpected FSType", di.FSType) } diff --git a/pkg/ellipses/ellipses.go b/pkg/ellipses/ellipses.go index faab0834c..9314ba95a 100644 --- a/pkg/ellipses/ellipses.go +++ b/pkg/ellipses/ellipses.go @@ -38,10 +38,10 @@ var ( // `{1...64}` // `{33...64}` func parseEllipsesRange(pattern string) (seq []string, err error) { - if strings.Index(pattern, openBraces) == -1 { + if !strings.Contains(pattern, openBraces) { return nil, errors.New("Invalid argument") } - if strings.Index(pattern, closeBraces) == -1 { + if !strings.Contains(pattern, closeBraces) { return nil, errors.New("Invalid argument") } @@ -145,7 +145,7 @@ func (p Pattern) Expand() []string { case p.Suffix != "" && p.Prefix == "": labels = append(labels, fmt.Sprintf("%s%s", p.Seq[i], p.Suffix)) case p.Suffix == "" && p.Prefix == "": - labels = append(labels, fmt.Sprintf("%s", p.Seq[i])) + labels = append(labels, p.Seq[i]) default: labels = append(labels, fmt.Sprintf("%s%s%s", p.Prefix, p.Seq[i], p.Suffix)) } diff --git a/pkg/event/config.go b/pkg/event/config.go index 5c03c3f46..b7f189c9b 100644 --- a/pkg/event/config.go +++ b/pkg/event/config.go @@ -191,13 +191,11 @@ func (q Queue) ToRulesMap() RulesMap { // Unused. Available for completion. type lambda struct { - common ARN string `xml:"CloudFunction"` } // Unused. Available for completion. 
type topic struct { - common ARN string `xml:"Topic" json:"Topic"` } diff --git a/pkg/event/config_test.go b/pkg/event/config_test.go index 52d84a08e..0c50a561a 100644 --- a/pkg/event/config_test.go +++ b/pkg/event/config_test.go @@ -199,8 +199,7 @@ func TestQueueUnmarshalXML(t *testing.T) { } func TestQueueValidate(t *testing.T) { - var data []byte - data = []byte(` + data := []byte(` 1 @@ -281,8 +280,7 @@ func TestQueueValidate(t *testing.T) { } func TestQueueSetRegion(t *testing.T) { - var data []byte - data = []byte(` + data := []byte(` 1 @@ -341,8 +339,7 @@ func TestQueueSetRegion(t *testing.T) { } func TestQueueToRulesMap(t *testing.T) { - var data []byte - data = []byte(` + data := []byte(` 1 @@ -520,8 +517,7 @@ func TestConfigUnmarshalXML(t *testing.T) { } func TestConfigValidate(t *testing.T) { - var data []byte - data = []byte(` + data := []byte(` 1 @@ -628,8 +624,7 @@ func TestConfigValidate(t *testing.T) { } func TestConfigSetRegion(t *testing.T) { - var data []byte - data = []byte(` + data := []byte(` 1 @@ -733,8 +728,7 @@ func TestConfigSetRegion(t *testing.T) { } func TestConfigToRulesMap(t *testing.T) { - var data []byte - data = []byte(` + data := []byte(` 1 diff --git a/pkg/handlers/http-tracer_test.go b/pkg/handlers/http-tracer_test.go index 4bb242095..1a34614a6 100644 --- a/pkg/handlers/http-tracer_test.go +++ b/pkg/handlers/http-tracer_test.go @@ -142,13 +142,13 @@ Test-Header: TestHeaderValue status, testCase.expectedStatus) } - matched, err := regexp.MatchString(testCase.expectedLogRegexp, string(logOutput.Bytes())) + matched, err := regexp.MatchString(testCase.expectedLogRegexp, logOutput.String()) if err != nil { t.Fatalf("Test %d: Incorrect regexp: %v", i+1, err) } if !matched { - t.Fatalf("Test %d: Unexpected log content, found: `%s`", i+1, string(logOutput.Bytes())) + t.Fatalf("Test %d: Unexpected log content, found: `%s`", i+1, logOutput.String()) } } } diff --git a/pkg/hash/reader_test.go b/pkg/hash/reader_test.go index a8013a2dc..82f886ab5 100644 --- a/pkg/hash/reader_test.go +++ b/pkg/hash/reader_test.go @@ -60,6 +60,9 @@ func TestHashReaderHelperMethods(t *testing.T) { t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", hex.EncodeToString(r.MD5Current())) } expectedSHA256, err := hex.DecodeString("88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589") + if err != nil { + t.Fatal(err) + } if !bytes.Equal(r.SHA256(), expectedSHA256) { t.Errorf("Expected md5hex \"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589\", got %s", r.SHA256HexString()) } diff --git a/pkg/madmin/api.go b/pkg/madmin/api.go index 6ed79d808..42a0b93c8 100644 --- a/pkg/madmin/api.go +++ b/pkg/madmin/api.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "io/ioutil" - "math/rand" "net/http" "net/http/httputil" "net/url" @@ -61,9 +60,6 @@ type AdminClient struct { // Advanced functionality. isTraceEnabled bool traceOutput io.Writer - - // Random seed. - random *rand.Rand } // Global constants. @@ -118,21 +114,17 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Ad } // SetAppInfo - add application details to user agent. -func (c *AdminClient) SetAppInfo(appName string, appVersion string) { +func (adm *AdminClient) SetAppInfo(appName string, appVersion string) { // if app name and version is not set, we do not a new user // agent. 
if appName != "" && appVersion != "" { - c.appInfo = struct { - appName string - appVersion string - }{} - c.appInfo.appName = appName - c.appInfo.appVersion = appVersion + adm.appInfo.appName = appName + adm.appInfo.appVersion = appVersion } } // SetCustomTransport - set new custom transport. -func (c *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) { +func (adm *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) { // Set this to override default transport // ``http.DefaultTransport``. // @@ -147,28 +139,28 @@ func (c *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) // } // api.SetTransport(tr) // - if c.httpClient != nil { - c.httpClient.Transport = customHTTPTransport + if adm.httpClient != nil { + adm.httpClient.Transport = customHTTPTransport } } // TraceOn - enable HTTP tracing. -func (c *AdminClient) TraceOn(outputStream io.Writer) { +func (adm *AdminClient) TraceOn(outputStream io.Writer) { // if outputStream is nil then default to os.Stdout. if outputStream == nil { outputStream = os.Stdout } // Sets a new output stream. - c.traceOutput = outputStream + adm.traceOutput = outputStream // Enable tracing. - c.isTraceEnabled = true + adm.isTraceEnabled = true } // TraceOff - disable HTTP tracing. -func (c *AdminClient) TraceOff() { +func (adm *AdminClient) TraceOff() { // Disable tracing. - c.isTraceEnabled = false + adm.isTraceEnabled = false } // requestMetadata - is container for all the values to make a @@ -181,7 +173,7 @@ type requestData struct { } // Filter out signature value from Authorization header. -func (c AdminClient) filterSignature(req *http.Request) { +func (adm AdminClient) filterSignature(req *http.Request) { /// Signature V4 authorization header. // Save the original auth. @@ -197,19 +189,18 @@ func (c AdminClient) filterSignature(req *http.Request) { // Set a temporary redacted auth req.Header.Set("Authorization", newAuth) - return } // dumpHTTP - dump HTTP request and response. -func (c AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error { +func (adm AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error { // Starts http dump. - _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") + _, err := fmt.Fprintln(adm.traceOutput, "---------START-HTTP---------") if err != nil { return err } // Filter out Signature field from Authorization header. - c.filterSignature(req) + adm.filterSignature(req) // Only display request header. reqTrace, err := httputil.DumpRequestOut(req, false) @@ -218,7 +209,7 @@ func (c AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error { } // Write request to trace output. - _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) + _, err = fmt.Fprint(adm.traceOutput, string(reqTrace)) if err != nil { return err } @@ -254,24 +245,24 @@ func (c AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error { } } // Write response to trace output. - _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + _, err = fmt.Fprint(adm.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) if err != nil { return err } // Ends the http dump. - _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + _, err = fmt.Fprintln(adm.traceOutput, "---------END-HTTP---------") return err } // do - execute http request. 
-func (c AdminClient) do(req *http.Request) (*http.Response, error) { +func (adm AdminClient) do(req *http.Request) (*http.Response, error) { var resp *http.Response var err error // Do the request in a loop in case of 307 http is met since golang still doesn't // handle properly this situation (https://github.com/golang/go/issues/7912) for { - resp, err = c.httpClient.Do(req) + resp, err = adm.httpClient.Do(req) if err != nil { // Handle this specifically for now until future Golang // versions fix this issue properly. @@ -304,8 +295,8 @@ func (c AdminClient) do(req *http.Request) (*http.Response, error) { } // If trace is enabled, dump http request and response. - if c.isTraceEnabled { - err = c.dumpHTTP(req, resp) + if adm.isTraceEnabled { + err = adm.dumpHTTP(req, resp) if err != nil { return nil, err } @@ -323,7 +314,7 @@ var successStatus = []int{ // executeMethod - instantiates a given method, and retries the // request upon any error up to maxRetries attempts in a binomially // delayed manner using a standard back off algorithm. -func (c AdminClient) executeMethod(method string, reqData requestData) (res *http.Response, err error) { +func (adm AdminClient) executeMethod(method string, reqData requestData) (res *http.Response, err error) { // Create a done channel to control 'ListObjects' go routine. doneCh := make(chan struct{}, 1) @@ -333,13 +324,13 @@ func (c AdminClient) executeMethod(method string, reqData requestData) (res *htt // Instantiate a new request. var req *http.Request - req, err = c.newRequest(method, reqData) + req, err = adm.newRequest(method, reqData) if err != nil { return nil, err } // Initiate the request. - res, err = c.do(req) + res, err = adm.do(req) if err != nil { return nil, err } @@ -368,15 +359,15 @@ func (c AdminClient) executeMethod(method string, reqData requestData) (res *htt } // set User agent. -func (c AdminClient) setUserAgent(req *http.Request) { +func (adm AdminClient) setUserAgent(req *http.Request) { req.Header.Set("User-Agent", libraryUserAgent) - if c.appInfo.appName != "" && c.appInfo.appVersion != "" { - req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + if adm.appInfo.appName != "" && adm.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+adm.appInfo.appName+"/"+adm.appInfo.appVersion) } } // newRequest - instantiate a new HTTP request for a given method. -func (c AdminClient) newRequest(method string, reqData requestData) (req *http.Request, err error) { +func (adm AdminClient) newRequest(method string, reqData requestData) (req *http.Request, err error) { // If no method is supplied default to 'POST'. if method == "" { method = "POST" @@ -386,7 +377,7 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R location := "" // Construct a new target URL. 
- targetURL, err := c.makeTargetURL(reqData) + targetURL, err := adm.makeTargetURL(reqData) if err != nil { return nil, err } @@ -397,7 +388,7 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R return nil, err } - c.setUserAgent(req) + adm.setUserAgent(req) for k, v := range reqData.customHeaders { req.Header.Set(k, v[0]) } @@ -407,15 +398,15 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(reqData.content))) req.Body = ioutil.NopCloser(bytes.NewReader(reqData.content)) - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "", location) + req = s3signer.SignV4(*req, adm.accessKeyID, adm.secretAccessKey, "", location) return req, nil } // makeTargetURL make a new target url. -func (c AdminClient) makeTargetURL(r requestData) (*url.URL, error) { +func (adm AdminClient) makeTargetURL(r requestData) (*url.URL, error) { - host := c.endpointURL.Host - scheme := c.endpointURL.Scheme + host := adm.endpointURL.Host + scheme := adm.endpointURL.Scheme urlStr := scheme + "://" + host + libraryAdminURLPrefix + r.relPath diff --git a/pkg/mimedb/db_test.go b/pkg/mimedb/db_test.go index 3807ecbd5..2fdcbbed9 100644 --- a/pkg/mimedb/db_test.go +++ b/pkg/mimedb/db_test.go @@ -31,9 +31,8 @@ func TestMimeLookup(t *testing.T) { } func TestTypeByExtension(t *testing.T) { - var contentType string // Test TypeByExtension. - contentType = TypeByExtension(".txt") + contentType := TypeByExtension(".txt") if contentType != "text/plain" { t.Fatalf("Invalid content type are found expected \"text/plain\", got %s", contentType) } diff --git a/pkg/quick/errorutil.go b/pkg/quick/errorutil.go index f20b981b2..caf3d3f1d 100644 --- a/pkg/quick/errorutil.go +++ b/pkg/quick/errorutil.go @@ -62,17 +62,16 @@ func FormatJSONSyntaxError(data io.Reader, offset int64) (highlight string) { if readBytes > offset { break } - switch b { - case '\n': + if b == '\n' { readLine.Reset() errLine++ - case '\t': + continue + } else if b == '\t' { readLine.WriteByte(' ') - case '\r': + } else if b == '\r' { break - default: - readLine.WriteByte(b) } + readLine.WriteByte(b) } lineLen := readLine.Len() diff --git a/pkg/s3select/message.go b/pkg/s3select/message.go index 60ce9238e..6fabd97c6 100644 --- a/pkg/s3select/message.go +++ b/pkg/s3select/message.go @@ -307,7 +307,6 @@ func (writer *messageWriter) start() { case <-recordStagingTicker.C: if !writer.flushRecords() { quitFlag = true - break } case <-keepAliveTicker.C: diff --git a/pkg/s3select/select.go b/pkg/s3select/select.go index 88ad400cb..6247acefd 100644 --- a/pkg/s3select/select.go +++ b/pkg/s3select/select.go @@ -408,9 +408,7 @@ func (s3Select *S3Select) Evaluate(w http.ResponseWriter) { } if err != nil { - if serr := writer.FinishWithError("InternalError", err.Error()); serr != nil { - // FIXME: log errors. - } + _ = writer.FinishWithError("InternalError", err.Error()) } } diff --git a/pkg/s3select/sql/aggregation.go b/pkg/s3select/sql/aggregation.go index fd0669bad..f8ba79c3f 100644 --- a/pkg/s3select/sql/aggregation.go +++ b/pkg/s3select/sql/aggregation.go @@ -274,10 +274,6 @@ func (e *FuncExpr) aggregateRow(r Record) error { // called after calling aggregateRow() on each input row, to calculate // the final aggregate result. 
-func (e *Expression) getAggregate() (*Value, error) { - return e.evalNode(nil) -} - func (e *FuncExpr) getAggregate() (*Value, error) { switch e.getFunctionName() { case aggFnCount: diff --git a/pkg/s3select/sql/funceval.go b/pkg/s3select/sql/funceval.go index 6d5321fa7..1eda62e68 100644 --- a/pkg/s3select/sql/funceval.go +++ b/pkg/s3select/sql/funceval.go @@ -234,7 +234,9 @@ func handleDateAdd(r Record, d *DateAddFunc) (*Value, error) { if err != nil { return nil, err } - inferTypeAsTimestamp(ts) + if err = inferTypeAsTimestamp(ts); err != nil { + return nil, err + } t, ok := ts.ToTimestamp() if !ok { return nil, fmt.Errorf("%s() expects a timestamp argument", sqlFnDateAdd) @@ -248,7 +250,9 @@ func handleDateDiff(r Record, d *DateDiffFunc) (*Value, error) { if err != nil { return nil, err } - inferTypeAsTimestamp(tval1) + if err = inferTypeAsTimestamp(tval1); err != nil { + return nil, err + } ts1, ok := tval1.ToTimestamp() if !ok { return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff) @@ -258,7 +262,9 @@ func handleDateDiff(r Record, d *DateDiffFunc) (*Value, error) { if err != nil { return nil, err } - inferTypeAsTimestamp(tval2) + if err = inferTypeAsTimestamp(tval2); err != nil { + return nil, err + } ts2, ok := tval2.ToTimestamp() if !ok { return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff) @@ -363,7 +369,9 @@ func handleSQLExtract(r Record, e *ExtractFunc) (res *Value, err error) { return nil, verr } - inferTypeAsTimestamp(timeVal) + if err = inferTypeAsTimestamp(timeVal); err != nil { + return nil, err + } t, ok := timeVal.ToTimestamp() if !ok { diff --git a/pkg/s3select/sql/parser.go b/pkg/s3select/sql/parser.go index f5bce5e41..a9ebe6399 100644 --- a/pkg/s3select/sql/parser.go +++ b/pkg/s3select/sql/parser.go @@ -83,8 +83,6 @@ type Select struct { type SelectExpression struct { All bool `parser:" @\"*\""` Expressions []*AliasedExpression `parser:"| @@ { \",\" @@ }"` - - prop qProp } // TableExpression represents the FROM clause diff --git a/pkg/s3select/sql/timestampfuncs.go b/pkg/s3select/sql/timestampfuncs.go index 75ec8add1..7b6271dc7 100644 --- a/pkg/s3select/sql/timestampfuncs.go +++ b/pkg/s3select/sql/timestampfuncs.go @@ -38,7 +38,6 @@ var ( layoutSecond, layoutNanosecond, } - oneNanoSecond = 1 ) func parseSQLTimestamp(s string) (t time.Time, err error) { diff --git a/pkg/s3select/sql/value.go b/pkg/s3select/sql/value.go index f5cf172d6..6c96f9b24 100644 --- a/pkg/s3select/sql/value.go +++ b/pkg/s3select/sql/value.go @@ -248,7 +248,7 @@ func (v *Value) CSVString() string { case typeBool: return fmt.Sprintf("%v", v.value.(bool)) case typeString: - return fmt.Sprintf("%s", v.value.(string)) + return v.value.(string) case typeInt: return fmt.Sprintf("%v", v.value.(int64)) case typeFloat: @@ -610,22 +610,22 @@ func (v *Value) minmax(a *Value, isMax, isFirstRow bool) error { return nil } -func inferTypeAsTimestamp(v *Value) { +func inferTypeAsTimestamp(v *Value) error { if s, ok := v.ToString(); ok { t, err := parseSQLTimestamp(s) if err != nil { - return + return err } v.setTimestamp(t) } else if b, ok := v.ToBytes(); ok { s := string(b) t, err := parseSQLTimestamp(s) if err != nil { - return + return err } v.setTimestamp(t) } - return + return nil } // inferTypeAsString is used to convert untyped values to string - it diff --git a/pkg/trie/trie_test.go b/pkg/trie/trie_test.go index 454b898e0..15e599606 100644 --- a/pkg/trie/trie_test.go +++ b/pkg/trie/trie_test.go @@ -22,8 +22,7 @@ import ( // Simply make sure creating a new 
tree works. func TestNewTrie(t *testing.T) { - var trie *Trie - trie = NewTrie() + trie := NewTrie() if trie.size != 0 { t.Errorf("expected size 0, got: %d", trie.size) @@ -32,8 +31,7 @@ func TestNewTrie(t *testing.T) { // Ensure that we can insert new keys into the tree, then check the size. func TestInsert(t *testing.T) { - var trie *Trie - trie = NewTrie() + trie := NewTrie() // We need to have an empty tree to begin with. if trie.size != 0 { @@ -51,8 +49,7 @@ func TestInsert(t *testing.T) { // Ensure that PrefixMatch gives us the correct two keys in the tree. func TestPrefixMatch(t *testing.T) { - var trie *Trie - trie = NewTrie() + trie := NewTrie() // Feed it some fodder: only 'minio' and 'miny-os' should trip the matcher. trie.Insert("minio") diff --git a/pkg/words/damerau-levenshtein_test.go b/pkg/words/damerau-levenshtein_test.go index 62c64a338..e2338f424 100644 --- a/pkg/words/damerau-levenshtein_test.go +++ b/pkg/words/damerau-levenshtein_test.go @@ -26,7 +26,6 @@ func TestMinimum(t *testing.T) { type testCase struct { listval []int expected int - pass bool } testCases := []testCase{ {listval: []int{3, 4, 15}, expected: 3}, diff --git a/staticcheck.conf b/staticcheck.conf new file mode 100644 index 000000000..ee0604cd7 --- /dev/null +++ b/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019"]
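A closing note on how the new staticcheck.conf is consumed: staticcheck looks for a staticcheck.conf in the directory of each checked package and in its parent directories, so a single file at the repository root governs the whole tree, with "all" enabling every check and each "-" entry opting one out. The snippet below is illustrative only, not code from this repository; it collects a few of the before/after rewrites that recur in the hunks above, which are the kind of simplifications the enabled checks report.

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	var buf bytes.Buffer
	buf.WriteString("hello")

	// Before: string(buf.Bytes())   After: buf.String()
	fmt.Println(buf.String())

	// Before: strings.Index(s, "{") == -1   After: !strings.Contains(s, "{")
	s := "no braces here"
	if !strings.Contains(s, "{") {
		fmt.Println("no ellipses pattern")
	}

	// Before: fmt.Sprintf("%s", s)   After: use the string directly
	fmt.Println(s)
}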