From 3df7285c3cab65afcf329223e836d3dfd2e6aa74 Mon Sep 17 00:00:00 2001 From: Nitish Tiwari Date: Fri, 6 Dec 2019 12:46:06 +0530 Subject: [PATCH] Add Support for Cache and S3 related metrics in Prometheus endpoint (#8591) This PR adds support below metrics - Cache Hit Count - Cache Miss Count - Data served from Cache (in Bytes) - Bytes received from AWS S3 - Bytes sent to AWS S3 - Number of requests sent to AWS S3 Fixes #8549 --- cmd/admin-handlers.go | 2 +- cmd/api-headers.go | 2 +- cmd/api-response.go | 2 +- cmd/bucket-handlers.go | 4 +- cmd/disk-cache-backend.go | 6 +-- cmd/disk-cache-stats.go | 64 +++++++++++++++++++++++++ cmd/disk-cache.go | 36 +++++++++++--- cmd/disk-usage.go | 2 +- cmd/fs-v1-metadata.go | 2 +- cmd/fs-v1.go | 12 +++-- cmd/gateway-main.go | 1 - cmd/gateway-metrics.go | 79 +++++++++++++++++++++++++++++++ cmd/gateway-unsupported.go | 6 +++ cmd/gateway/s3/gateway-s3.go | 79 +++++++++++++++++++++++-------- cmd/generic-handlers.go | 8 ++-- cmd/metrics.go | 61 +++++++++++++++++++++++- cmd/object-api-common.go | 10 ++-- cmd/object-api-input-checks.go | 6 +-- cmd/object-api-interface.go | 3 ++ cmd/object-api-utils.go | 14 +++--- cmd/object-handlers.go | 4 +- cmd/posix.go | 8 ++-- cmd/tree-walk.go | 6 +-- cmd/tree-walk_test.go | 2 +- cmd/web-handlers.go | 6 +-- cmd/xl-sets.go | 11 ++++- cmd/xl-v1-healing.go | 2 +- cmd/xl-v1-list-objects.go | 4 +- cmd/xl-v1-object.go | 10 ++-- cmd/xl-v1.go | 6 +++ cmd/xl-zones.go | 10 +++- docs/metrics/prometheus/README.md | 18 ++++++- 32 files changed, 400 insertions(+), 86 deletions(-) create mode 100644 cmd/disk-cache-stats.go create mode 100644 cmd/gateway-metrics.go diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index da96446a9..8f6ef207e 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -1028,7 +1028,7 @@ func mustTrace(entry interface{}, trcAll, errOnly bool) bool { if !ok { return false } - trace := trcAll || !hasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator) + trace := trcAll || !HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator) if errOnly { return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest } diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 16c59ab6d..19e8f178e 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -94,7 +94,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp // Set all other user defined metadata. for k, v := range objInfo.UserDefined { - if hasPrefix(k, ReservedMetadataPrefix) { + if HasPrefix(k, ReservedMetadataPrefix) { // Do not need to send any internal metadata // values to client. continue diff --git a/cmd/api-response.go b/cmd/api-response.go index e3a3db61d..17dcc563b 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -526,7 +526,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, if metadata { content.UserMetadata = make(StringMap) for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) { - if hasPrefix(k, ReservedMetadataPrefix) { + if HasPrefix(k, ReservedMetadataPrefix) { // Do not need to send any internal metadata // values to client. continue diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 2876560c7..c2f9290f1 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -214,7 +214,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, if keyMarker != "" { // Marker not common with prefix is not implemented. 
- if !hasPrefix(keyMarker, prefix) { + if !HasPrefix(keyMarker, prefix) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r)) return } @@ -750,7 +750,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } if objectAPI.IsEncryptionSupported() { - if crypto.IsRequested(formValues) && !hasSuffix(object, SlashSeparator) { // handle SSE requests + if crypto.IsRequested(formValues) && !HasSuffix(object, SlashSeparator) { // handle SSE requests if crypto.SSECopy.IsRequested(r.Header) { writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r)) return diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go index 27abee980..75d1e6617 100644 --- a/cmd/disk-cache-backend.go +++ b/cmd/disk-cache-backend.go @@ -334,14 +334,14 @@ func (c *diskCache) updateMetadataIfChanged(ctx context.Context, bucket, object bkMeta := make(map[string]string) cacheMeta := make(map[string]string) for k, v := range bkObjectInfo.UserDefined { - if hasPrefix(k, ReservedMetadataPrefix) { + if HasPrefix(k, ReservedMetadataPrefix) { // Do not need to send any internal metadata continue } bkMeta[http.CanonicalHeaderKey(k)] = v } for k, v := range cacheObjInfo.UserDefined { - if hasPrefix(k, ReservedMetadataPrefix) { + if HasPrefix(k, ReservedMetadataPrefix) { // Do not need to send any internal metadata continue } @@ -602,7 +602,7 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang var nsUnlocker = func() {} // For a directory, we need to send an reader that returns no bytes. - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { // The lock taken above is released when // objReader.Close() is called by the caller. return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker) diff --git a/cmd/disk-cache-stats.go b/cmd/disk-cache-stats.go new file mode 100644 index 000000000..6a18d00ac --- /dev/null +++ b/cmd/disk-cache-stats.go @@ -0,0 +1,64 @@ +/* + * MinIO Cloud Storage, (C) 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "go.uber.org/atomic" +) + +// CacheStats - represents bytes served from cache, +// cache hits and cache misses. 
+type CacheStats struct { + BytesServed atomic.Uint64 + Hits atomic.Uint64 + Misses atomic.Uint64 +} + +// Increase total bytes served from cache +func (s *CacheStats) incBytesServed(n int64) { + s.BytesServed.Add(uint64(n)) +} + +// Increase cache hit by 1 +func (s *CacheStats) incHit() { + s.Hits.Add(uint64(1)) +} + +// Increase cache miss by 1 +func (s *CacheStats) incMiss() { + s.Misses.Add(uint64(1)) +} + +// Get total bytes served +func (s *CacheStats) getBytesServed() uint64 { + return s.BytesServed.Load() +} + +// Get total cache hits +func (s *CacheStats) getHits() uint64 { + return s.Hits.Load() +} + +// Get total cache misses +func (s *CacheStats) getMisses() uint64 { + return s.Misses.Load() +} + +// Prepare new CacheStats structure +func newCacheStats() *CacheStats { + return &CacheStats{} +} diff --git a/cmd/disk-cache.go b/cmd/disk-cache.go index 6d470c161..59bc38ac9 100644 --- a/cmd/disk-cache.go +++ b/cmd/disk-cache.go @@ -57,6 +57,7 @@ type CacheObjectLayer interface { PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) // Storage operations. StorageInfo(ctx context.Context) CacheStorageInfo + CacheStats() *CacheStats } // Abstracts disk caching - used by the S3 layer @@ -74,6 +75,9 @@ type cacheObjects struct { // nsMutex namespace lock nsMutex *nsLockMap + // Cache stats + cacheStats *CacheStats + // Object functions pointing to the corresponding functions of backend implementation. NewNSLockFn func(ctx context.Context, bucket, object string) RWLocker GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) @@ -181,11 +185,17 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string cacheReader, cacheErr := c.get(ctx, dcache, bucket, object, rs, h, opts) if cacheErr == nil { cc = cacheControlOpts(cacheReader.ObjInfo) - if !cc.isEmpty() && !cc.isStale(cacheReader.ObjInfo.ModTime) { + if !cc.isStale(cacheReader.ObjInfo.ModTime) { + // This is a cache hit, mark it so + c.cacheStats.incHit() + c.cacheStats.incBytesServed(cacheReader.ObjInfo.Size) return cacheReader, nil } } + // Reaching here implies cache miss + c.cacheStats.incMiss() + objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts) if backendDownError(err) && cacheErr == nil { return cacheReader, nil @@ -282,10 +292,16 @@ func (c *cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, cachedObjInfo, cerr := c.stat(ctx, dcache, bucket, object) if cerr == nil { cc = cacheControlOpts(cachedObjInfo) - if !cc.isEmpty() && !cc.isStale(cachedObjInfo.ModTime) { + if !cc.isStale(cachedObjInfo.ModTime) { + // This is a cache hit, mark it so + c.cacheStats.incHit() return cachedObjInfo, nil } } + + // Reaching here implies cache miss + c.cacheStats.incMiss() + objInfo, err := getObjectInfoFn(ctx, bucket, object, opts) if err != nil { if _, ok := err.(ObjectNotFound); ok { @@ -332,6 +348,11 @@ func (c *cacheObjects) StorageInfo(ctx context.Context) (cInfo CacheStorageInfo) } } +// CacheStats - returns underlying storage statistics. 
+func (c *cacheObjects) CacheStats() (cs *CacheStats) { + return c.cacheStats +} + // skipCache() returns true if cache migration is in progress func (c *cacheObjects) skipCache() bool { c.migMutex.Lock() @@ -572,11 +593,12 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjec } c := &cacheObjects{ - cache: cache, - exclude: config.Exclude, - migrating: migrateSw, - migMutex: sync.Mutex{}, - nsMutex: newNSLock(false), + cache: cache, + exclude: config.Exclude, + migrating: migrateSw, + migMutex: sync.Mutex{}, + nsMutex: newNSLock(false), + cacheStats: newCacheStats(), GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts) }, diff --git a/cmd/disk-usage.go b/cmd/disk-usage.go index c4522cabc..8776fb1da 100644 --- a/cmd/disk-usage.go +++ b/cmd/disk-usage.go @@ -34,7 +34,7 @@ func walk(ctx context.Context, path string, usageFn usageFunc) error { return err } - if !hasSuffix(path, SlashSeparator) { + if !HasSuffix(path, SlashSeparator) { return nil } diff --git a/cmd/fs-v1-metadata.go b/cmd/fs-v1-metadata.go index f684e581e..d76abc899 100644 --- a/cmd/fs-v1-metadata.go +++ b/cmd/fs-v1-metadata.go @@ -142,7 +142,7 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo m.Meta["content-type"] = mimedb.TypeByExtension(pathutil.Ext(object)) } - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { m.Meta["etag"] = emptyETag // For directories etag is d41d8cd98f00b204e9800998ecf8427e m.Meta["content-type"] = "application/octet-stream" } diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 86244f0d8..1aebf315c 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -514,7 +514,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, return nil, toObjectErr(err, bucket, object) } // For a directory, we need to send an reader that returns no bytes. - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { // The lock taken above is released when // objReader.Close() is called by the caller. return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker) @@ -605,7 +605,7 @@ func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offse } // If its a directory request, we return an empty body. - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { _, err = writer.Write([]byte("")) logger.LogIf(ctx, err) return toObjectErr(err, bucket, object) @@ -699,7 +699,7 @@ func (fs *FSObjects) defaultFsJSON(object string) fsMetaV1 { // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. 
func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) { fsMeta := fsMetaV1{} - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)) if err != nil { return oi, err @@ -1167,6 +1167,12 @@ func (fs *FSObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker return ListObjectsInfo{}, NotImplemented{} } +// GetMetrics - no op +func (fs *FSObjects) GetMetrics(ctx context.Context) (*Metrics, error) { + logger.LogIf(ctx, NotImplemented{}) + return &Metrics{}, NotImplemented{} +} + // SetBucketPolicy sets policy on bucket func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error { return savePolicyConfig(ctx, fs, bucket, policy) diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go index 8640f82d2..d5b0cac85 100644 --- a/cmd/gateway-main.go +++ b/cmd/gateway-main.go @@ -226,7 +226,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) { globalHTTPServer.Shutdown() logger.FatalIf(err, "Unable to initialize gateway backend") } - newObject = NewGatewayLayerWithLocker(newObject) // Re-enable logging diff --git a/cmd/gateway-metrics.go b/cmd/gateway-metrics.go new file mode 100644 index 000000000..b766a17f5 --- /dev/null +++ b/cmd/gateway-metrics.go @@ -0,0 +1,79 @@ +/* + * MinIO Cloud Storage, (C) 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "sync" + + "go.uber.org/atomic" +) + +// Metrics - represents bytes served from backend +// only implemented for S3 Gateway +type Metrics struct { + BytesReceived atomic.Uint64 + BytesSent atomic.Uint64 + RequestStats map[string]int + sync.RWMutex +} + +// IncBytesReceived - Increase total bytes received from gateway backend +func (s *Metrics) IncBytesReceived(n int64) { + s.BytesReceived.Add(uint64(n)) +} + +// GetBytesReceived - Get total bytes received from gateway backend +func (s *Metrics) GetBytesReceived() uint64 { + return s.BytesReceived.Load() +} + +// IncBytesSent - Increase total bytes sent to gateway backend +func (s *Metrics) IncBytesSent(n int64) { + s.BytesSent.Add(uint64(n)) +} + +// GetBytesSent - Get total bytes received from gateway backend +func (s *Metrics) GetBytesSent() uint64 { + return s.BytesSent.Load() +} + +// IncRequests - Increase request sent to gateway backend by 1 +func (s *Metrics) IncRequests(method string) { + s.Lock() + defer s.Unlock() + if s == nil { + return + } + if s.RequestStats == nil { + s.RequestStats = make(map[string]int) + } + if _, ok := s.RequestStats[method]; ok { + s.RequestStats[method]++ + return + } + s.RequestStats[method] = 1 +} + +// GetRequests - Get total number of requests sent to gateway backend +func (s *Metrics) GetRequests() map[string]int { + return s.RequestStats +} + +// NewMetrics - Prepare new Metrics structure +func NewMetrics() *Metrics { + return &Metrics{} +} diff --git a/cmd/gateway-unsupported.go b/cmd/gateway-unsupported.go index 03024c824..691d05bfd 100644 --- a/cmd/gateway-unsupported.go +++ b/cmd/gateway-unsupported.go @@ -167,6 +167,12 @@ func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, sr return objInfo, NotImplemented{} } +// GetMetrics - no op +func (a GatewayUnsupported) GetMetrics(ctx context.Context) (*Metrics, error) { + logger.LogIf(ctx, NotImplemented{}) + return &Metrics{}, NotImplemented{} +} + // IsNotificationSupported returns whether bucket notification is applicable for this layer. func (a GatewayUnsupported) IsNotificationSupported() bool { return false diff --git a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go index eec763123..94fc2fa6f 100644 --- a/cmd/gateway/s3/gateway-s3.go +++ b/cmd/gateway/s3/gateway-s3.go @@ -132,9 +132,11 @@ func (g *S3) Name() string { const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" const ( - letterIdxBits = 6 // 6 bits to represent a letter index - letterIdxMask = 1< 0 { - if hasSuffix(elem[len(elem)-1], SlashSeparator) { + if HasSuffix(elem[len(elem)-1], SlashSeparator) { trailingSlash = SlashSeparator } } @@ -271,20 +271,20 @@ func extractETag(metadata map[string]string) string { return etag } -// Prefix matcher string matches prefix in a platform specific way. +// HasPrefix - Prefix matcher string matches prefix in a platform specific way. // For example on windows since its case insensitive we are supposed // to do case insensitive checks. -func hasPrefix(s string, prefix string) bool { +func HasPrefix(s string, prefix string) bool { if runtime.GOOS == globalWindowsOSName { return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix)) } return strings.HasPrefix(s, prefix) } -// Suffix matcher string matches suffix in a platform specific way. +// HasSuffix - Suffix matcher string matches suffix in a platform specific way. // For example on windows since its case insensitive we are supposed // to do case insensitive checks. 
-func hasSuffix(s string, suffix string) bool { +func HasSuffix(s string, suffix string) bool { if runtime.GOOS == globalWindowsOSName { return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix)) } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index a5576aaea..742b83086 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -878,7 +878,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re } for k, v := range srcInfo.UserDefined { - if hasPrefix(k, ReservedMetadataPrefix) { + if HasPrefix(k, ReservedMetadataPrefix) { encMetadata[k] = v } } @@ -1254,7 +1254,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req var objectEncryptionKey []byte if objectAPI.IsEncryptionSupported() { - if crypto.IsRequested(r.Header) && !hasSuffix(object, SlashSeparator) { // handle SSE requests + if crypto.IsRequested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests if crypto.SSECopy.IsRequested(r.Header) { writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r)) return diff --git a/cmd/posix.go b/cmd/posix.go index 8f523d4bd..f094e7b15 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -604,7 +604,7 @@ func listVols(dirPath string) ([]VolInfo, error) { } var volsInfo []VolInfo for _, entry := range entries { - if !hasSuffix(entry, SlashSeparator) || !isValidVolname(slashpath.Clean(entry)) { + if !HasSuffix(entry, SlashSeparator) || !isValidVolname(slashpath.Clean(entry)) { // Skip if entry is neither a directory not a valid volume name. continue } @@ -752,7 +752,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st return } var fi FileInfo - if hasSuffix(walkResult.entry, SlashSeparator) { + if HasSuffix(walkResult.entry, SlashSeparator) { fi = FileInfo{ Volume: volume, Name: walkResult.entry, @@ -1492,8 +1492,8 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e } } - srcIsDir := hasSuffix(srcPath, SlashSeparator) - dstIsDir := hasSuffix(dstPath, SlashSeparator) + srcIsDir := HasSuffix(srcPath, SlashSeparator) + dstIsDir := HasSuffix(dstPath, SlashSeparator) // Either src and dst have to be directories or files, else return error. if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) { return errFileAccessDenied diff --git a/cmd/tree-walk.go b/cmd/tree-walk.go index ad062eb81..5198f7718 100644 --- a/cmd/tree-walk.go +++ b/cmd/tree-walk.go @@ -37,7 +37,7 @@ func filterMatchingPrefix(entries []string, prefixEntry string) []string { if start == end { break } - if hasPrefix(entries[start], prefixEntry) { + if HasPrefix(entries[start], prefixEntry) { break } start++ @@ -46,7 +46,7 @@ func filterMatchingPrefix(entries []string, prefixEntry string) []string { if start == end { break } - if hasPrefix(entries[end-1], prefixEntry) { + if HasPrefix(entries[end-1], prefixEntry) { break } end-- @@ -95,7 +95,7 @@ func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker for i, entry := range entries { pentry := pathJoin(prefixDir, entry) - isDir := hasSuffix(pentry, SlashSeparator) + isDir := HasSuffix(pentry, SlashSeparator) if i == 0 && markerDir == entry { if !recursive { diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go index cdcac547e..f0247cc21 100644 --- a/cmd/tree-walk_test.go +++ b/cmd/tree-walk_test.go @@ -93,7 +93,7 @@ func testTreeWalkPrefix(t *testing.T, listDir ListDirFunc) { // Check if all entries received on the channel match the prefix. 
for res := range twResultCh { - if !hasPrefix(res.entry, prefix) { + if !HasPrefix(res.entry, prefix) { t.Errorf("Entry %s doesn't match prefix %s", res.entry, prefix) } } diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index a2d35c4e7..343e74925 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -669,7 +669,7 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, next: for _, objectName := range args.Objects { // If not a directory, remove the object. - if !hasSuffix(objectName, SlashSeparator) && objectName != "" { + if !HasSuffix(objectName, SlashSeparator) && objectName != "" { // Check for permissions only in the case of // non-anonymous login. For anonymous login, policy has already // been checked. @@ -1043,7 +1043,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { return } if objectAPI.IsEncryptionSupported() { - if crypto.IsRequested(r.Header) && !hasSuffix(object, SlashSeparator) { // handle SSE requests + if crypto.IsRequested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests rawReader := hashReader var objectEncryptionKey []byte reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata) @@ -1447,7 +1447,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) { return nil } - if !hasSuffix(object, SlashSeparator) { + if !HasSuffix(object, SlashSeparator) { // If not a directory, compress the file and write it to response. err := zipit(pathJoin(args.Prefix, object)) if err != nil { diff --git a/cmd/xl-sets.go b/cmd/xl-sets.go index 82dc2f8a3..22269c5a5 100644 --- a/cmd/xl-sets.go +++ b/cmd/xl-sets.go @@ -28,6 +28,7 @@ import ( "github.com/minio/minio/cmd/config/storageclass" xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/dsync" "github.com/minio/minio/pkg/lifecycle" @@ -1039,7 +1040,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi // Marker is set validate pre-condition. if marker != "" { // Marker not common with prefix is not implemented. Send an empty response - if !hasPrefix(marker, prefix) { + if !HasPrefix(marker, prefix) { return loi, nil } } @@ -1092,7 +1093,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi for _, entry := range entries.Files { var objInfo ObjectInfo - if hasSuffix(entry.Name, SlashSeparator) { + if HasSuffix(entry.Name, SlashSeparator) { if !recursive { loi.Prefixes = append(loi.Prefixes, entry.Name) continue @@ -1655,3 +1656,9 @@ func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, healObj func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { return s.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys, true) } + +// GetMetrics - no op +func (s *xlSets) GetMetrics(ctx context.Context) (*Metrics, error) { + logger.LogIf(ctx, NotImplemented{}) + return &Metrics{}, NotImplemented{} +} diff --git a/cmd/xl-v1-healing.go b/cmd/xl-v1-healing.go index f01d7a4bb..332466e98 100644 --- a/cmd/xl-v1-healing.go +++ b/cmd/xl-v1-healing.go @@ -680,7 +680,7 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, dryRu healCtx := logger.SetReqInfo(context.Background(), newReqInfo) // Healing directories handle it separately. 
- if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { return xl.healObjectDir(healCtx, bucket, object, dryRun) } diff --git a/cmd/xl-v1-list-objects.go b/cmd/xl-v1-list-objects.go index be046699f..50e15384f 100644 --- a/cmd/xl-v1-list-objects.go +++ b/cmd/xl-v1-list-objects.go @@ -87,7 +87,7 @@ func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, del } entry := walkResult.entry var objInfo ObjectInfo - if hasSuffix(entry, SlashSeparator) { + if HasSuffix(entry, SlashSeparator) { // Object name needs to be full path. objInfo.Bucket = bucket objInfo.Name = entry @@ -156,7 +156,7 @@ func (xl xlObjects) ListObjects(ctx context.Context, bucket, prefix, marker, del // Marker is set validate pre-condition. if marker != "" { // Marker not common with prefix is not implemented.Send an empty response - if !hasPrefix(marker, prefix) { + if !HasPrefix(marker, prefix) { return ListObjectsInfo{}, e } } diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index 3b389f151..4ae6cc94b 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -129,7 +129,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r // Handler directory request by returning a reader that // returns no bytes. - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { var objInfo ObjectInfo if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil { return nil, toObjectErr(err, bucket, object) @@ -190,7 +190,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO } // If its a directory request, we return an empty body. - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { _, err := writer.Write([]byte("")) logger.LogIf(ctx, err) return toObjectErr(err, bucket, object) @@ -344,7 +344,7 @@ func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, op return oi, err } - if hasSuffix(object, SlashSeparator) { + if HasSuffix(object, SlashSeparator) { info, err := xl.getObjectInfoDir(ctx, bucket, object) if err != nil { return oi, toObjectErr(err, bucket, object) @@ -804,7 +804,7 @@ func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects [] } for i, object := range objects { - isObjectDirs[i] = hasSuffix(object, SlashSeparator) + isObjectDirs[i] = HasSuffix(object, SlashSeparator) } for i, object := range objects { @@ -903,7 +903,7 @@ func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (er } var writeQuorum int - var isObjectDir = hasSuffix(object, SlashSeparator) + var isObjectDir = HasSuffix(object, SlashSeparator) if isObjectDir { _, err = xl.getObjectInfoDir(ctx, bucket, object) diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go index 2e84d62d9..66b80ee5e 100644 --- a/cmd/xl-v1.go +++ b/cmd/xl-v1.go @@ -200,3 +200,9 @@ func getStorageInfo(disks []StorageAPI) StorageInfo { func (xl xlObjects) StorageInfo(ctx context.Context) StorageInfo { return getStorageInfo(xl.getDisks()) } + +// GetMetrics - no op +func (xl xlObjects) GetMetrics(ctx context.Context) (*Metrics, error) { + logger.LogIf(ctx, NotImplemented{}) + return &Metrics{}, NotImplemented{} +} diff --git a/cmd/xl-zones.go b/cmd/xl-zones.go index e117e1aca..99add9a63 100644 --- a/cmd/xl-zones.go +++ b/cmd/xl-zones.go @@ -617,7 +617,7 @@ func (z *xlZones) listObjects(ctx context.Context, bucket, prefix, marker, delim // Marker is set validate pre-condition. if marker != "" { // Marker not common with prefix is not implemented. 
Send an empty response
-		if !hasPrefix(marker, prefix) {
+		if !HasPrefix(marker, prefix) {
 			return loi, nil
 		}
 	}
@@ -682,7 +682,7 @@ func (z *xlZones) listObjects(ctx context.Context, bucket, prefix, marker, delim
 	for _, entry := range entries.Files {
 		var objInfo ObjectInfo
-		if hasSuffix(entry.Name, SlashSeparator) {
+		if HasSuffix(entry.Name, SlashSeparator) {
 			if !recursive {
 				loi.Prefixes = append(loi.Prefixes, entry.Name)
 				continue
@@ -1312,3 +1312,9 @@ func (z *xlZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
 	}
 	return healBuckets, nil
 }
+
+// GetMetrics - no op
+func (z *xlZones) GetMetrics(ctx context.Context) (*Metrics, error) {
+	logger.LogIf(ctx, NotImplemented{})
+	return &Metrics{}, NotImplemented{}
+}
diff --git a/docs/metrics/prometheus/README.md b/docs/metrics/prometheus/README.md
index 7760a6d09..7daa22785 100644
--- a/docs/metrics/prometheus/README.md
+++ b/docs/metrics/prometheus/README.md
@@ -118,7 +118,6 @@ The list of metrics and its definition are as follows. (NOTE: instance here is o
 > 1. Instance here is one MinIO node.
 > 2. `s3 requests` exclude internode requests.
 
-
 - standard go runtime metrics prefixed by `go_`
 - process level metrics prefixed with `process_`
 - prometheus scrap metrics prefixed with `promhttp_`
@@ -138,6 +137,23 @@ The list of metrics and its definition are as follows. (NOTE: instance here is o
 - `minio_version_info`: Current MinIO version with commit-id.
 - `s3_ttfb_seconds`: Histogram that holds the latency information of the requests.
 
+Apart from the above metrics, MinIO also exposes the following mode-specific metrics.
+
+### Cache specific metrics
+
+MinIO Gateway instances with disk caching enabled expose the following caching-related metrics.
+
+- `cache_data_served`: Total number of bytes served from cache.
+- `cache_hits_total`: Total number of cache hits.
+- `cache_misses_total`: Total number of cache misses.
+
+### S3 Gateway specific metrics
+
+A MinIO S3 Gateway instance exposes metrics related to its communication with AWS S3.
+
+- `gateway_s3_requests`: Total number of GET and HEAD requests made to AWS S3. This metric has a `method` label that distinguishes GET and HEAD requests.
+- `gateway_s3_bytes_sent`: Total number of bytes sent to AWS S3 (in GET and HEAD requests).
+- `gateway_s3_bytes_received`: Total number of bytes received from AWS S3 (in GET and HEAD requests).
+
 ## Migration guide for the new set of metrics
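For reference, a scrape of the MinIO Prometheus endpoint (`/minio/prometheus/metrics`) on an instance running with disk caching and the S3 gateway enabled would be expected to include entries along the following lines. This is an illustrative sketch only: the metric names and the `method` label come from the README additions above, while the HELP/TYPE text and sample values are placeholders rather than actual MinIO output.

```
# HELP cache_hits_total Total number of cache hits
# TYPE cache_hits_total counter
cache_hits_total 1042
# HELP cache_misses_total Total number of cache misses
# TYPE cache_misses_total counter
cache_misses_total 87
# HELP cache_data_served Total number of bytes served from cache
# TYPE cache_data_served counter
cache_data_served 5.36870912e+08
# HELP gateway_s3_requests Total number of requests made to AWS S3
# TYPE gateway_s3_requests counter
gateway_s3_requests{method="GET"} 912
gateway_s3_requests{method="HEAD"} 130
# HELP gateway_s3_bytes_sent Total number of bytes sent to AWS S3
# TYPE gateway_s3_bytes_sent counter
gateway_s3_bytes_sent 1.048576e+06
# HELP gateway_s3_bytes_received Total number of bytes received from AWS S3
# TYPE gateway_s3_bytes_received counter
gateway_s3_bytes_received 2.097152e+08
```

Since these are cumulative counters, Prometheus queries would typically wrap them in `rate()`, for example `rate(gateway_s3_requests[5m])` to chart the per-method request rate to AWS S3.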