api: Bucket notification add filter rules check and validate. (#2272)

These filter rules are used to validate object names by their prefix and suffix.
Branch: master
Author: Harshavardhana (committed by GitHub)
Parent: 043ddbd834
Commit: efbf7dbc0f
Changed files (lines changed):
  1. api-errors.go (24)
  2. bucket-notification-datatypes.go (55)
  3. bucket-notification-utils.go (65)
  4. bucket-notification-utils_test.go (17)
  5. logger-amqp.go (10)
  6. logger-elasticsearch.go (14)
  7. logger-redis.go (13)
  8. logger.go (3)
  9. main.go (1)
  10. queues.go (96)

api-errors.go

@@ -110,6 +110,10 @@ const (
     ErrARNNotification
     ErrRegionNotification
     ErrOverlappingFilterNotification
+    ErrFilterNameInvalid
+    ErrFilterNamePrefix
+    ErrFilterNameSuffix
+    ErrFilterPrefixValueInvalid
     // S3 extended errors.
     ErrContentSHA256Mismatch
@@ -438,6 +442,26 @@ var errorCodeResponse = map[APIErrorCode]APIError{
         Description:    "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.",
         HTTPStatusCode: http.StatusBadRequest,
     },
+    ErrFilterNameInvalid: {
+        Code:           "InvalidArgument",
+        Description:    "filter rule name must be either prefix or suffix",
+        HTTPStatusCode: http.StatusBadRequest,
+    },
+    ErrFilterNamePrefix: {
+        Code:           "InvalidArgument",
+        Description:    "Cannot specify more than one prefix rule in a filter.",
+        HTTPStatusCode: http.StatusBadRequest,
+    },
+    ErrFilterNameSuffix: {
+        Code:           "InvalidArgument",
+        Description:    "Cannot specify more than one suffix rule in a filter.",
+        HTTPStatusCode: http.StatusBadRequest,
+    },
+    ErrFilterPrefixValueInvalid: {
+        Code:           "InvalidArgument",
+        Description:    "prefix rule value cannot exceed 1024 characters",
+        HTTPStatusCode: http.StatusBadRequest,
+    },
     /// S3 extensions.
     ErrContentSHA256Mismatch: {
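A failed filter validation surfaces to the client as an S3-style error document built from these entries, sent with HTTP status 400. The sketch below is illustrative only: it assumes the conventional S3 error XML element names (Error, Code, Message), which are not part of this diff.

package main

import (
    "encoding/xml"
    "fmt"
)

// Minimal stand-in for an S3-style error document; the element names below
// follow the common S3 error response shape and are an assumption here.
type errorResponse struct {
    XMLName xml.Name `xml:"Error"`
    Code    string   `xml:"Code"`
    Message string   `xml:"Message"`
}

func main() {
    resp := errorResponse{
        Code:    "InvalidArgument",
        Message: "filter rule name must be either prefix or suffix",
    }
    out, _ := xml.MarshalIndent(resp, "", "  ")
    // Would accompany HTTP status 400 (http.StatusBadRequest).
    fmt.Println(string(out))
}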

bucket-notification-datatypes.go

@@ -18,41 +18,43 @@ package main

 import "encoding/xml"

+// Represents the criteria for the filter rule.
 type filterRule struct {
-    Name  string `xml:"FilterRuleName"`
-    Value string
+    Name  string `xml:"Name"`
+    Value string `xml:"Value"`
 }

+// Collection of filter rules per service config.
 type keyFilter struct {
-    FilterRules []filterRule `xml:"FilterRule"`
-}
-
-type notificationConfigFilter struct {
-    Key keyFilter `xml:"S3Key"`
+    FilterRules []filterRule `xml:"FilterRule,omitempty"`
 }

 // Queue SQS configuration.
 type queueConfig struct {
     Events []string `xml:"Event"`
-    Filter notificationConfigFilter
+    Filter struct {
+        Key keyFilter `xml:"S3Key,omitempty"`
+    }
     ID       string `xml:"Id"`
     QueueArn string `xml:"Queue"`
 }

-// Topic SNS configuration, this is a compliance field
-// not used by minio yet.
+// Topic SNS configuration, this is a compliance field not used by minio yet.
 type topicConfig struct {
     Events []string `xml:"Event"`
-    Filter notificationConfigFilter
+    Filter struct {
+        Key keyFilter `xml:"S3Key"`
+    }
     ID       string `xml:"Id"`
     TopicArn string `xml:"Topic"`
 }

-// Lambda function configuration, this is a compliance field
-// not used by minio yet.
+// Lambda function configuration, this is a compliance field not used by minio yet.
 type lambdaFuncConfig struct {
     Events []string `xml:"Event"`
-    Filter notificationConfigFilter
+    Filter struct {
+        Key keyFilter `xml:"S3Key"`
+    }
     ID                string `xml:"Id"`
     LambdaFunctionArn string `xml:"CloudFunction"`
 }
@@ -110,13 +112,15 @@ func defaultIdentity() identity {
     return identity{"minio"}
 }

-type s3BucketReference struct {
+// Notification event bucket metadata.
+type bucketMeta struct {
     Name          string   `json:"name"`
     OwnerIdentity identity `json:"ownerIdentity"`
     ARN           string   `json:"arn"`
 }

-type s3ObjectReference struct {
+// Notification event object metadata.
+type objectMeta struct {
     Key  string `json:"key"`
     Size int64  `json:"size,omitempty"`
     ETag string `json:"eTag,omitempty"`
@@ -124,11 +128,12 @@ type s3ObjectReference struct {
     Sequencer string `json:"sequencer"`
 }

-type s3Reference struct {
-    SchemaVersion   string            `json:"s3SchemaVersion"`
-    ConfigurationID string            `json:"configurationId"`
-    Bucket          s3BucketReference `json:"bucket"`
-    Object          s3ObjectReference `json:"object"`
+// Notification event server specific metadata.
+type eventMeta struct {
+    SchemaVersion   string     `json:"s3SchemaVersion"`
+    ConfigurationID string     `json:"configurationId"`
+    Bucket          bucketMeta `json:"bucket"`
+    Object          objectMeta `json:"object"`
 }

 // NotificationEvent represents an Amazon an S3 bucket notification event.
@@ -141,7 +146,7 @@ type NotificationEvent struct {
     UserIdentity      identity          `json:"userIdentity"`
     RequestParameters map[string]string `json:"requestParameters"`
     ResponseElements  map[string]string `json:"responseElements"`
-    S3                s3Reference       `json:"s3"`
+    S3                eventMeta         `json:"s3"`
 }

 // Represents the minio sqs type and inputs.
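For reference, the filter shape these structs decode follows the S3 NotificationConfiguration format. Below is a standalone sketch of decoding a queue configuration with one prefix and one suffix rule; it uses local copies of the types above so it compiles outside the package, and the ARN value is only a sample.

package main

import (
    "encoding/xml"
    "fmt"
)

// Local copies of the notification types above, kept minimal for the example.
type filterRule struct {
    Name  string `xml:"Name"`
    Value string `xml:"Value"`
}

type keyFilter struct {
    FilterRules []filterRule `xml:"FilterRule,omitempty"`
}

type queueConfig struct {
    Events []string `xml:"Event"`
    Filter struct {
        Key keyFilter `xml:"S3Key,omitempty"`
    }
    ID       string `xml:"Id"`
    QueueArn string `xml:"Queue"`
}

func main() {
    // Sample queue configuration; the ARN format here is illustrative only.
    payload := `<QueueConfiguration>
  <Id>1</Id>
  <Queue>arn:minio:sqs:us-east-1:1:amqp</Queue>
  <Event>s3:ObjectCreated:*</Event>
  <Filter>
    <S3Key>
      <FilterRule><Name>prefix</Name><Value>images/</Value></FilterRule>
      <FilterRule><Name>suffix</Name><Value>.jpg</Value></FilterRule>
    </S3Key>
  </Filter>
</QueueConfiguration>`

    var qc queueConfig
    if err := xml.Unmarshal([]byte(payload), &qc); err != nil {
        fmt.Println("unmarshal error:", err)
        return
    }
    // Prints the two filter rules that checkFilterRules would then validate.
    fmt.Printf("%+v\n", qc.Filter.Key.FilterRules)
}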

bucket-notification-utils.go

@@ -51,6 +51,55 @@ func checkEvents(events []string) APIErrorCode {
     return ErrNone
 }

+// Valid if filterName is 'prefix'.
+func isValidFilterNamePrefix(filterName string) bool {
+    return "prefix" == filterName
+}
+
+// Valid if filterName is 'suffix'.
+func isValidFilterNameSuffix(filterName string) bool {
+    return "suffix" == filterName
+}
+
+// Is this a valid filterName? - returns true if valid.
+func isValidFilterName(filterName string) bool {
+    return isValidFilterNamePrefix(filterName) || isValidFilterNameSuffix(filterName)
+}
+
+// checkFilterRules - checks given list of filter rules if all of them are valid.
+func checkFilterRules(filterRules []filterRule) APIErrorCode {
+    ruleSetMap := make(map[string]string)
+    // Validate all filter rules.
+    for _, filterRule := range filterRules {
+        // Unknown filter rule name found, returns an appropriate error.
+        if !isValidFilterName(filterRule.Name) {
+            return ErrFilterNameInvalid
+        }
+        // Filter names should not be set twice per notification service
+        // configuration, if found return an appropriate error.
+        if _, ok := ruleSetMap[filterRule.Name]; ok {
+            if isValidFilterNamePrefix(filterRule.Name) {
+                return ErrFilterNamePrefix
+            } else if isValidFilterNameSuffix(filterRule.Name) {
+                return ErrFilterNameSuffix
+            } else {
+                return ErrFilterNameInvalid
+            }
+        }
+        // Maximum prefix length can be up to 1,024 characters, validate.
+        if !IsValidObjectPrefix(filterRule.Value) {
+            return ErrFilterPrefixValueInvalid
+        }
+        // Set the new rule name to keep track of duplicates.
+        ruleSetMap[filterRule.Name] = filterRule.Value
+    }
+    // Success all prefixes validated.
+    return ErrNone
+}
+
 // checkQueueArn - check if the queue arn is valid.
 func checkQueueArn(queueArn string) APIErrorCode {
     if !strings.HasPrefix(queueArn, minioSqs) {
@@ -62,6 +111,14 @@ func checkQueueArn(queueArn string) APIErrorCode {
     return ErrNone
 }

+// Validate if we recognize the queue type.
+func isValidQueue(sqsArn arnMinioSqs) bool {
+    amqpQ := isAMQPQueue(sqsArn)       // Is amqp queue?.
+    elasticQ := isElasticQueue(sqsArn) // Is elastic queue?.
+    redisQ := isRedisQueue(sqsArn)     // Is redis queue?.
+    return amqpQ || elasticQ || redisQ
+}
+
 // Check - validates queue configuration and returns error if any.
 func checkQueueConfig(qConfig queueConfig) APIErrorCode {
     // Check queue arn is valid.
@@ -72,7 +129,7 @@ func checkQueueConfig(qConfig queueConfig) APIErrorCode {
     // Unmarshals QueueArn into structured object.
     sqsArn := unmarshalSqsArn(qConfig.QueueArn)
     // Validate if sqsArn requested any of the known supported queues.
-    if !isAMQPQueue(sqsArn) || !isElasticQueue(sqsArn) || !isRedisQueue(sqsArn) {
+    if !isValidQueue(sqsArn) {
         return ErrARNNotification
     }
@@ -81,6 +138,11 @@ func checkQueueConfig(qConfig queueConfig) APIErrorCode {
         return s3Error
     }

+    // Check if valid filters are set in queue config.
+    if s3Error := checkFilterRules(qConfig.Filter.Key.FilterRules); s3Error != ErrNone {
+        return s3Error
+    }
+
     // Success.
     return ErrNone
 }
@@ -113,6 +175,7 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode {
 // Returned value represents minio sqs types, currently supported are
 // - amqp
 // - elasticsearch
+// - redis
 func unmarshalSqsArn(queueArn string) (mSqs arnMinioSqs) {
     sqsType := strings.TrimPrefix(queueArn, minioSqs+serverConfig.GetRegion()+":")
     mSqs = arnMinioSqs{}

bucket-notification-utils_test.go (new file)

@@ -0,0 +1,17 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
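At this commit the new test file carries only the license header. A table-driven test for checkFilterRules might look like the sketch below; the cases are illustrative assumptions, not part of the commit.

package main

import "testing"

// Sketch of a table-driven test for checkFilterRules; cases are assumed, not
// taken from the commit.
func TestCheckFilterRules(t *testing.T) {
    testCases := []struct {
        rules    []filterRule
        expected APIErrorCode
    }{
        // One prefix and one suffix rule are accepted.
        {[]filterRule{{Name: "prefix", Value: "images/"}, {Name: "suffix", Value: ".jpg"}}, ErrNone},
        // Unknown rule names are rejected.
        {[]filterRule{{Name: "glob", Value: "*.jpg"}}, ErrFilterNameInvalid},
        // At most one prefix rule per configuration.
        {[]filterRule{{Name: "prefix", Value: "a/"}, {Name: "prefix", Value: "b/"}}, ErrFilterNamePrefix},
        // At most one suffix rule per configuration.
        {[]filterRule{{Name: "suffix", Value: ".jpg"}, {Name: "suffix", Value: ".png"}}, ErrFilterNameSuffix},
    }
    for i, tc := range testCases {
        if got := checkFilterRules(tc.rules); got != tc.expected {
            t.Errorf("case %d: expected error code %v, got %v", i+1, tc.expected, got)
        }
    }
}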

logger-amqp.go

@@ -17,8 +17,6 @@
 package main

 import (
-    "errors"
-
     "github.com/Sirupsen/logrus"
     "github.com/streadway/amqp"
 )
@@ -46,6 +44,9 @@ type amqpConn struct {
 }

 func dialAMQP(amqpL amqpLogger) (amqpConn, error) {
+    if !amqpL.Enable {
+        return amqpConn{}, errLoggerNotEnabled
+    }
     conn, err := amqp.Dial(amqpL.URL)
     if err != nil {
         return amqpConn{}, err
@@ -53,13 +54,8 @@ func dialAMQP(amqpL amqpLogger) (amqpConn, error) {
     return amqpConn{Connection: conn, params: amqpL}, nil
 }

-var errLoggerNotEnabled = errors.New("logger type not enabled")
-
 func enableAMQPLogger() error {
     amqpL := serverConfig.GetAMQPLogger()
-    if !amqpL.Enable {
-        return errLoggerNotEnabled
-    }

     // Connect to amqp server.
     amqpC, err := dialAMQP(amqpL)

logger-elasticsearch.go

@@ -37,8 +37,11 @@ type elasticClient struct {
 }

 // Connects to elastic search instance at URL.
-func dialElastic(url string) (*elastic.Client, error) {
-    client, err := elastic.NewClient(elastic.SetURL(url), elastic.SetSniff(false))
+func dialElastic(esLogger elasticSearchLogger) (*elastic.Client, error) {
+    if !esLogger.Enable {
+        return nil, errLoggerNotEnabled
+    }
+    client, err := elastic.NewClient(elastic.SetURL(esLogger.URL), elastic.SetSniff(false))
     if err != nil {
         return nil, err
     }
@@ -48,10 +51,9 @@ func dialElastic(url string) (*elastic.Client, error) {
 // Enables elasticsearch logger.
 func enableElasticLogger() error {
     esLogger := serverConfig.GetElasticSearchLogger()
-    if !esLogger.Enable {
-        return errLoggerNotEnabled
-    }
-    client, err := dialElastic(esLogger.URL)
+
+    // Dial to elastic search.
+    client, err := dialElastic(esLogger)
     if err != nil {
         return err
     }

logger-redis.go

@@ -38,7 +38,13 @@ type redisConn struct {
 }

 // Dial a new connection to redis instance at addr, optionally with a password if any.
-func dialRedis(addr, password string) (*redis.Pool, error) {
+func dialRedis(rLogger redisLogger) (*redis.Pool, error) {
+    // Return error if redis not enabled.
+    if !rLogger.Enable {
+        return nil, errLoggerNotEnabled
+    }
+    addr := rLogger.Addr
+    password := rLogger.Password
     rPool := &redis.Pool{
         MaxIdle:     3,
         IdleTimeout: 240 * time.Second,
@@ -77,12 +83,9 @@ func dialRedis(addr, password string) (*redis.Pool, error) {
 func enableRedisLogger() error {
     rLogger := serverConfig.GetRedisLogger()
-    if !rLogger.Enable {
-        return errLoggerNotEnabled
-    }

     // Dial redis.
-    rPool, err := dialRedis(rLogger.Addr, rLogger.Password)
+    rPool, err := dialRedis(rLogger)
     if err != nil {
         return err
     }

logger.go

@@ -19,6 +19,7 @@ package main
 import (
     "bufio"
     "bytes"
+    "errors"
     "os"
     "runtime"
     "runtime/debug"
@@ -52,6 +53,8 @@ type logger struct {
     // Add new loggers here.
 }

+var errLoggerNotEnabled = errors.New("requested logger type is not enabled")
+
 // sysInfo returns useful system statistics.
 func sysInfo() map[string]string {
     host, err := os.Hostname()
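The net effect of the logger changes above is that each dial helper now owns its Enable check and reports errLoggerNotEnabled itself, so callers no longer repeat the guard. A self-contained sketch of the pattern; the types and names are local to the sketch, not the project's.

package main

import (
    "errors"
    "fmt"
)

var errLoggerNotEnabled = errors.New("requested logger type is not enabled")

// Stand-in for a logger configuration such as amqpLogger or redisLogger.
type loggerConfig struct {
    Enable bool
    URL    string
}

// dial refuses early when the logger is disabled, so every caller gets the
// same behaviour without duplicating the Enable check.
func dial(cfg loggerConfig) (string, error) {
    if !cfg.Enable {
        return "", errLoggerNotEnabled
    }
    return "connected to " + cfg.URL, nil
}

func main() {
    if _, err := dial(loggerConfig{Enable: false}); err == errLoggerNotEnabled {
        fmt.Println("logger disabled, skipping")
    }
}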

main.go

@@ -80,6 +80,7 @@ func enableLoggers() {
     // Adding new bucket notification related loggers.
     enableAMQPLogger()
     enableElasticLogger()
+    enableRedisLogger()

     // Add your logger here.
 }

queues.go

@@ -19,6 +19,7 @@ package main
 import (
     "fmt"
     "net/url"
+    "strings"
     "time"

     "github.com/Sirupsen/logrus"
@@ -36,54 +37,48 @@ const (
 // Returns true if queueArn is for an AMQP queue.
 func isAMQPQueue(sqsArn arnMinioSqs) bool {
-    if sqsArn.sqsType == queueTypeAMQP {
-        amqpL := serverConfig.GetAMQPLogger()
-        if !amqpL.Enable {
-            return false
-        }
-        // Connect to amqp server to validate.
-        amqpC, err := dialAMQP(amqpL)
-        if err != nil {
-            errorIf(err, "Unable to connect to amqp service.", amqpL)
-            return false
-        }
-        defer amqpC.Close()
+    if sqsArn.sqsType != queueTypeAMQP {
+        return false
     }
+    amqpL := serverConfig.GetAMQPLogger()
+    // Connect to amqp server to validate.
+    amqpC, err := dialAMQP(amqpL)
+    if err != nil {
+        errorIf(err, "Unable to connect to amqp service. %#v", amqpL)
+        return false
+    }
+    defer amqpC.Close()
     return true
 }

 // Returns true if queueArn is for an Redis queue.
 func isRedisQueue(sqsArn arnMinioSqs) bool {
-    if sqsArn.sqsType == queueTypeRedis {
-        rLogger := serverConfig.GetRedisLogger()
-        if !rLogger.Enable {
-            return false
-        }
-        // Connect to redis server to validate.
-        rPool, err := dialRedis(rLogger.Addr, rLogger.Password)
-        if err != nil {
-            errorIf(err, "Unable to connect to redis service.", rLogger)
-            return false
-        }
-        defer rPool.Close()
+    if sqsArn.sqsType != queueTypeRedis {
+        return false
     }
+    rLogger := serverConfig.GetRedisLogger()
+    // Connect to redis server to validate.
+    rPool, err := dialRedis(rLogger)
+    if err != nil {
+        errorIf(err, "Unable to connect to redis service. %#v", rLogger)
+        return false
+    }
+    defer rPool.Close()
     return true
 }

 // Returns true if queueArn is for an ElasticSearch queue.
 func isElasticQueue(sqsArn arnMinioSqs) bool {
-    if sqsArn.sqsType == queueTypeElastic {
-        esLogger := serverConfig.GetElasticSearchLogger()
-        if !esLogger.Enable {
-            return false
-        }
-        elasticC, err := dialElastic(esLogger.URL)
-        if err != nil {
-            errorIf(err, "Unable to connect to elasticsearch service.", esLogger.URL)
-            return false
-        }
-        defer elasticC.Stop()
+    if sqsArn.sqsType != queueTypeElastic {
+        return false
     }
+    esLogger := serverConfig.GetElasticSearchLogger()
+    elasticC, err := dialElastic(esLogger)
+    if err != nil {
+        errorIf(err, "Unable to connect to elasticsearch service %#v", esLogger)
+        return false
+    }
+    defer elasticC.Stop()
     return true
 }
@@ -98,6 +93,19 @@ func eventMatch(eventType EventName, events []string) (ok bool) {
     return ok
 }

+// Filter rule match, matches an object against the filter rules.
+func filterRuleMatch(object string, frs []filterRule) bool {
+    var prefixMatch, suffixMatch = true, true
+    for _, fr := range frs {
+        if isValidFilterNamePrefix(fr.Name) {
+            prefixMatch = strings.HasPrefix(object, fr.Value)
+        } else if isValidFilterNameSuffix(fr.Name) {
+            suffixMatch = strings.HasSuffix(object, fr.Value)
+        }
+    }
+    return prefixMatch && suffixMatch
+}
+
 // NotifyObjectCreatedEvent - notifies a new 's3:ObjectCreated' event.
 // List of events reported through this function are
 // - s3:ObjectCreated:Put
@@ -121,15 +129,15 @@ func notifyObjectCreatedEvent(nConfig notificationConfig, eventType EventName, b
         UserIdentity:      defaultIdentity(),
         RequestParameters: map[string]string{},
         ResponseElements:  map[string]string{},
-        S3: s3Reference{
+        S3: eventMeta{
             SchemaVersion:   "1.0",
             ConfigurationID: "Config",
-            Bucket: s3BucketReference{
+            Bucket: bucketMeta{
                 Name:          bucket,
                 OwnerIdentity: defaultIdentity(),
                 ARN:           "arn:aws:s3:::" + bucket,
             },
-            Object: s3ObjectReference{
+            Object: objectMeta{
                 Key:  url.QueryEscape(object),
                 ETag: etag,
                 Size: size,
@@ -140,7 +148,8 @@ func notifyObjectCreatedEvent(nConfig notificationConfig, eventType EventName, b
     }
     // Notify to all the configured queues.
     for _, qConfig := range nConfig.QueueConfigurations {
-        if eventMatch(eventType, qConfig.Events) {
+        ruleMatch := filterRuleMatch(object, qConfig.Filter.Key.FilterRules)
+        if eventMatch(eventType, qConfig.Events) && ruleMatch {
             log.WithFields(logrus.Fields{
                 "Records": events,
             }).Info()
@@ -167,15 +176,15 @@ func notifyObjectDeletedEvent(nConfig notificationConfig, bucket string, object
         UserIdentity:      defaultIdentity(),
         RequestParameters: map[string]string{},
         ResponseElements:  map[string]string{},
-        S3: s3Reference{
+        S3: eventMeta{
             SchemaVersion:   "1.0",
             ConfigurationID: "Config",
-            Bucket: s3BucketReference{
+            Bucket: bucketMeta{
                 Name:          bucket,
                 OwnerIdentity: defaultIdentity(),
                 ARN:           "arn:aws:s3:::" + bucket,
             },
-            Object: s3ObjectReference{
+            Object: objectMeta{
                 Key:       url.QueryEscape(object),
                 Sequencer: sequencer,
             },
@@ -184,7 +193,8 @@ func notifyObjectDeletedEvent(nConfig notificationConfig, bucket string, object
     }
     // Notify to all the configured queues.
     for _, qConfig := range nConfig.QueueConfigurations {
-        if eventMatch(ObjectRemovedDelete, qConfig.Events) {
+        ruleMatch := filterRuleMatch(object, qConfig.Filter.Key.FilterRules)
+        if eventMatch(ObjectRemovedDelete, qConfig.Events) && ruleMatch {
             log.WithFields(logrus.Fields{
                 "Records": events,
             }).Info()
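As a quick illustration of the matching semantics added in filterRuleMatch above, here is a standalone sketch; the helper mirrors the function in queues.go but is self-contained so it runs on its own.

package main

import (
    "fmt"
    "strings"
)

type filterRule struct{ Name, Value string }

// Mirrors filterRuleMatch above: an object matches when it satisfies the
// prefix rule (if any) and the suffix rule (if any); no rules means match.
func filterRuleMatch(object string, frs []filterRule) bool {
    prefixMatch, suffixMatch := true, true
    for _, fr := range frs {
        if fr.Name == "prefix" {
            prefixMatch = strings.HasPrefix(object, fr.Value)
        } else if fr.Name == "suffix" {
            suffixMatch = strings.HasSuffix(object, fr.Value)
        }
    }
    return prefixMatch && suffixMatch
}

func main() {
    rules := []filterRule{{"prefix", "images/"}, {"suffix", ".jpg"}}
    fmt.Println(filterRuleMatch("images/cat.jpg", rules)) // true
    fmt.Println(filterRuleMatch("images/cat.png", rules)) // false: suffix mismatch
    fmt.Println(filterRuleMatch("docs/cat.jpg", rules))   // false: prefix mismatch
    fmt.Println(filterRuleMatch("anything.txt", nil))     // true: no rules, no restriction
}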
