diff --git a/main.go b/main.go index 9d66e311d..253df633f 100644 --- a/main.go +++ b/main.go @@ -24,7 +24,7 @@ import ( "github.com/minio-io/minio/pkg/utils/log" ) -func getStorageType(input string) server.StorageType { +func getDriverType(input string) server.DriverType { switch { case input == "file": return server.File @@ -34,15 +34,15 @@ func getStorageType(input string) server.StorageType { return server.Donut default: { - log.Println("Unknown storage type:", input) - log.Println("Choosing default storage type as 'file'..") + log.Println("Unknown driver type:", input) + log.Println("Choosing default driver type as 'file'..") return server.File } } } func runCmd(c *cli.Context) { - storageTypeStr := c.String("storage-type") + driverTypeStr := c.String("driver-type") domain := c.String("domain") apiaddress := c.String("api-address") webaddress := c.String("web-address") @@ -52,7 +52,7 @@ func runCmd(c *cli.Context) { log.Fatal("Both certificate and key must be provided to enable https") } tls := (certFile != "" && keyFile != "") - storageType := getStorageType(storageTypeStr) + driverType := getDriverType(driverTypeStr) var serverConfigs []server.Config apiServerConfig := server.Config{ Domain: domain, @@ -61,7 +61,7 @@ func runCmd(c *cli.Context) { CertFile: certFile, KeyFile: keyFile, APIType: server.MinioAPI{ - StorageType: storageType, + DriverType: driverType, }, } webUIServerConfig := server.Config{ @@ -110,7 +110,7 @@ func main() { Usage: "key.pem", }, cli.StringFlag{ - Name: "storage-type,s", + Name: "driver-type,t", Value: "donut", Usage: "valid entries: file,inmemory,donut", }, diff --git a/pkg/api/api_bucket_handlers.go b/pkg/api/api_bucket_handlers.go index 095687d2d..d99b4e854 100644 --- a/pkg/api/api_bucket_handlers.go +++ b/pkg/api/api_bucket_handlers.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/gorilla/mux" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" "github.com/minio-io/minio/pkg/utils/log" ) @@ -45,21 +45,21 @@ func (server *minioAPI) listObjectsHandler(w http.ResponseWriter, req *http.Requ } acceptsContentType := getContentType(req) - objects, resources, err := server.storage.ListObjects(bucket, resources) + objects, resources, err := server.driver.ListObjects(bucket, resources) switch err := err.(type) { case nil: // success { response := generateObjectsListResult(bucket, objects, resources) w.Write(writeObjectHeadersAndResponse(w, response, acceptsContentType)) } - case mstorage.BucketNotFound: + case drivers.BucketNotFound: { error := errorCodeError(NoSuchBucket) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ImplementationError: + case drivers.ImplementationError: { // Embed error log on server side log.Errorln(err) @@ -68,14 +68,14 @@ func (server *minioAPI) listObjectsHandler(w http.ResponseWriter, req *http.Requ w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketNameInvalid: + case drivers.BucketNameInvalid: { error := errorCodeError(InvalidBucketName) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ObjectNameInvalid: + case drivers.ObjectNameInvalid: { error := errorCodeError(NoSuchKey) errorResponse := getErrorResponse(error, resources.Prefix) @@ -91,14 +91,14 @@ func (server *minioAPI) 
listObjectsHandler(w http.ResponseWriter, req *http.Requ // owned by the authenticated sender of the request. func (server *minioAPI) listBucketsHandler(w http.ResponseWriter, req *http.Request) { acceptsContentType := getContentType(req) - buckets, err := server.storage.ListBuckets() + buckets, err := server.driver.ListBuckets() switch err := err.(type) { case nil: { response := generateBucketsListResult(buckets) w.Write(writeObjectHeadersAndResponse(w, response, acceptsContentType)) } - case mstorage.ImplementationError: + case drivers.ImplementationError: { log.Errorln(err) error := errorCodeError(InternalError) @@ -106,7 +106,7 @@ func (server *minioAPI) listBucketsHandler(w http.ResponseWriter, req *http.Requ w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BackendCorrupted: + case drivers.BackendCorrupted: { log.Errorln(err) error := errorCodeError(InternalError) @@ -123,7 +123,7 @@ func (server *minioAPI) listBucketsHandler(w http.ResponseWriter, req *http.Requ func (server *minioAPI) putBucketHandler(w http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) bucket := vars["bucket"] - err := server.storage.CreateBucket(bucket) + err := server.driver.CreateBucket(bucket) resources := getBucketResources(req.URL.Query()) if resources.Policy == true { @@ -138,21 +138,21 @@ func (server *minioAPI) putBucketHandler(w http.ResponseWriter, req *http.Reques w.Header().Set("Server", "Minio") w.Header().Set("Connection", "close") } - case mstorage.BucketNameInvalid: + case drivers.BucketNameInvalid: { error := errorCodeError(InvalidBucketName) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketExists: + case drivers.BucketExists: { error := errorCodeError(BucketAlreadyExists) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ImplementationError: + case drivers.ImplementationError: { // Embed errors log on server side log.Errorln(err) diff --git a/pkg/api/api_object_handlers.go b/pkg/api/api_object_handlers.go index 7a949b236..1599e6aba 100644 --- a/pkg/api/api_object_handlers.go +++ b/pkg/api/api_object_handlers.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/gorilla/mux" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" "github.com/minio-io/minio/pkg/utils/log" ) @@ -35,7 +35,7 @@ func (server *minioAPI) getObjectHandler(w http.ResponseWriter, req *http.Reques bucket = vars["bucket"] object = vars["object"] - metadata, err := server.storage.GetObjectMetadata(bucket, object, "") + metadata, err := server.driver.GetObjectMetadata(bucket, object, "") switch err := err.(type) { case nil: // success { @@ -52,7 +52,7 @@ func (server *minioAPI) getObjectHandler(w http.ResponseWriter, req *http.Reques switch httpRange.start == 0 && httpRange.length == 0 { case true: writeObjectHeaders(w, metadata) - if _, err := server.storage.GetObject(w, bucket, object); err != nil { + if _, err := server.driver.GetObject(w, bucket, object); err != nil { log.Errorln(err) error := errorCodeError(InternalError) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) @@ -64,7 +64,7 @@ func (server *minioAPI) getObjectHandler(w http.ResponseWriter, req *http.Reques metadata.Size = httpRange.length writeRangeObjectHeaders(w, metadata, 
httpRange.getContentRange()) w.WriteHeader(http.StatusPartialContent) - _, err := server.storage.GetPartialObject(w, bucket, object, httpRange.start, httpRange.length) + _, err := server.driver.GetPartialObject(w, bucket, object, httpRange.start, httpRange.length) if err != nil { log.Errorln(err) error := errorCodeError(InternalError) @@ -76,28 +76,28 @@ func (server *minioAPI) getObjectHandler(w http.ResponseWriter, req *http.Reques } } - case mstorage.ObjectNotFound: + case drivers.ObjectNotFound: { error := errorCodeError(NoSuchKey) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ObjectNameInvalid: + case drivers.ObjectNameInvalid: { error := errorCodeError(NoSuchKey) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketNameInvalid: + case drivers.BucketNameInvalid: { error := errorCodeError(InvalidBucketName) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ImplementationError: + case drivers.ImplementationError: { // Embed errors log on serve side log.Errorln(err) @@ -119,25 +119,25 @@ func (server *minioAPI) headObjectHandler(w http.ResponseWriter, req *http.Reque bucket = vars["bucket"] object = vars["object"] - metadata, err := server.storage.GetObjectMetadata(bucket, object, "") + metadata, err := server.driver.GetObjectMetadata(bucket, object, "") switch err := err.(type) { case nil: writeObjectHeaders(w, metadata) - case mstorage.ObjectNotFound: + case drivers.ObjectNotFound: { error := errorCodeError(NoSuchKey) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ObjectNameInvalid: + case drivers.ObjectNameInvalid: { error := errorCodeError(NoSuchKey) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ImplementationError: + case drivers.ImplementationError: { // Embed error log on server side log.Errorln(err) @@ -167,12 +167,12 @@ func (server *minioAPI) putObjectHandler(w http.ResponseWriter, req *http.Reques // get Content-MD5 sent by client md5 := req.Header.Get("Content-MD5") - err := server.storage.CreateObject(bucket, object, "", md5, req.Body) + err := server.driver.CreateObject(bucket, object, "", md5, req.Body) switch err := err.(type) { case nil: w.Header().Set("Server", "Minio") w.Header().Set("Connection", "close") - case mstorage.ImplementationError: + case drivers.ImplementationError: { // Embed error log on server side log.Errorln(err) @@ -181,35 +181,35 @@ func (server *minioAPI) putObjectHandler(w http.ResponseWriter, req *http.Reques w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketNotFound: + case drivers.BucketNotFound: { error := errorCodeError(NoSuchBucket) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketNameInvalid: + case drivers.BucketNameInvalid: { error := 
errorCodeError(InvalidBucketName) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ObjectExists: + case drivers.ObjectExists: { error := errorCodeError(NotImplemented) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BadDigest: + case drivers.BadDigest: { error := errorCodeError(BadDigest) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.InvalidDigest: + case drivers.InvalidDigest: { error := errorCodeError(InvalidDigest) errorResponse := getErrorResponse(error, "/"+bucket+"/"+object) diff --git a/pkg/api/api_policy_handlers.go b/pkg/api/api_policy_handlers.go index c50b8c5c1..18338ccd5 100644 --- a/pkg/api/api_policy_handlers.go +++ b/pkg/api/api_policy_handlers.go @@ -21,7 +21,7 @@ import ( "net/http" "github.com/gorilla/mux" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" "github.com/minio-io/minio/pkg/utils/log" ) @@ -34,7 +34,7 @@ func (server *minioAPI) putBucketPolicyHandler(w http.ResponseWriter, req *http. bucket := vars["bucket"] acceptsContentType := getContentType(req) - policy, ok := mstorage.Parsepolicy(req.Body) + policy, ok := drivers.Parsepolicy(req.Body) if ok == false { error := errorCodeError(InvalidPolicyDocument) errorResponse := getErrorResponse(error, bucket) @@ -43,7 +43,7 @@ func (server *minioAPI) putBucketPolicyHandler(w http.ResponseWriter, req *http. return } - err := server.storage.CreateBucketPolicy(bucket, policy) + err := server.driver.CreateBucketPolicy(bucket, policy) switch err := err.(type) { case nil: { @@ -51,21 +51,21 @@ func (server *minioAPI) putBucketPolicyHandler(w http.ResponseWriter, req *http. writeCommonHeaders(w, getContentString(acceptsContentType)) w.Header().Set("Connection", "keep-alive") } - case mstorage.BucketNameInvalid: + case drivers.BucketNameInvalid: { error := errorCodeError(InvalidBucketName) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketNotFound: + case drivers.BucketNotFound: { error := errorCodeError(NoSuchBucket) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BackendCorrupted: + case drivers.BackendCorrupted: { log.Errorln(err) error := errorCodeError(InternalError) @@ -73,7 +73,7 @@ func (server *minioAPI) putBucketPolicyHandler(w http.ResponseWriter, req *http. w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ImplementationError: + case drivers.ImplementationError: { log.Errorln(err) error := errorCodeError(InternalError) @@ -93,7 +93,7 @@ func (server *minioAPI) getBucketPolicyHandler(w http.ResponseWriter, req *http. bucket := vars["bucket"] acceptsContentType := getContentType(req) - p, err := server.storage.GetBucketPolicy(bucket) + p, err := server.driver.GetBucketPolicy(bucket) switch err := err.(type) { case nil: { @@ -108,28 +108,28 @@ func (server *minioAPI) getBucketPolicyHandler(w http.ResponseWriter, req *http. 
w.Header().Set("Connection", "keep-alive") w.Write(responsePolicy) } - case mstorage.BucketNameInvalid: + case drivers.BucketNameInvalid: { error := errorCodeError(InvalidBucketName) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketNotFound: + case drivers.BucketNotFound: { error := errorCodeError(NoSuchBucket) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BucketPolicyNotFound: + case drivers.BucketPolicyNotFound: { error := errorCodeError(NoSuchBucketPolicy) errorResponse := getErrorResponse(error, bucket) w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.BackendCorrupted: + case drivers.BackendCorrupted: { log.Errorln(err) error := errorCodeError(InternalError) @@ -137,7 +137,7 @@ func (server *minioAPI) getBucketPolicyHandler(w http.ResponseWriter, req *http. w.WriteHeader(error.HTTPStatusCode) w.Write(writeErrorResponse(w, errorResponse, acceptsContentType)) } - case mstorage.ImplementationError: + case drivers.ImplementationError: { log.Errorln(err) error := errorCodeError(InternalError) diff --git a/pkg/api/api_response.go b/pkg/api/api_response.go index 4e0d4c3e8..d70388d51 100644 --- a/pkg/api/api_response.go +++ b/pkg/api/api_response.go @@ -19,7 +19,7 @@ package api import ( "sort" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" ) // Reply date format @@ -33,7 +33,7 @@ const ( // // output: // populated struct that can be serialized to match xml and json api spec output -func generateBucketsListResult(buckets []mstorage.BucketMetadata) BucketListResponse { +func generateBucketsListResult(buckets []drivers.BucketMetadata) BucketListResponse { var listbuckets []*Bucket var data = BucketListResponse{} var owner = Owner{} @@ -73,7 +73,7 @@ func (b itemKey) Less(i, j int) bool { return b[i].Key < b[j].Key } // // output: // populated struct that can be serialized to match xml and json api spec output -func generateObjectsListResult(bucket string, objects []mstorage.ObjectMetadata, bucketResources mstorage.BucketResourcesMetadata) ObjectListResponse { +func generateObjectsListResult(bucket string, objects []drivers.ObjectMetadata, bucketResources drivers.BucketResourcesMetadata) ObjectListResponse { var contents []*Item var prefixes []*Prefix var owner = Owner{} diff --git a/pkg/api/api_router.go b/pkg/api/api_router.go index 04c6d45a1..4910eea2e 100644 --- a/pkg/api/api_router.go +++ b/pkg/api/api_router.go @@ -22,13 +22,13 @@ import ( router "github.com/gorilla/mux" "github.com/minio-io/minio/pkg/api/config" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" ) // private use type minioAPI struct { - domain string - storage mstorage.Storage + domain string + driver drivers.Driver } // Path based routing @@ -72,10 +72,10 @@ func getMux(api minioAPI, mux *router.Router) *router.Router { } // HTTPHandler - http wrapper handler -func HTTPHandler(domain string, storage mstorage.Storage) http.Handler { +func HTTPHandler(domain string, driver drivers.Driver) http.Handler { var mux *router.Router var api = minioAPI{} - api.storage = storage + api.driver = driver api.domain = domain r := router.NewRouter() diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index 08ea5a3a5..9721796e1 100644 
--- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -29,8 +29,8 @@ import ( "time" "github.com/minio-io/minio/pkg/api" - mstorage "github.com/minio-io/minio/pkg/storage" - "github.com/minio-io/minio/pkg/storage/memory" + "github.com/minio-io/minio/pkg/drivers" + "github.com/minio-io/minio/pkg/drivers/memory" . "gopkg.in/check.v1" ) @@ -42,8 +42,8 @@ type MySuite struct{} var _ = Suite(&MySuite{}) func (s *MySuite) TestNonExistantObject(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() @@ -54,14 +54,14 @@ func (s *MySuite) TestNonExistantObject(c *C) { } func (s *MySuite) TestEmptyObject(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() buffer := bytes.NewBufferString("") - storage.CreateBucket("bucket") - storage.CreateObject("bucket", "object", "", "", buffer) + driver.CreateBucket("bucket") + driver.CreateObject("bucket", "object", "", "", buffer) response, err := http.Get(testServer.URL + "/bucket/object") c.Assert(err, IsNil) @@ -71,7 +71,7 @@ func (s *MySuite) TestEmptyObject(c *C) { c.Assert(err, IsNil) c.Assert(true, Equals, bytes.Equal(responseBody, buffer.Bytes())) - metadata, err := storage.GetObjectMetadata("bucket", "object", "") + metadata, err := driver.GetObjectMetadata("bucket", "object", "") c.Assert(err, IsNil) verifyHeaders(c, response.Header, metadata.Created, 0, "application/octet-stream", metadata.Md5) @@ -79,14 +79,14 @@ func (s *MySuite) TestEmptyObject(c *C) { } func (s *MySuite) TestObject(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() buffer := bytes.NewBufferString("hello world") - storage.CreateBucket("bucket") - storage.CreateObject("bucket", "object", "", "", buffer) + driver.CreateBucket("bucket") + driver.CreateObject("bucket", "object", "", "", buffer) response, err := http.Get(testServer.URL + "/bucket/object") c.Assert(err, IsNil) @@ -96,14 +96,14 @@ func (s *MySuite) TestObject(c *C) { c.Assert(err, IsNil) c.Assert(true, Equals, bytes.Equal(responseBody, []byte("hello world"))) - metadata, err := storage.GetObjectMetadata("bucket", "object", "") + metadata, err := driver.GetObjectMetadata("bucket", "object", "") c.Assert(err, IsNil) verifyHeaders(c, response.Header, metadata.Created, len("hello world"), "application/octet-stream", metadata.Md5) } func (s *MySuite) TestMultipleObjects(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() @@ -111,10 +111,10 @@ func (s *MySuite) TestMultipleObjects(c *C) { buffer2 := bytes.NewBufferString("hello two") buffer3 := bytes.NewBufferString("hello three") - storage.CreateBucket("bucket") - storage.CreateObject("bucket", "object1", "", "", buffer1) - storage.CreateObject("bucket", "object2", "", "", buffer2) - storage.CreateObject("bucket", "object3", "", "", buffer3) + driver.CreateBucket("bucket") + driver.CreateObject("bucket", "object1", "", "", buffer1) + 
driver.CreateObject("bucket", "object2", "", "", buffer2) + driver.CreateObject("bucket", "object3", "", "", buffer3) // test non-existant object response, err := http.Get(testServer.URL + "/bucket/object") @@ -129,7 +129,7 @@ func (s *MySuite) TestMultipleObjects(c *C) { c.Assert(err, IsNil) // get metadata - metadata, err := storage.GetObjectMetadata("bucket", "object1", "") + metadata, err := driver.GetObjectMetadata("bucket", "object1", "") c.Assert(err, IsNil) c.Assert(response.StatusCode, Equals, http.StatusOK) @@ -148,7 +148,7 @@ func (s *MySuite) TestMultipleObjects(c *C) { c.Assert(err, IsNil) // get metadata - metadata, err = storage.GetObjectMetadata("bucket", "object2", "") + metadata, err = driver.GetObjectMetadata("bucket", "object2", "") c.Assert(err, IsNil) c.Assert(response.StatusCode, Equals, http.StatusOK) @@ -167,7 +167,7 @@ func (s *MySuite) TestMultipleObjects(c *C) { c.Assert(err, IsNil) // get metadata - metadata, err = storage.GetObjectMetadata("bucket", "object3", "") + metadata, err = driver.GetObjectMetadata("bucket", "object3", "") c.Assert(err, IsNil) c.Assert(response.StatusCode, Equals, http.StatusOK) @@ -182,8 +182,8 @@ func (s *MySuite) TestMultipleObjects(c *C) { } func (s *MySuite) TestNotImplemented(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() @@ -193,8 +193,8 @@ func (s *MySuite) TestNotImplemented(c *C) { } func (s *MySuite) TestHeader(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() @@ -203,25 +203,25 @@ func (s *MySuite) TestHeader(c *C) { c.Assert(response.StatusCode, Equals, http.StatusNotFound) buffer := bytes.NewBufferString("hello world") - storage.CreateBucket("bucket") - storage.CreateObject("bucket", "object", "", "", buffer) + driver.CreateBucket("bucket") + driver.CreateObject("bucket", "object", "", "", buffer) response, err = http.Get(testServer.URL + "/bucket/object") c.Assert(err, IsNil) c.Assert(response.StatusCode, Equals, http.StatusOK) - metadata, err := storage.GetObjectMetadata("bucket", "object", "") + metadata, err := driver.GetObjectMetadata("bucket", "object", "") c.Assert(err, IsNil) verifyHeaders(c, response.Header, metadata.Created, len("hello world"), "application/octet-stream", metadata.Md5) } func (s *MySuite) TestPutBucket(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() - buckets, err := storage.ListBuckets() + buckets, err := driver.ListBuckets() c.Assert(len(buckets), Equals, 0) c.Assert(err, IsNil) @@ -234,23 +234,23 @@ func (s *MySuite) TestPutBucket(c *C) { c.Assert(response.StatusCode, Equals, http.StatusOK) // check bucket exists - buckets, err = storage.ListBuckets() + buckets, err = driver.ListBuckets() c.Assert(len(buckets), Equals, 1) c.Assert(err, IsNil) c.Assert(buckets[0].Name, Equals, "bucket") } func (s *MySuite) TestPutObject(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer 
testServer.Close() - resources := mstorage.BucketResourcesMetadata{} + resources := drivers.BucketResourcesMetadata{} resources.Maxkeys = 1000 resources.Prefix = "" - objects, resources, err := storage.ListObjects("bucket", resources) + objects, resources, err := driver.ListObjects("bucket", resources) c.Assert(len(objects), Equals, 0) c.Assert(resources.IsTruncated, Equals, false) c.Assert(err, Not(IsNil)) @@ -278,18 +278,18 @@ func (s *MySuite) TestPutObject(c *C) { resources.Maxkeys = 1000 resources.Prefix = "" - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = driver.ListObjects("bucket", resources) c.Assert(len(objects), Equals, 1) c.Assert(resources.IsTruncated, Equals, false) c.Assert(err, IsNil) var writer bytes.Buffer - storage.GetObject(&writer, "bucket", "two") + driver.GetObject(&writer, "bucket", "two") c.Assert(bytes.Equal(writer.Bytes(), []byte("hello world")), Equals, true) - metadata, err := storage.GetObjectMetadata("bucket", "two", "") + metadata, err := driver.GetObjectMetadata("bucket", "two", "") c.Assert(err, IsNil) lastModified := metadata.Created @@ -298,8 +298,8 @@ func (s *MySuite) TestPutObject(c *C) { } func (s *MySuite) TestListBuckets(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() @@ -312,7 +312,7 @@ func (s *MySuite) TestListBuckets(c *C) { c.Assert(err, IsNil) c.Assert(len(listResponse.Buckets.Bucket), Equals, 0) - storage.CreateBucket("foo") + driver.CreateBucket("foo") response, err = http.Get(testServer.URL + "/") defer response.Body.Close() @@ -324,7 +324,7 @@ func (s *MySuite) TestListBuckets(c *C) { c.Assert(len(listResponse.Buckets.Bucket), Equals, 1) c.Assert(listResponse.Buckets.Bucket[0].Name, Equals, "foo") - storage.CreateBucket("bar") + driver.CreateBucket("bar") response, err = http.Get(testServer.URL + "/") defer response.Body.Close() @@ -377,12 +377,12 @@ func verifyHeaders(c *C, header http.Header, date time.Time, size int, contentTy } func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() - err := storage.CreateBucket("foo") + err := driver.CreateBucket("foo") c.Assert(err, IsNil) request, err := http.NewRequest("GET", testServer.URL+"/", bytes.NewBufferString("")) @@ -402,12 +402,12 @@ func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) { } func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() - err := storage.CreateBucket("foo") + err := driver.CreateBucket("foo") c.Assert(err, IsNil) request, err := http.NewRequest("GET", testServer.URL+"/foo", bytes.NewBufferString("")) @@ -427,12 +427,12 @@ func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) { } func (s *MySuite) TestContentTypePersists(c *C) { - _, _, storage := memory.Start() - httpHandler := api.HTTPHandler("", storage) + _, _, driver := memory.Start() + httpHandler := api.HTTPHandler("", driver) testServer := httptest.NewServer(httpHandler) defer testServer.Close() - err := 
storage.CreateBucket("bucket") + err := driver.CreateBucket("bucket") c.Assert(err, IsNil) client := http.Client{} diff --git a/pkg/api/headers.go b/pkg/api/headers.go index 8d2bb5de1..085c8f78e 100644 --- a/pkg/api/headers.go +++ b/pkg/api/headers.go @@ -24,7 +24,7 @@ import ( "strconv" "time" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" ) // No encoder interface exists, so we create one. @@ -58,7 +58,7 @@ func writeErrorResponse(w http.ResponseWriter, response interface{}, acceptsType } // Write object header -func writeObjectHeaders(w http.ResponseWriter, metadata mstorage.ObjectMetadata) { +func writeObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata) { lastModified := metadata.Created.Format(time.RFC1123) // common headers writeCommonHeaders(w, metadata.ContentType) @@ -69,7 +69,7 @@ func writeObjectHeaders(w http.ResponseWriter, metadata mstorage.ObjectMetadata) } // Write range object header -func writeRangeObjectHeaders(w http.ResponseWriter, metadata mstorage.ObjectMetadata, ra string) { +func writeRangeObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata, ra string) { lastModified := metadata.Created.Format(time.RFC1123) // common headers writeCommonHeaders(w, metadata.ContentType) diff --git a/pkg/api/resources.go b/pkg/api/resources.go index e40abe1c0..e6306bf24 100644 --- a/pkg/api/resources.go +++ b/pkg/api/resources.go @@ -20,11 +20,11 @@ import ( "net/url" "strconv" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" ) // parse bucket url queries -func getBucketResources(values url.Values) (v mstorage.BucketResourcesMetadata) { +func getBucketResources(values url.Values) (v drivers.BucketResourcesMetadata) { for key, value := range values { switch true { case key == "prefix": diff --git a/pkg/storage/storage_api_suite.go b/pkg/drivers/api_testsuite.go similarity index 67% rename from pkg/storage/storage_api_suite.go rename to pkg/drivers/api_testsuite.go index 72a49c24f..942e8eff6 100644 --- a/pkg/storage/storage_api_suite.go +++ b/pkg/drivers/api_testsuite.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package storage +package drivers import ( "bytes" @@ -26,7 +26,7 @@ import ( ) // APITestSuite - collection of API tests -func APITestSuite(c *check.C, create func() Storage) { +func APITestSuite(c *check.C, create func() Driver) { testCreateBucket(c, create) testMultipleObjectCreation(c, create) testPaging(c, create) @@ -43,16 +43,16 @@ func APITestSuite(c *check.C, create func() Storage) { //testContentMd5Set(c, create) TODO } -func testCreateBucket(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateBucket("bucket") +func testCreateBucket(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateBucket("bucket") c.Assert(err, check.IsNil) } -func testMultipleObjectCreation(c *check.C, create func() Storage) { +func testMultipleObjectCreation(c *check.C, create func() Driver) { objects := make(map[string][]byte) - storage := create() - err := storage.CreateBucket("bucket") + drivers := create() + err := drivers.CreateBucket("bucket") c.Assert(err, check.IsNil) for i := 0; i < 10; i++ { randomPerm := rand.Perm(10) @@ -62,7 +62,7 @@ func testMultipleObjectCreation(c *check.C, create func() Storage) { } key := "obj" + strconv.Itoa(i) objects[key] = []byte(randomString) - err := storage.CreateObject("bucket", key, "", "", bytes.NewBufferString(randomString)) + err := drivers.CreateObject("bucket", key, "", "", bytes.NewBufferString(randomString)) c.Assert(err, check.IsNil) } @@ -70,11 +70,11 @@ func testMultipleObjectCreation(c *check.C, create func() Storage) { etags := make(map[string]string) for key, value := range objects { var byteBuffer bytes.Buffer - _, err := storage.GetObject(&byteBuffer, "bucket", key) + _, err := drivers.GetObject(&byteBuffer, "bucket", key) c.Assert(err, check.IsNil) c.Assert(byteBuffer.Bytes(), check.DeepEquals, value) - metadata, err := storage.GetObjectMetadata("bucket", key, "") + metadata, err := drivers.GetObjectMetadata("bucket", key, "") c.Assert(err, check.IsNil) c.Assert(metadata.Size, check.Equals, int64(len(value))) @@ -84,20 +84,20 @@ func testMultipleObjectCreation(c *check.C, create func() Storage) { } } -func testPaging(c *check.C, create func() Storage) { - storage := create() - storage.CreateBucket("bucket") +func testPaging(c *check.C, create func() Driver) { + drivers := create() + drivers.CreateBucket("bucket") resources := BucketResourcesMetadata{} - objects, resources, err := storage.ListObjects("bucket", resources) + objects, resources, err := drivers.ListObjects("bucket", resources) c.Assert(err, check.IsNil) c.Assert(len(objects), check.Equals, 0) c.Assert(resources.IsTruncated, check.Equals, false) // check before paging occurs for i := 0; i < 5; i++ { key := "obj" + strconv.Itoa(i) - storage.CreateObject("bucket", key, "", "", bytes.NewBufferString(key)) + drivers.CreateObject("bucket", key, "", "", bytes.NewBufferString(key)) resources.Maxkeys = 5 - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(len(objects), check.Equals, i+1) c.Assert(resources.IsTruncated, check.Equals, false) c.Assert(err, check.IsNil) @@ -105,20 +105,20 @@ func testPaging(c *check.C, create func() Storage) { // check after paging occurs pages work for i := 6; i <= 10; i++ { key := "obj" + strconv.Itoa(i) - storage.CreateObject("bucket", key, "", "", bytes.NewBufferString(key)) + drivers.CreateObject("bucket", key, "", "", bytes.NewBufferString(key)) resources.Maxkeys = 5 - objects, resources, err = 
storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(len(objects), check.Equals, 5) c.Assert(resources.IsTruncated, check.Equals, true) c.Assert(err, check.IsNil) } // check paging with prefix at end returns less objects { - storage.CreateObject("bucket", "newPrefix", "", "", bytes.NewBufferString("prefix1")) - storage.CreateObject("bucket", "newPrefix2", "", "", bytes.NewBufferString("prefix2")) + drivers.CreateObject("bucket", "newPrefix", "", "", bytes.NewBufferString("prefix1")) + drivers.CreateObject("bucket", "newPrefix2", "", "", bytes.NewBufferString("prefix2")) resources.Prefix = "new" resources.Maxkeys = 5 - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(len(objects), check.Equals, 2) } @@ -126,7 +126,7 @@ func testPaging(c *check.C, create func() Storage) { { resources.Prefix = "" resources.Maxkeys = 1000 - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(objects[0].Key, check.Equals, "newPrefix") c.Assert(objects[1].Key, check.Equals, "newPrefix2") c.Assert(objects[2].Key, check.Equals, "obj0") @@ -136,14 +136,14 @@ func testPaging(c *check.C, create func() Storage) { // check delimited results with delimiter and prefix { - storage.CreateObject("bucket", "this/is/delimited", "", "", bytes.NewBufferString("prefix1")) - storage.CreateObject("bucket", "this/is/also/a/delimited/file", "", "", bytes.NewBufferString("prefix2")) + drivers.CreateObject("bucket", "this/is/delimited", "", "", bytes.NewBufferString("prefix1")) + drivers.CreateObject("bucket", "this/is/also/a/delimited/file", "", "", bytes.NewBufferString("prefix2")) var prefixes []string resources.CommonPrefixes = prefixes // allocate new everytime resources.Delimiter = "/" resources.Prefix = "this/is/" resources.Maxkeys = 10 - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(err, check.IsNil) c.Assert(len(objects), check.Equals, 1) c.Assert(resources.CommonPrefixes[0], check.Equals, "also/") @@ -157,7 +157,7 @@ func testPaging(c *check.C, create func() Storage) { resources.Delimiter = "/" resources.Prefix = "" resources.Maxkeys = 1000 - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(objects[0].Key, check.Equals, "newPrefix") c.Assert(objects[1].Key, check.Equals, "newPrefix2") c.Assert(objects[2].Key, check.Equals, "obj0") @@ -171,7 +171,7 @@ func testPaging(c *check.C, create func() Storage) { resources.Prefix = "obj" resources.Delimiter = "" resources.Maxkeys = 1000 - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(objects[0].Key, check.Equals, "obj0") c.Assert(objects[1].Key, check.Equals, "obj1") c.Assert(objects[2].Key, check.Equals, "obj10") @@ -182,95 +182,95 @@ func testPaging(c *check.C, create func() Storage) { { resources.Prefix = "new" resources.Maxkeys = 5 - objects, resources, err = storage.ListObjects("bucket", resources) + objects, resources, err = drivers.ListObjects("bucket", resources) c.Assert(objects[0].Key, check.Equals, "newPrefix") c.Assert(objects[1].Key, check.Equals, "newPrefix2") } } -func testObjectOverwriteFails(c *check.C, create func() Storage) { - 
storage := create() - storage.CreateBucket("bucket") - err := storage.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one")) +func testObjectOverwriteFails(c *check.C, create func() Driver) { + drivers := create() + drivers.CreateBucket("bucket") + err := drivers.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one")) c.Assert(err, check.IsNil) - err = storage.CreateObject("bucket", "object", "", "", bytes.NewBufferString("three")) + err = drivers.CreateObject("bucket", "object", "", "", bytes.NewBufferString("three")) c.Assert(err, check.Not(check.IsNil)) var bytesBuffer bytes.Buffer - length, err := storage.GetObject(&bytesBuffer, "bucket", "object") + length, err := drivers.GetObject(&bytesBuffer, "bucket", "object") c.Assert(length, check.Equals, int64(len("one"))) c.Assert(err, check.IsNil) c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one") } -func testNonExistantBucketOperations(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one")) +func testNonExistantBucketOperations(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one")) c.Assert(err, check.Not(check.IsNil)) } -func testBucketRecreateFails(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateBucket("string") +func testBucketRecreateFails(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateBucket("string") c.Assert(err, check.IsNil) - err = storage.CreateBucket("string") + err = drivers.CreateBucket("string") c.Assert(err, check.Not(check.IsNil)) } -func testPutObjectInSubdir(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateBucket("bucket") +func testPutObjectInSubdir(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateBucket("bucket") c.Assert(err, check.IsNil) - err = storage.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world")) + err = drivers.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world")) c.Assert(err, check.IsNil) var bytesBuffer bytes.Buffer - length, err := storage.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object") + length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object") c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world")) c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length) c.Assert(err, check.IsNil) } -func testListBuckets(c *check.C, create func() Storage) { - storage := create() +func testListBuckets(c *check.C, create func() Driver) { + drivers := create() // test empty list - buckets, err := storage.ListBuckets() + buckets, err := drivers.ListBuckets() c.Assert(err, check.IsNil) c.Assert(len(buckets), check.Equals, 0) // add one and test exists - err = storage.CreateBucket("bucket1") + err = drivers.CreateBucket("bucket1") c.Assert(err, check.IsNil) - buckets, err = storage.ListBuckets() + buckets, err = drivers.ListBuckets() c.Assert(len(buckets), check.Equals, 1) c.Assert(err, check.IsNil) // add two and test exists - err = storage.CreateBucket("bucket2") + err = drivers.CreateBucket("bucket2") c.Assert(err, check.IsNil) - buckets, err = storage.ListBuckets() + buckets, err = drivers.ListBuckets() c.Assert(len(buckets), check.Equals, 2) c.Assert(err, check.IsNil) // add three and test exists + prefix - err = storage.CreateBucket("bucket22") + err = 
drivers.CreateBucket("bucket22") - buckets, err = storage.ListBuckets() + buckets, err = drivers.ListBuckets() c.Assert(len(buckets), check.Equals, 3) c.Assert(err, check.IsNil) } -func testListBucketsOrder(c *check.C, create func() Storage) { +func testListBucketsOrder(c *check.C, create func() Driver) { // if implementation contains a map, order of map keys will vary. // this ensures they return in the same order each time for i := 0; i < 10; i++ { - storage := create() + drivers := create() // add one and test exists - storage.CreateBucket("bucket1") - storage.CreateBucket("bucket2") + drivers.CreateBucket("bucket1") + drivers.CreateBucket("bucket2") - buckets, err := storage.ListBuckets() + buckets, err := drivers.ListBuckets() c.Assert(len(buckets), check.Equals, 2) c.Assert(err, check.IsNil) c.Assert(buckets[0].Name, check.Equals, "bucket1") @@ -278,22 +278,22 @@ func testListBucketsOrder(c *check.C, create func() Storage) { } } -func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Storage) { - storage := create() +func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Driver) { + drivers := create() resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000} - objects, resources, err := storage.ListObjects("bucket", resources) + objects, resources, err := drivers.ListObjects("bucket", resources) c.Assert(err, check.Not(check.IsNil)) c.Assert(resources.IsTruncated, check.Equals, false) c.Assert(len(objects), check.Equals, 0) } -func testNonExistantObjectInBucket(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateBucket("bucket") +func testNonExistantObjectInBucket(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateBucket("bucket") c.Assert(err, check.IsNil) var byteBuffer bytes.Buffer - length, err := storage.GetObject(&byteBuffer, "bucket", "dir1") + length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1") c.Assert(length, check.Equals, int64(0)) c.Assert(err, check.Not(check.IsNil)) c.Assert(len(byteBuffer.Bytes()), check.Equals, 0) @@ -309,16 +309,16 @@ func testNonExistantObjectInBucket(c *check.C, create func() Storage) { } } -func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateBucket("bucket") +func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateBucket("bucket") c.Assert(err, check.IsNil) - err = storage.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world")) + err = drivers.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world")) c.Assert(err, check.IsNil) var byteBuffer bytes.Buffer - length, err := storage.GetObject(&byteBuffer, "bucket", "dir1") + length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1") c.Assert(length, check.Equals, int64(0)) switch err := err.(type) { case ObjectNotFound: @@ -335,7 +335,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Storage) { c.Assert(len(byteBuffer.Bytes()), check.Equals, 0) var byteBuffer2 bytes.Buffer - length, err = storage.GetObject(&byteBuffer, "bucket", "dir1/") + length, err = drivers.GetObject(&byteBuffer, "bucket", "dir1/") c.Assert(length, check.Equals, int64(0)) switch err := err.(type) { case ObjectNotFound: @@ -352,40 +352,40 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Storage) { c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0) } -func 
testDefaultContentType(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateBucket("bucket") +func testDefaultContentType(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateBucket("bucket") c.Assert(err, check.IsNil) // test empty - err = storage.CreateObject("bucket", "one", "", "", bytes.NewBufferString("one")) - metadata, err := storage.GetObjectMetadata("bucket", "one", "") + err = drivers.CreateObject("bucket", "one", "", "", bytes.NewBufferString("one")) + metadata, err := drivers.GetObjectMetadata("bucket", "one", "") c.Assert(err, check.IsNil) c.Assert(metadata.ContentType, check.Equals, "application/octet-stream") // test custom - storage.CreateObject("bucket", "two", "application/text", "", bytes.NewBufferString("two")) - metadata, err = storage.GetObjectMetadata("bucket", "two", "") + drivers.CreateObject("bucket", "two", "application/text", "", bytes.NewBufferString("two")) + metadata, err = drivers.GetObjectMetadata("bucket", "two", "") c.Assert(err, check.IsNil) c.Assert(metadata.ContentType, check.Equals, "application/text") // test trim space - storage.CreateObject("bucket", "three", "\tapplication/json ", "", bytes.NewBufferString("three")) - metadata, err = storage.GetObjectMetadata("bucket", "three", "") + drivers.CreateObject("bucket", "three", "\tapplication/json ", "", bytes.NewBufferString("three")) + metadata, err = drivers.GetObjectMetadata("bucket", "three", "") c.Assert(err, check.IsNil) c.Assert(metadata.ContentType, check.Equals, "application/json") } /* -func testContentMd5Set(c *check.C, create func() Storage) { - storage := create() - err := storage.CreateBucket("bucket") +func testContentMd5Set(c *check.C, create func() Driver) { + drivers := create() + err := drivers.CreateBucket("bucket") c.Assert(err, check.IsNil) // test md5 invalid - err = storage.CreateObject("bucket", "one", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA", bytes.NewBufferString("one")) + err = drivers.CreateObject("bucket", "one", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA", bytes.NewBufferString("one")) c.Assert(err, check.Not(check.IsNil)) - err = storage.CreateObject("bucket", "two", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA=", bytes.NewBufferString("one")) + err = drivers.CreateObject("bucket", "two", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA=", bytes.NewBufferString("one")) c.Assert(err, check.IsNil) } */ diff --git a/pkg/storage/storage_bucket_policy.go b/pkg/drivers/bucket_policy.go similarity index 99% rename from pkg/storage/storage_bucket_policy.go rename to pkg/drivers/bucket_policy.go index 5af8cb030..36e200d19 100644 --- a/pkg/storage/storage_bucket_policy.go +++ b/pkg/drivers/bucket_policy.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package storage +package drivers import ( "encoding/json" diff --git a/pkg/storage/storage_bucket_policy_compat.go b/pkg/drivers/bucket_policy_compat.go similarity index 98% rename from pkg/storage/storage_bucket_policy_compat.go rename to pkg/drivers/bucket_policy_compat.go index 10d83a862..5280df224 100644 --- a/pkg/storage/storage_bucket_policy_compat.go +++ b/pkg/drivers/bucket_policy_compat.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package storage +package drivers // This file implements compatability layer for AWS clients diff --git a/pkg/storage/storage_date.go b/pkg/drivers/date.go similarity index 66% rename from pkg/storage/storage_date.go rename to pkg/drivers/date.go index 9ee6e55fc..c47b83fd6 100644 --- a/pkg/storage/storage_date.go +++ b/pkg/drivers/date.go @@ -1,4 +1,20 @@ -package storage +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package drivers import ( "errors" diff --git a/pkg/storage/donutstorage/donutstorage.go b/pkg/drivers/donut/donut.go similarity index 68% rename from pkg/storage/donutstorage/donutstorage.go rename to pkg/drivers/donut/donut.go index 19e345c05..b8cc5de39 100644 --- a/pkg/storage/donutstorage/donutstorage.go +++ b/pkg/drivers/donut/donut.go @@ -14,21 +14,22 @@ * limitations under the License. */ -package donutstorage +package donut import ( "errors" - "github.com/minio-io/minio/pkg/storage" - "github.com/minio-io/minio/pkg/storage/donut" "io" "sort" "strconv" "strings" "time" + + "github.com/minio-io/minio/pkg/drivers" + "github.com/minio-io/minio/pkg/storage/donut" ) -// Storage creates a new single disk storage driver using donut without encoding. -type Storage struct { +// donutDriver - creates a new single disk drivers driver using donut +type donutDriver struct { donut donut.Donut } @@ -37,26 +38,25 @@ const ( ) // Start a single disk subsystem -func Start(path string) (chan<- string, <-chan error, storage.Storage) { - +func Start(path string) (chan<- string, <-chan error, drivers.Driver) { ctrlChannel := make(chan string) errorChannel := make(chan error) - s := new(Storage) + s := new(donutDriver) // TODO donut driver should be passed in as Start param and driven by config - s.donut = donut.NewDonutDriver(path) + s.donut = donut.NewDonut(path) go start(ctrlChannel, errorChannel, s) return ctrlChannel, errorChannel, s } -func start(ctrlChannel <-chan string, errorChannel chan<- error, s *Storage) { +func start(ctrlChannel <-chan string, errorChannel chan<- error, s *donutDriver) { close(errorChannel) } // ListBuckets returns a list of buckets -func (donutStorage Storage) ListBuckets() (results []storage.BucketMetadata, err error) { - buckets, err := donutStorage.donut.ListBuckets() +func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error) { + buckets, err := d.donut.ListBuckets() if err != nil { return nil, err } @@ -64,7 +64,7 @@ func (donutStorage Storage) ListBuckets() (results []storage.BucketMetadata, err if err != nil { return nil, err } - result := storage.BucketMetadata{ + result := drivers.BucketMetadata{ Name: bucket, // TODO Add real created date Created: time.Now(), @@ -75,30 +75,30 @@ func (donutStorage Storage) ListBuckets() (results []storage.BucketMetadata, err } // CreateBucket creates a new bucket -func (donutStorage Storage) CreateBucket(bucket string) error { - return donutStorage.donut.CreateBucket(bucket) +func (d donutDriver) CreateBucket(bucket 
string) error { + return d.donut.CreateBucket(bucket) } // GetBucketMetadata retrieves an bucket's metadata -func (donutStorage Storage) GetBucketMetadata(bucket string) (storage.BucketMetadata, error) { - return storage.BucketMetadata{}, errors.New("Not Implemented") +func (d donutDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { + return drivers.BucketMetadata{}, errors.New("Not Implemented") } // CreateBucketPolicy sets a bucket's access policy -func (donutStorage Storage) CreateBucketPolicy(bucket string, p storage.BucketPolicy) error { +func (d donutDriver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error { return errors.New("Not Implemented") } // GetBucketPolicy returns a bucket's access policy -func (donutStorage Storage) GetBucketPolicy(bucket string) (storage.BucketPolicy, error) { - return storage.BucketPolicy{}, errors.New("Not Implemented") +func (d donutDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) { + return drivers.BucketPolicy{}, errors.New("Not Implemented") } // GetObject retrieves an object and writes it to a writer -func (donutStorage Storage) GetObject(target io.Writer, bucket, key string) (int64, error) { - reader, err := donutStorage.donut.GetObject(bucket, key) +func (d donutDriver) GetObject(target io.Writer, bucket, key string) (int64, error) { + reader, err := d.donut.GetObjectReader(bucket, key) if err != nil { - return 0, storage.ObjectNotFound{ + return 0, drivers.ObjectNotFound{ Bucket: bucket, Object: key, } @@ -106,23 +106,23 @@ func (donutStorage Storage) GetObject(target io.Writer, bucket, key string) (int return io.Copy(target, reader) } -// GetPartialObject retrieves an object and writes it to a writer -func (donutStorage Storage) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { +// GetPartialObject retrieves an object range and writes it to a writer +func (d donutDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { return 0, errors.New("Not Implemented") } // GetObjectMetadata retrieves an object's metadata -func (donutStorage Storage) GetObjectMetadata(bucket, key string, prefix string) (storage.ObjectMetadata, error) { - metadata, err := donutStorage.donut.GetObjectMetadata(bucket, key) +func (d donutDriver) GetObjectMetadata(bucket, key string, prefix string) (drivers.ObjectMetadata, error) { + metadata, err := d.donut.GetObjectMetadata(bucket, key) created, err := time.Parse(time.RFC3339Nano, metadata["sys.created"]) if err != nil { - return storage.ObjectMetadata{}, err + return drivers.ObjectMetadata{}, err } size, err := strconv.ParseInt(metadata["sys.size"], 10, 64) if err != nil { - return storage.ObjectMetadata{}, err + return drivers.ObjectMetadata{}, err } - objectMetadata := storage.ObjectMetadata{ + objectMetadata := drivers.ObjectMetadata{ Bucket: bucket, Key: key, @@ -134,12 +134,12 @@ func (donutStorage Storage) GetObjectMetadata(bucket, key string, prefix string) return objectMetadata, nil } -// ListObjects lists objects -func (donutStorage Storage) ListObjects(bucket string, resources storage.BucketResourcesMetadata) ([]storage.ObjectMetadata, storage.BucketResourcesMetadata, error) { +// ListObjects - returns list of objects +func (d donutDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { // TODO Fix IsPrefixSet && IsDelimiterSet and use them - objects, err := 
donutStorage.donut.ListObjects(bucket) + objects, err := d.donut.ListObjects(bucket) if err != nil { - return nil, storage.BucketResourcesMetadata{}, err + return nil, drivers.BucketResourcesMetadata{}, err } sort.Strings(objects) if resources.Prefix != "" { @@ -162,15 +162,15 @@ func (donutStorage Storage) ListObjects(bucket string, resources storage.BucketR actualObjects = objects } - var results []storage.ObjectMetadata + var results []drivers.ObjectMetadata for _, object := range actualObjects { if len(results) >= resources.Maxkeys { resources.IsTruncated = true break } - metadata, err := donutStorage.GetObjectMetadata(bucket, resources.Prefix+object, "") + metadata, err := d.GetObjectMetadata(bucket, resources.Prefix+object, "") if err != nil { - return nil, storage.BucketResourcesMetadata{}, err + return nil, drivers.BucketResourcesMetadata{}, err } results = append(results, metadata) } @@ -237,8 +237,8 @@ func uniqueObjects(objects []string) []string { } // CreateObject creates a new object -func (donutStorage Storage) CreateObject(bucketKey, objectKey, contentType, expectedMd5sum string, reader io.Reader) error { - writer, err := donutStorage.donut.GetObjectWriter(bucketKey, objectKey) +func (d donutDriver) CreateObject(bucketKey, objectKey, contentType, expectedMd5sum string, reader io.Reader) error { + writer, err := d.donut.GetObjectWriter(bucketKey, objectKey) if err != nil { return err } diff --git a/pkg/storage/donutstorage/donutstorage_test.go b/pkg/drivers/donut/donut_test.go similarity index 89% rename from pkg/storage/donutstorage/donutstorage_test.go rename to pkg/drivers/donut/donut_test.go index d9070f69a..9169726b3 100644 --- a/pkg/storage/donutstorage/donutstorage_test.go +++ b/pkg/drivers/donut/donut_test.go @@ -14,14 +14,14 @@ * limitations under the License. */ -package donutstorage +package donut import ( "io/ioutil" "os" "testing" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" . "gopkg.in/check.v1" ) @@ -35,14 +35,14 @@ var _ = Suite(&MySuite{}) func (s *MySuite) TestAPISuite(c *C) { // c.Skip("Not Implemented") var storageList []string - create := func() mstorage.Storage { + create := func() drivers.Driver { path, err := ioutil.TempDir(os.TempDir(), "minio-fs-") c.Check(err, IsNil) storageList = append(storageList, path) _, _, store := Start(path) // TODO Make InMemory driver return store } - mstorage.APITestSuite(c, create) + drivers.APITestSuite(c, create) removeRoots(c, storageList) } diff --git a/pkg/storage/storage.go b/pkg/drivers/driver.go similarity index 97% rename from pkg/storage/storage.go rename to pkg/drivers/driver.go index cae83faed..d13c9c94e 100644 --- a/pkg/storage/storage.go +++ b/pkg/drivers/driver.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package storage +package drivers import ( "io" @@ -23,8 +23,8 @@ import ( "unicode/utf8" ) -// Storage - generic API interface -type Storage interface { +// Driver - generic API interface for various drivers - donut, file, memory +type Driver interface { // Bucket Operations ListBuckets() ([]BucketMetadata, error) CreateBucket(bucket string) error diff --git a/pkg/storage/storage_errors.go b/pkg/drivers/errors.go similarity index 99% rename from pkg/storage/storage_errors.go rename to pkg/drivers/errors.go index 6b580fd7f..bdede15d9 100644 --- a/pkg/storage/storage_errors.go +++ b/pkg/drivers/errors.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package storage +package drivers // BackendError - generic disk backend error type BackendError struct { diff --git a/pkg/storage/file/file.go b/pkg/drivers/file/file.go similarity index 80% rename from pkg/storage/file/file.go rename to pkg/drivers/file/file.go index 71824d170..60c3ecaa5 100644 --- a/pkg/storage/file/file.go +++ b/pkg/drivers/file/file.go @@ -19,20 +19,22 @@ package file import ( "os" "sync" + + "github.com/minio-io/minio/pkg/drivers" ) // Start filesystem channel -func Start(root string) (chan<- string, <-chan error, *Storage) { +func Start(root string) (chan<- string, <-chan error, drivers.Driver) { ctrlChannel := make(chan string) errorChannel := make(chan error) - s := Storage{} + s := new(fileDriver) s.root = root s.lock = new(sync.Mutex) - go start(ctrlChannel, errorChannel, &s) - return ctrlChannel, errorChannel, &s + go start(ctrlChannel, errorChannel, s) + return ctrlChannel, errorChannel, s } -func start(ctrlChannel <-chan string, errorChannel chan<- error, s *Storage) { +func start(ctrlChannel <-chan string, errorChannel chan<- error, s *fileDriver) { err := os.MkdirAll(s.root, 0700) errorChannel <- err close(errorChannel) diff --git a/pkg/drivers/file/file_bucket.go b/pkg/drivers/file/file_bucket.go new file mode 100644 index 000000000..6724d2547 --- /dev/null +++ b/pkg/drivers/file/file_bucket.go @@ -0,0 +1,133 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
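The file driver keeps the old on-disk layout: each bucket is a directory under the root passed to Start, each object lands at <root>/<bucket>/<key> next to a gob-encoded <key>$metadata file, and bucket policies are written to <root>/<bucket>_policy.json (which is why ListBuckets skips that suffix). A rough sketch of that layout, illustrative only and not part of this patch:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"
	"strings"

	"github.com/minio-io/minio/pkg/drivers/file"
)

func main() {
	root, err := ioutil.TempDir(os.TempDir(), "minio-file-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(root)

	_, _, driver := file.Start(root) // filesystem-backed drivers.Driver

	if err := driver.CreateBucket("foo"); err != nil { // creates <root>/foo (mode 0700)
		log.Fatal(err)
	}
	if err := driver.CreateObject("foo", "bar/obj", "", "", strings.NewReader("hello")); err != nil {
		log.Fatal(err)
	}

	// Expect foo/bar/obj plus foo/bar/obj$metadata under the root.
	fi, err := os.Stat(path.Join(root, "foo", "bar", "obj"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("object on disk:", fi.Name())

	metadata, err := driver.GetObjectMetadata("foo", "bar/obj", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key recorded in metadata:", metadata.Key)
}
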
+ */ + +package file + +import ( + "os" + "path" + "sort" + "strings" + + "io/ioutil" + "path/filepath" + + "github.com/minio-io/minio/pkg/drivers" +) + +/// Bucket Operations + +// ListBuckets - Get service +func (file *fileDriver) ListBuckets() ([]drivers.BucketMetadata, error) { + files, err := ioutil.ReadDir(file.root) + if err != nil { + return []drivers.BucketMetadata{}, drivers.EmbedError("bucket", "", err) + } + + var metadataList []drivers.BucketMetadata + for _, fileName := range files { + // Skip policy files + if strings.HasSuffix(fileName.Name(), "_policy.json") { + continue + } + if !fileName.IsDir() { + return []drivers.BucketMetadata{}, drivers.BackendCorrupted{Path: file.root} + } + metadata := drivers.BucketMetadata{ + Name: fileName.Name(), + Created: fileName.ModTime(), // TODO - provide real created time + } + metadataList = append(metadataList, metadata) + } + return metadataList, nil +} + +// CreateBucket - PUT Bucket +func (file *fileDriver) CreateBucket(bucket string) error { + file.lock.Lock() + defer file.lock.Unlock() + + // verify bucket path legal + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketNameInvalid{Bucket: bucket} + } + + // get bucket path + bucketDir := path.Join(file.root, bucket) + + // check if bucket exists + if _, err := os.Stat(bucketDir); err == nil { + return drivers.BucketExists{ + Bucket: bucket, + } + } + + // make bucket + err := os.Mkdir(bucketDir, 0700) + if err != nil { + return drivers.EmbedError(bucket, "", err) + } + return nil +} + +// ListObjects - GET bucket (list objects) +func (file *fileDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + p := bucketDir{} + p.files = make(map[string]os.FileInfo) + + if drivers.IsValidBucket(bucket) == false { + return []drivers.ObjectMetadata{}, resources, drivers.BucketNameInvalid{Bucket: bucket} + } + if resources.Prefix != "" && drivers.IsValidObject(resources.Prefix) == false { + return []drivers.ObjectMetadata{}, resources, drivers.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix} + } + + rootPrefix := path.Join(file.root, bucket) + // check bucket exists + if _, err := os.Stat(rootPrefix); os.IsNotExist(err) { + return []drivers.ObjectMetadata{}, resources, drivers.BucketNotFound{Bucket: bucket} + } + + p.root = rootPrefix + err := filepath.Walk(rootPrefix, p.getAllFiles) + if err != nil { + return []drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + + var metadataList []drivers.ObjectMetadata + var metadata drivers.ObjectMetadata + + // Populate filtering mode + resources.Mode = drivers.GetMode(resources) + + for name, f := range p.files { + if len(metadataList) >= resources.Maxkeys { + resources.IsTruncated = true + goto ret + } + metadata, resources, err = file.filter(bucket, name, f, resources) + if err != nil { + return []drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + if metadata.Bucket != "" { + metadataList = append(metadataList, metadata) + } + } + +ret: + sort.Sort(byObjectKey(metadataList)) + return metadataList, resources, nil +} diff --git a/pkg/storage/file/file_common.go b/pkg/drivers/file/file_common.go similarity index 89% rename from pkg/storage/file/file_common.go rename to pkg/drivers/file/file_common.go index c708fdae5..579c06322 100644 --- a/pkg/storage/file/file_common.go +++ b/pkg/drivers/file/file_common.go @@ -23,17 +23,17 @@ import ( "strings" "sync" - mstorage 
"github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" ) -// Storage - file local variables -type Storage struct { +// fileDriver - file local variables +type fileDriver struct { root string lock *sync.Mutex } -// Metadata - carries metadata about object -type Metadata struct { +// fileMetadata - carries metadata about object +type fileMetadata struct { Md5sum []byte ContentType string } @@ -77,7 +77,7 @@ func delimiter(object, delimiter string) string { return delimitedStr } -type byObjectKey []mstorage.ObjectMetadata +type byObjectKey []drivers.ObjectMetadata // Len func (b byObjectKey) Len() int { return len(b) } diff --git a/pkg/storage/file/file_filter.go b/pkg/drivers/file/file_filter.go similarity index 63% rename from pkg/storage/file/file_filter.go rename to pkg/drivers/file/file_filter.go index 2f72f2c43..eee761a3f 100644 --- a/pkg/storage/file/file_filter.go +++ b/pkg/drivers/file/file_filter.go @@ -20,13 +20,13 @@ import ( "os" "strings" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" ) // TODO handle resources.Marker -func (storage *Storage) filter(bucket, name string, file os.FileInfo, resources mstorage.BucketResourcesMetadata) (mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) { +func (file *fileDriver) filter(bucket, name string, f os.FileInfo, resources drivers.BucketResourcesMetadata) (drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { var err error - var metadata mstorage.ObjectMetadata + var metadata drivers.ObjectMetadata switch true { // Both delimiter and Prefix is present @@ -37,15 +37,15 @@ func (storage *Storage) filter(bucket, name string, file os.FileInfo, resources switch true { case name == resources.Prefix: // Use resources.Prefix to filter out delimited files - metadata, err = storage.GetObjectMetadata(bucket, name, resources.Prefix) + metadata, err = file.GetObjectMetadata(bucket, name, resources.Prefix) if err != nil { - return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err) + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) } - case delimitedName == file.Name(): + case delimitedName == f.Name(): // Use resources.Prefix to filter out delimited files - metadata, err = storage.GetObjectMetadata(bucket, name, resources.Prefix) + metadata, err = file.GetObjectMetadata(bucket, name, resources.Prefix) if err != nil { - return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err) + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) } case delimitedName != "": if delimitedName == resources.Delimiter { @@ -61,15 +61,15 @@ func (storage *Storage) filter(bucket, name string, file os.FileInfo, resources switch true { case delimitedName == "": // Do not strip prefix object output - metadata, err = storage.GetObjectMetadata(bucket, name, "") + metadata, err = file.GetObjectMetadata(bucket, name, "") if err != nil { - return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err) + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) } - case delimitedName == file.Name(): + case delimitedName == f.Name(): // Do not strip prefix object output - metadata, err = storage.GetObjectMetadata(bucket, name, "") + metadata, err = file.GetObjectMetadata(bucket, name, "") if err != nil { - return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err) + return drivers.ObjectMetadata{}, resources, 
drivers.EmbedError(bucket, "", err) } case delimitedName != "": resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName) @@ -78,15 +78,15 @@ func (storage *Storage) filter(bucket, name string, file os.FileInfo, resources case resources.IsPrefixSet(): if strings.HasPrefix(name, resources.Prefix) { // Do not strip prefix object output - metadata, err = storage.GetObjectMetadata(bucket, name, "") + metadata, err = file.GetObjectMetadata(bucket, name, "") if err != nil { - return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err) + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) } } case resources.IsDefault(): - metadata, err = storage.GetObjectMetadata(bucket, name, "") + metadata, err = file.GetObjectMetadata(bucket, name, "") if err != nil { - return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err) + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) } } diff --git a/pkg/storage/file/file_object.go b/pkg/drivers/file/file_object.go similarity index 50% rename from pkg/storage/file/file_object.go rename to pkg/drivers/file/file_object.go index 970e2e372..3adfc4014 100644 --- a/pkg/storage/file/file_object.go +++ b/pkg/drivers/file/file_object.go @@ -28,136 +28,136 @@ import ( "encoding/gob" "encoding/hex" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" ) /// Object Operations // GetPartialObject - GET object from range -func (storage *Storage) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { +func (file *fileDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { // validate bucket - if mstorage.IsValidBucket(bucket) == false { - return 0, mstorage.BucketNameInvalid{Bucket: bucket} + if drivers.IsValidBucket(bucket) == false { + return 0, drivers.BucketNameInvalid{Bucket: bucket} } // validate object - if mstorage.IsValidObject(object) == false { - return 0, mstorage.ObjectNameInvalid{Bucket: bucket, Object: object} + if drivers.IsValidObject(object) == false { + return 0, drivers.ObjectNameInvalid{Bucket: bucket, Object: object} } - objectPath := path.Join(storage.root, bucket, object) + objectPath := path.Join(file.root, bucket, object) filestat, err := os.Stat(objectPath) switch err := err.(type) { case nil: { if filestat.IsDir() { - return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object} + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} } } default: { if os.IsNotExist(err) { - return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object} + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} } - return 0, mstorage.EmbedError(bucket, object, err) + return 0, drivers.EmbedError(bucket, object, err) } } - file, err := os.Open(objectPath) - defer file.Close() + f, err := os.Open(objectPath) + defer f.Close() if err != nil { - return 0, mstorage.EmbedError(bucket, object, err) + return 0, drivers.EmbedError(bucket, object, err) } - _, err = file.Seek(start, os.SEEK_SET) + _, err = f.Seek(start, os.SEEK_SET) if err != nil { - return 0, mstorage.EmbedError(bucket, object, err) + return 0, drivers.EmbedError(bucket, object, err) } - count, err := io.CopyN(w, file, length) + count, err := io.CopyN(w, f, length) if err != nil { - return count, mstorage.EmbedError(bucket, object, err) + return count, drivers.EmbedError(bucket, object, err) } return count, nil } // GetObject - GET object from key 
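Every file-driver entry point above maps failures to the typed errors from pkg/drivers/errors.go (BucketNameInvalid, ObjectNameInvalid, ObjectNotFound, or an EmbedError-wrapped backend failure), which is what lets callers switch on the concrete error type. A hedged sketch of such a caller; the package and function names here are hypothetical:

package example

import (
	"fmt"
	"io"

	"github.com/minio-io/minio/pkg/drivers"
)

// fetchObject streams an object and translates the driver's typed errors.
func fetchObject(driver drivers.Driver, w io.Writer, bucket, object string) error {
	_, err := driver.GetObject(w, bucket, object)
	switch err := err.(type) {
	case nil:
		return nil
	case drivers.BucketNameInvalid:
		return fmt.Errorf("invalid bucket name %q", err.Bucket)
	case drivers.ObjectNameInvalid:
		return fmt.Errorf("invalid object name %q", err.Object)
	case drivers.ObjectNotFound:
		return fmt.Errorf("object %s/%s does not exist", err.Bucket, err.Object)
	default:
		return err // EmbedError-wrapped backend failures and friends
	}
}
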
-func (storage *Storage) GetObject(w io.Writer, bucket string, object string) (int64, error) { +func (file *fileDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) { // validate bucket - if mstorage.IsValidBucket(bucket) == false { - return 0, mstorage.BucketNameInvalid{Bucket: bucket} + if drivers.IsValidBucket(bucket) == false { + return 0, drivers.BucketNameInvalid{Bucket: bucket} } // validate object - if mstorage.IsValidObject(object) == false { - return 0, mstorage.ObjectNameInvalid{Bucket: bucket, Object: object} + if drivers.IsValidObject(object) == false { + return 0, drivers.ObjectNameInvalid{Bucket: bucket, Object: object} } - objectPath := path.Join(storage.root, bucket, object) + objectPath := path.Join(file.root, bucket, object) filestat, err := os.Stat(objectPath) switch err := err.(type) { case nil: { if filestat.IsDir() { - return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object} + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} } } default: { if os.IsNotExist(err) { - return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object} + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} } - return 0, mstorage.EmbedError(bucket, object, err) + return 0, drivers.EmbedError(bucket, object, err) } } - file, err := os.Open(objectPath) - defer file.Close() + f, err := os.Open(objectPath) + defer f.Close() if err != nil { - return 0, mstorage.EmbedError(bucket, object, err) + return 0, drivers.EmbedError(bucket, object, err) } - count, err := io.Copy(w, file) + count, err := io.Copy(w, f) if err != nil { - return count, mstorage.EmbedError(bucket, object, err) + return count, drivers.EmbedError(bucket, object, err) } return count, nil } // GetObjectMetadata - HEAD object -func (storage *Storage) GetObjectMetadata(bucket, object, prefix string) (mstorage.ObjectMetadata, error) { - if mstorage.IsValidBucket(bucket) == false { - return mstorage.ObjectMetadata{}, mstorage.BucketNameInvalid{Bucket: bucket} +func (file *fileDriver) GetObjectMetadata(bucket, object, prefix string) (drivers.ObjectMetadata, error) { + if drivers.IsValidBucket(bucket) == false { + return drivers.ObjectMetadata{}, drivers.BucketNameInvalid{Bucket: bucket} } - if mstorage.IsValidObject(object) == false { - return mstorage.ObjectMetadata{}, mstorage.ObjectNameInvalid{Bucket: bucket, Object: bucket} + if drivers.IsValidObject(object) == false { + return drivers.ObjectMetadata{}, drivers.ObjectNameInvalid{Bucket: bucket, Object: bucket} } // Do not use path.Join() since path.Join strips off any object names with '/', use them as is // in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat() - objectPath := storage.root + "/" + bucket + "/" + object + objectPath := file.root + "/" + bucket + "/" + object stat, err := os.Stat(objectPath) if os.IsNotExist(err) { - return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: object} + return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: object} } _, err = os.Stat(objectPath + "$metadata") if os.IsNotExist(err) { - return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: object} + return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: object} } - file, err := os.Open(objectPath + "$metadata") - defer file.Close() + f, err := os.Open(objectPath + "$metadata") + defer f.Close() if err != nil { - return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err) + 
return drivers.ObjectMetadata{}, drivers.EmbedError(bucket, object, err) } - var deserializedMetadata Metadata - decoder := gob.NewDecoder(file) + var deserializedMetadata fileMetadata + decoder := gob.NewDecoder(f) err = decoder.Decode(&deserializedMetadata) if err != nil { - return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err) + return drivers.ObjectMetadata{}, drivers.EmbedError(bucket, object, err) } contentType := "application/octet-stream" @@ -171,7 +171,7 @@ func (storage *Storage) GetObjectMetadata(bucket, object, prefix string) (mstora etag = hex.EncodeToString(deserializedMetadata.Md5sum) } trimmedObject := strings.TrimPrefix(object, prefix) - metadata := mstorage.ObjectMetadata{ + metadata := drivers.ObjectMetadata{ Bucket: bucket, Key: trimmedObject, Created: stat.ModTime(), @@ -184,24 +184,24 @@ func (storage *Storage) GetObjectMetadata(bucket, object, prefix string) (mstora } // CreateObject - PUT object -func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error { +func (file *fileDriver) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error { // TODO Commits should stage then move instead of writing directly - storage.lock.Lock() - defer storage.lock.Unlock() + file.lock.Lock() + defer file.lock.Unlock() // check bucket name valid - if mstorage.IsValidBucket(bucket) == false { - return mstorage.BucketNameInvalid{Bucket: bucket} + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketNameInvalid{Bucket: bucket} } // check bucket exists - if _, err := os.Stat(path.Join(storage.root, bucket)); os.IsNotExist(err) { - return mstorage.BucketNotFound{Bucket: bucket} + if _, err := os.Stat(path.Join(file.root, bucket)); os.IsNotExist(err) { + return drivers.BucketNotFound{Bucket: bucket} } // verify object path legal - if mstorage.IsValidObject(key) == false { - return mstorage.ObjectNameInvalid{Bucket: bucket, Object: key} + if drivers.IsValidObject(key) == false { + return drivers.ObjectNameInvalid{Bucket: bucket, Object: key} } // verify content type @@ -211,54 +211,54 @@ func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, da contentType = strings.TrimSpace(contentType) // get object path - objectPath := path.Join(storage.root, bucket, key) + objectPath := path.Join(file.root, bucket, key) objectDir := path.Dir(objectPath) if _, err := os.Stat(objectDir); os.IsNotExist(err) { err = os.MkdirAll(objectDir, 0700) if err != nil { - return mstorage.EmbedError(bucket, key, err) + return drivers.EmbedError(bucket, key, err) } } // check if object exists if _, err := os.Stat(objectPath); !os.IsNotExist(err) { - return mstorage.ObjectExists{ + return drivers.ObjectExists{ Bucket: bucket, Object: key, } } // write object - file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600) - defer file.Close() + f, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600) + defer f.Close() if err != nil { - return mstorage.EmbedError(bucket, key, err) + return drivers.EmbedError(bucket, key, err) } h := md5.New() - mw := io.MultiWriter(file, h) + mw := io.MultiWriter(f, h) _, err = io.Copy(mw, data) if err != nil { - return mstorage.EmbedError(bucket, key, err) + return drivers.EmbedError(bucket, key, err) } // - file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600) - defer file.Close() + f, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600) + defer f.Close() if err != nil { - return mstorage.EmbedError(bucket, key, 
err) + return drivers.EmbedError(bucket, key, err) } - metadata := &Metadata{ + metadata := &fileMetadata{ ContentType: contentType, Md5sum: h.Sum(nil), } // serialize metadata to gob - encoder := gob.NewEncoder(file) + encoder := gob.NewEncoder(f) err = encoder.Encode(metadata) if err != nil { - return mstorage.EmbedError(bucket, key, err) + return drivers.EmbedError(bucket, key, err) } // Verify data received to be correct, Content-MD5 received @@ -266,10 +266,10 @@ func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, da var data []byte data, err = base64.StdEncoding.DecodeString(md5sum) if err != nil { - return mstorage.InvalidDigest{Bucket: bucket, Key: key, Md5: md5sum} + return drivers.InvalidDigest{Bucket: bucket, Key: key, Md5: md5sum} } if !bytes.Equal(metadata.Md5sum, data) { - return mstorage.BadDigest{Bucket: bucket, Key: key, Md5: md5sum} + return drivers.BadDigest{Bucket: bucket, Key: key, Md5: md5sum} } } return nil diff --git a/pkg/drivers/file/file_policy.go b/pkg/drivers/file/file_policy.go new file mode 100644 index 000000000..d0d410ffd --- /dev/null +++ b/pkg/drivers/file/file_policy.go @@ -0,0 +1,111 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
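The new file_policy.go below persists a bucket's access policy as JSON at <root>/<bucket>_policy.json. A minimal round-trip sketch, assuming the policy methods are part of the drivers.Driver interface as each backend's implementation suggests; drivers.BucketPolicy's fields are not shown in this patch, so a zero value stands in:

package example

import (
	"github.com/minio-io/minio/pkg/drivers"
)

// roundTripPolicy writes a (placeholder) policy and reads it back.
func roundTripPolicy(driver drivers.Driver, bucket string) (drivers.BucketPolicy, error) {
	var policy drivers.BucketPolicy // zero value; the real fields live in pkg/drivers
	if err := driver.CreateBucketPolicy(bucket, policy); err != nil { // writes <root>/<bucket>_policy.json
		return drivers.BucketPolicy{}, err
	}
	return driver.GetBucketPolicy(bucket) // decodes the same JSON file
}
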
+ */ + +package file + +import ( + "os" + "path" + + "encoding/json" + "github.com/minio-io/minio/pkg/drivers" +) + +// GetBucketPolicy - GET bucket policy +func (file *fileDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) { + file.lock.Lock() + defer file.lock.Unlock() + + var p drivers.BucketPolicy + // verify bucket path legal + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketPolicy{}, drivers.BucketNameInvalid{Bucket: bucket} + } + + // get bucket path + bucketDir := path.Join(file.root, bucket) + // check if bucket exists + if _, err := os.Stat(bucketDir); err != nil { + return drivers.BucketPolicy{}, drivers.BucketNotFound{Bucket: bucket} + } + + // get policy path + bucketPolicy := path.Join(file.root, bucket+"_policy.json") + filestat, err := os.Stat(bucketPolicy) + + if os.IsNotExist(err) { + return drivers.BucketPolicy{}, drivers.BucketPolicyNotFound{Bucket: bucket} + } + + if filestat.IsDir() { + return drivers.BucketPolicy{}, drivers.BackendCorrupted{Path: bucketPolicy} + } + + f, err := os.OpenFile(bucketPolicy, os.O_RDONLY, 0666) + defer f.Close() + if err != nil { + return drivers.BucketPolicy{}, drivers.EmbedError(bucket, "", err) + } + encoder := json.NewDecoder(f) + err = encoder.Decode(&p) + if err != nil { + return drivers.BucketPolicy{}, drivers.EmbedError(bucket, "", err) + } + + return p, nil + +} + +// CreateBucketPolicy - PUT bucket policy +func (file *fileDriver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error { + file.lock.Lock() + defer file.lock.Unlock() + + // verify bucket path legal + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketNameInvalid{Bucket: bucket} + } + + // get bucket path + bucketDir := path.Join(file.root, bucket) + // check if bucket exists + if _, err := os.Stat(bucketDir); err != nil { + return drivers.BucketNotFound{ + Bucket: bucket, + } + } + + // get policy path + bucketPolicy := path.Join(file.root, bucket+"_policy.json") + filestat, ret := os.Stat(bucketPolicy) + if !os.IsNotExist(ret) { + if filestat.IsDir() { + return drivers.BackendCorrupted{Path: bucketPolicy} + } + } + + f, err := os.OpenFile(bucketPolicy, os.O_WRONLY|os.O_CREATE, 0600) + defer f.Close() + if err != nil { + return drivers.EmbedError(bucket, "", err) + } + encoder := json.NewEncoder(f) + err = encoder.Encode(p) + if err != nil { + return drivers.EmbedError(bucket, "", err) + } + return nil +} diff --git a/pkg/storage/file/file_test.go b/pkg/drivers/file/file_test.go similarity index 90% rename from pkg/storage/file/file_test.go rename to pkg/drivers/file/file_test.go index ca9bbb088..65da0324e 100644 --- a/pkg/storage/file/file_test.go +++ b/pkg/drivers/file/file_test.go @@ -21,7 +21,7 @@ import ( "os" "testing" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" . 
"gopkg.in/check.v1" ) @@ -34,14 +34,14 @@ var _ = Suite(&MySuite{}) func (s *MySuite) TestAPISuite(c *C) { var storageList []string - create := func() mstorage.Storage { + create := func() drivers.Driver { path, err := ioutil.TempDir(os.TempDir(), "minio-file-") c.Check(err, IsNil) storageList = append(storageList, path) _, _, store := Start(path) return store } - mstorage.APITestSuite(c, create) + drivers.APITestSuite(c, create) removeRoots(c, storageList) } diff --git a/pkg/storage/memory/memory.go b/pkg/drivers/memory/memory.go similarity index 62% rename from pkg/storage/memory/memory.go rename to pkg/drivers/memory/memory.go index ef52cc51a..eed816fb0 100644 --- a/pkg/storage/memory/memory.go +++ b/pkg/drivers/memory/memory.go @@ -1,5 +1,5 @@ /* - * Minio Object Storage, (C) 2015 Minio, Inc. + * Minimalist Object Storage, (C) 2015 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ package memory import ( "bufio" "bytes" - "fmt" "io" "sort" "strings" @@ -28,37 +27,40 @@ import ( "crypto/md5" "encoding/hex" - mstorage "github.com/minio-io/minio/pkg/storage" + + "github.com/minio-io/minio/pkg/drivers" ) -// Storage - local variables -type Storage struct { +// memoryDriver - local variables +type memoryDriver struct { bucketdata map[string]storedBucket objectdata map[string]storedObject lock *sync.RWMutex } type storedBucket struct { - metadata mstorage.BucketMetadata + metadata drivers.BucketMetadata // owner string // TODO // id string // TODO } type storedObject struct { - metadata mstorage.ObjectMetadata + metadata drivers.ObjectMetadata data []byte } // Start memory object server -func Start() (chan<- string, <-chan error, *Storage) { +func Start() (chan<- string, <-chan error, drivers.Driver) { ctrlChannel := make(chan string) errorChannel := make(chan error) + + memory := new(memoryDriver) + memory.bucketdata = make(map[string]storedBucket) + memory.objectdata = make(map[string]storedObject) + memory.lock = new(sync.RWMutex) + go start(ctrlChannel, errorChannel) - return ctrlChannel, errorChannel, &Storage{ - bucketdata: make(map[string]storedBucket), - objectdata: make(map[string]storedObject), - lock: new(sync.RWMutex), - } + return ctrlChannel, errorChannel, memory } func start(ctrlChannel <-chan string, errorChannel chan<- error) { @@ -66,43 +68,43 @@ func start(ctrlChannel <-chan string, errorChannel chan<- error) { } // GetObject - GET object from memory buffer -func (storage *Storage) GetObject(w io.Writer, bucket string, object string) (int64, error) { +func (memory memoryDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) { // get object key := object - if val, ok := storage.objectdata[key]; ok { + if val, ok := memory.objectdata[key]; ok { objectBuffer := bytes.NewBuffer(val.data) written, err := io.Copy(w, objectBuffer) return written, err } - return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object} + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} } // GetPartialObject - GET object from memory buffer range -func (storage *Storage) GetPartialObject(w io.Writer, bucket, object string, start, end int64) (int64, error) { - return 0, mstorage.APINotImplemented{API: "GetPartialObject"} +func (memory memoryDriver) GetPartialObject(w io.Writer, bucket, object string, start, end int64) (int64, error) { + return 0, drivers.APINotImplemented{API: "GetPartialObject"} } // CreateBucketPolicy - Not implemented -func 
(storage *Storage) CreateBucketPolicy(bucket string, policy mstorage.BucketPolicy) error { - return mstorage.APINotImplemented{API: "PutBucketPolicy"} +func (memory memoryDriver) CreateBucketPolicy(bucket string, policy drivers.BucketPolicy) error { + return drivers.APINotImplemented{API: "PutBucketPolicy"} } // GetBucketPolicy - Not implemented -func (storage *Storage) GetBucketPolicy(bucket string) (mstorage.BucketPolicy, error) { - return mstorage.BucketPolicy{}, mstorage.APINotImplemented{API: "GetBucketPolicy"} +func (memory memoryDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) { + return drivers.BucketPolicy{}, drivers.APINotImplemented{API: "GetBucketPolicy"} } // CreateObject - PUT object to memory buffer -func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error { - storage.lock.Lock() - defer storage.lock.Unlock() +func (memory memoryDriver) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error { + memory.lock.Lock() + defer memory.lock.Unlock() - if _, ok := storage.bucketdata[bucket]; ok == false { - return mstorage.BucketNotFound{Bucket: bucket} + if _, ok := memory.bucketdata[bucket]; ok == false { + return drivers.BucketNotFound{Bucket: bucket} } - if _, ok := storage.objectdata[key]; ok == true { - return mstorage.ObjectExists{Bucket: bucket, Object: key} + if _, ok := memory.objectdata[key]; ok == true { + return drivers.ObjectExists{Bucket: bucket, Object: key} } if contentType == "" { @@ -117,7 +119,7 @@ func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, da size := bytesBuffer.Len() md5SumBytes := md5.Sum(bytesBuffer.Bytes()) md5Sum := hex.EncodeToString(md5SumBytes[:]) - newObject.metadata = mstorage.ObjectMetadata{ + newObject.metadata = drivers.ObjectMetadata{ Bucket: bucket, Key: key, @@ -128,27 +130,27 @@ func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, da } newObject.data = bytesBuffer.Bytes() } - storage.objectdata[key] = newObject + memory.objectdata[key] = newObject return nil } // CreateBucket - create bucket in memory -func (storage *Storage) CreateBucket(bucketName string) error { - storage.lock.Lock() - defer storage.lock.Unlock() - if !mstorage.IsValidBucket(bucketName) { - return mstorage.BucketNameInvalid{Bucket: bucketName} +func (memory memoryDriver) CreateBucket(bucketName string) error { + memory.lock.Lock() + defer memory.lock.Unlock() + if !drivers.IsValidBucket(bucketName) { + return drivers.BucketNameInvalid{Bucket: bucketName} } - if _, ok := storage.bucketdata[bucketName]; ok == true { - return mstorage.BucketExists{Bucket: bucketName} + if _, ok := memory.bucketdata[bucketName]; ok == true { + return drivers.BucketExists{Bucket: bucketName} } var newBucket = storedBucket{} - newBucket.metadata = mstorage.BucketMetadata{} + newBucket.metadata = drivers.BucketMetadata{} newBucket.metadata.Name = bucketName newBucket.metadata.Created = time.Now() - storage.bucketdata[bucketName] = newBucket + memory.bucketdata[bucketName] = newBucket return nil } @@ -172,13 +174,13 @@ func appendUniq(slice []string, i string) []string { } // ListObjects - list objects from memory -func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) { - if _, ok := storage.bucketdata[bucket]; ok == false { - return []mstorage.ObjectMetadata{}, mstorage.BucketResourcesMetadata{IsTruncated: false}, mstorage.BucketNotFound{Bucket: 
bucket} +func (memory memoryDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + if _, ok := memory.bucketdata[bucket]; ok == false { + return []drivers.ObjectMetadata{}, drivers.BucketResourcesMetadata{IsTruncated: false}, drivers.BucketNotFound{Bucket: bucket} } - var results []mstorage.ObjectMetadata + var results []drivers.ObjectMetadata var keys []string - for key := range storage.objectdata { + for key := range memory.objectdata { switch true { // Prefix absent, delimit object key based on delimiter case resources.Delimiter != "" && resources.Prefix == "": @@ -193,7 +195,6 @@ func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketReso case resources.Delimiter != "" && resources.Prefix != "" && strings.HasPrefix(key, resources.Prefix): trimmedName := strings.TrimPrefix(key, resources.Prefix) delimitedName := delimiter(trimmedName, resources.Delimiter) - fmt.Println(trimmedName, delimitedName, key, resources.Prefix) switch true { case key == resources.Prefix: keys = appendUniq(keys, key) @@ -219,9 +220,9 @@ func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketReso sort.Strings(keys) for _, key := range keys { if len(results) == resources.Maxkeys { - return results, mstorage.BucketResourcesMetadata{IsTruncated: true}, nil + return results, drivers.BucketResourcesMetadata{IsTruncated: true}, nil } - object := storage.objectdata[key] + object := memory.objectdata[key] if bucket == object.metadata.Bucket { results = append(results, object.metadata) } @@ -230,7 +231,7 @@ func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketReso } // ByBucketName is a type for sorting bucket metadata by bucket name -type ByBucketName []mstorage.BucketMetadata +type ByBucketName []drivers.BucketMetadata // Len of bucket name func (b ByBucketName) Len() int { return len(b) } @@ -242,9 +243,9 @@ func (b ByBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b ByBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } // ListBuckets - List buckets from memory -func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) { - var results []mstorage.BucketMetadata - for _, bucket := range storage.bucketdata { +func (memory memoryDriver) ListBuckets() ([]drivers.BucketMetadata, error) { + var results []drivers.BucketMetadata + for _, bucket := range memory.bucketdata { results = append(results, bucket.metadata) } sort.Sort(ByBucketName(results)) @@ -252,9 +253,9 @@ func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) { } // GetObjectMetadata - get object metadata from memory -func (storage *Storage) GetObjectMetadata(bucket, key, prefix string) (mstorage.ObjectMetadata, error) { - if object, ok := storage.objectdata[key]; ok == true { +func (memory memoryDriver) GetObjectMetadata(bucket, key, prefix string) (drivers.ObjectMetadata, error) { + if object, ok := memory.objectdata[key]; ok == true { return object.metadata, nil } - return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: key} + return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: key} } diff --git a/pkg/storage/memory/memory_test.go b/pkg/drivers/memory/memory_test.go similarity index 87% rename from pkg/storage/memory/memory_test.go rename to pkg/drivers/memory/memory_test.go index 262842469..29012c52e 100644 --- a/pkg/storage/memory/memory_test.go +++ b/pkg/drivers/memory/memory_test.go @@ 
-19,7 +19,7 @@ package memory import ( "testing" - mstorage "github.com/minio-io/minio/pkg/storage" + "github.com/minio-io/minio/pkg/drivers" . "gopkg.in/check.v1" ) @@ -31,10 +31,9 @@ type MySuite struct{} var _ = Suite(&MySuite{}) func (s *MySuite) TestAPISuite(c *C) { - create := func() mstorage.Storage { + create := func() drivers.Driver { _, _, store := Start() return store } - - mstorage.APITestSuite(c, create) + drivers.APITestSuite(c, create) } diff --git a/pkg/server/server.go b/pkg/server/server.go index ec2f0903f..29ee82d27 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -24,11 +24,11 @@ import ( "github.com/minio-io/minio/pkg/api" "github.com/minio-io/minio/pkg/api/web" + "github.com/minio-io/minio/pkg/drivers" + "github.com/minio-io/minio/pkg/drivers/donut" + "github.com/minio-io/minio/pkg/drivers/file" + "github.com/minio-io/minio/pkg/drivers/memory" "github.com/minio-io/minio/pkg/server/httpserver" - mstorage "github.com/minio-io/minio/pkg/storage" - "github.com/minio-io/minio/pkg/storage/donutstorage" - "github.com/minio-io/minio/pkg/storage/file" - "github.com/minio-io/minio/pkg/storage/memory" ) // Config - http server parameters @@ -41,9 +41,9 @@ type Config struct { APIType interface{} } -// MinioAPI - storage type donut, file, memory +// MinioAPI - driver type donut, file, memory type MinioAPI struct { - StorageType StorageType + DriverType DriverType } // Web - web related @@ -51,12 +51,12 @@ type Web struct { Websocket bool // TODO } -// StorageType - different storage types supported by minio -type StorageType int +// DriverType - different driver types supported by minio +type DriverType int -// Storage types +// Driver types const ( - Memory = iota + Memory DriverType = iota File Donut ) @@ -71,7 +71,7 @@ func getHTTPChannels(configs []Config) (ctrlChans []chan<- string, statusChans [ case MinioAPI: { // configure web server - var storage mstorage.Storage + var driver drivers.Driver var httpConfig = httpserver.Config{} httpConfig.Address = config.Address httpConfig.Websocket = false @@ -84,9 +84,9 @@ func getHTTPChannels(configs []Config) (ctrlChans []chan<- string, statusChans [ httpConfig.KeyFile = config.KeyFile } - ctrlChans, statusChans, storage = getStorageChannels(k.StorageType) - // start minio api in a web server, pass storage driver into it - ctrlChan, statusChan, _ = httpserver.Start(api.HTTPHandler(config.Domain, storage), httpConfig) + ctrlChans, statusChans, driver = getDriverChannels(k.DriverType) + // start minio api in a web server, pass driver into it + ctrlChan, statusChan, _ = httpserver.Start(api.HTTPHandler(config.Domain, driver), httpConfig) ctrlChans = append(ctrlChans, ctrlChan) statusChans = append(statusChans, statusChan) @@ -110,50 +110,50 @@ func getHTTPChannels(configs []Config) (ctrlChans []chan<- string, statusChans [ return } -func getStorageChannels(storageType StorageType) (ctrlChans []chan<- string, statusChans []<-chan error, storage mstorage.Storage) { +func getDriverChannels(driverType DriverType) (ctrlChans []chan<- string, statusChans []<-chan error, driver drivers.Driver) { // a pair of control channels, we use these primarily to add to the lists above var ctrlChan chan<- string var statusChan <-chan error - // instantiate storage + // instantiate driver // preconditions: - // - storage type specified - // - any configuration for storage is populated + // - driver type specified + // - any configuration for driver is populated // postconditions: - // - storage driver is initialized - // - ctrlChans
has channel to communicate to storage - // - statusChans has channel for messages coming from storage + // - driver is initialized + // - ctrlChans has channel to communicate to driver + // - statusChans has channel for messages coming from driver switch { - case storageType == Memory: + case driverType == Memory: { - ctrlChan, statusChan, storage = memory.Start() + ctrlChan, statusChan, driver = memory.Start() ctrlChans = append(ctrlChans, ctrlChan) statusChans = append(statusChans, statusChan) } - case storageType == File: + case driverType == File: { u, err := user.Current() if err != nil { return nil, nil, nil } root := path.Join(u.HomeDir, "minio-storage", "file") - ctrlChan, statusChan, storage = file.Start(root) + ctrlChan, statusChan, driver = file.Start(root) ctrlChans = append(ctrlChans, ctrlChan) statusChans = append(statusChans, statusChan) } - case storageType == Donut: + case driverType == Donut: { u, err := user.Current() if err != nil { return nil, nil, nil } - root := path.Join(u.HomeDir, "minio-storage", "donut") - ctrlChan, statusChan, storage = donutstorage.Start(root) + root := path.Join(u.HomeDir, "minio-driver", "donut") + ctrlChan, statusChan, driver = donut.Start(root) ctrlChans = append(ctrlChans, ctrlChan) statusChans = append(statusChans, statusChan) } default: // should never happen - log.Fatal("No storage driver found") + log.Fatal("No driver found") } return } diff --git a/pkg/storage/donut/bucketdriver.go b/pkg/storage/donut/bucket.go similarity index 54% rename from pkg/storage/donut/bucketdriver.go rename to pkg/storage/donut/bucket.go index 783fa9878..9ed34e9fb 100644 --- a/pkg/storage/donut/bucketdriver.go +++ b/pkg/storage/donut/bucket.go @@ -1,11 +1,12 @@ package donut -type bucketDriver struct { +type donutBucket struct { nodes []string objects map[string][]byte } -func (b bucketDriver) GetNodes() ([]string, error) { +// GetNodes - get list of associated nodes for a given bucket +func (b donutBucket) GetNodes() ([]string, error) { var nodes []string for _, node := range b.nodes { nodes = append(nodes, node) diff --git a/pkg/storage/donut/donut.go b/pkg/storage/donut/donut.go index 0514f91af..7d58bace0 100644 --- a/pkg/storage/donut/donut.go +++ b/pkg/storage/donut/donut.go @@ -1,50 +1,160 @@ package donut import ( + "errors" "io" + "sort" + "strconv" + "strings" ) -// INTERFACES +type donut struct { + buckets map[string]Bucket + nodes map[string]Node +} + +// NewDonut - instantiate new donut +func NewDonut(root string) Donut { + nodes := make(map[string]Node) + nodes["localhost"] = localDirectoryNode{root: root} + d := donut{ + buckets: make(map[string]Bucket), + nodes: nodes, + } + return d +} -// Donut interface -type Donut interface { - CreateBucket(bucket string) error - GetObject(bucket, object string) (io.ReadCloser, error) - GetObjectMetadata(bucket, object string) (map[string]string, error) - GetObjectWriter(bucket, object string) (ObjectWriter, error) - ListBuckets() ([]string, error) - ListObjects(bucket string) ([]string, error) +// CreateBucket - create a new bucket +func (d donut) CreateBucket(bucketName string) error { + if _, ok := d.buckets[bucketName]; ok == false { + bucketName = strings.TrimSpace(bucketName) + if bucketName == "" { + return errors.New("Cannot create bucket with no name") + } + // assign nodes + // TODO assign other nodes + nodes := make([]string, 16) + for i := 0; i < 16; i++ { + nodes[i] = "localhost" + if node, ok := d.nodes["localhost"]; ok { + node.CreateBucket(bucketName + ":0:" + strconv.Itoa(i)) + } +
} + bucket := donutBucket{ + nodes: nodes, + } + d.buckets[bucketName] = bucket + return nil + } + return errors.New("Bucket exists") } -// Bucket interface -type Bucket interface { - GetNodes() ([]string, error) +// ListBuckets - list all buckets +func (d donut) ListBuckets() ([]string, error) { + var buckets []string + for bucket := range d.buckets { + buckets = append(buckets, bucket) + } + sort.Strings(buckets) + return buckets, nil } -// Node interface -type Node interface { - CreateBucket(bucket string) error - GetBuckets() ([]string, error) - GetDonutMetadata(bucket, object string) (map[string]string, error) - GetMetadata(bucket, object string) (map[string]string, error) - GetReader(bucket, object string) (io.ReadCloser, error) - GetWriter(bucket, object string) (Writer, error) - ListObjects(bucket string) ([]string, error) +// GetObjectWriter - get a new writer interface for a new object +func (d donut) GetObjectWriter(bucketName, objectName string) (ObjectWriter, error) { + if bucket, ok := d.buckets[bucketName]; ok == true { + writers := make([]Writer, 16) + nodes, err := bucket.GetNodes() + if err != nil { + return nil, err + } + for i, nodeID := range nodes { + if node, ok := d.nodes[nodeID]; ok == true { + writer, err := node.GetWriter(bucketName+":0:"+strconv.Itoa(i), objectName) + if err != nil { + for _, writerToClose := range writers { + if writerToClose != nil { + writerToClose.CloseWithError(err) + } + } + return nil, err + } + writers[i] = writer + } + } + return newErasureWriter(writers), nil + } + return nil, errors.New("Bucket not found") } -// ObjectWriter interface -type ObjectWriter interface { - Close() error - CloseWithError(error) error - GetMetadata() (map[string]string, error) - SetMetadata(map[string]string) error - Write([]byte) (int, error) +// GetObjectReader - get a new reader interface for a new object +func (d donut) GetObjectReader(bucketName, objectName string) (io.ReadCloser, error) { + r, w := io.Pipe() + if bucket, ok := d.buckets[bucketName]; ok == true { + readers := make([]io.ReadCloser, 16) + nodes, err := bucket.GetNodes() + if err != nil { + return nil, err + } + var metadata map[string]string + for i, nodeID := range nodes { + if node, ok := d.nodes[nodeID]; ok == true { + bucketID := bucketName + ":0:" + strconv.Itoa(i) + reader, err := node.GetReader(bucketID, objectName) + if err != nil { + return nil, err + } + readers[i] = reader + if metadata == nil { + metadata, err = node.GetDonutMetadata(bucketID, objectName) + if err != nil { + return nil, err + } + } + } + } + go erasureReader(readers, metadata, w) + return r, nil + } + return nil, errors.New("Bucket not found") } -// Writer interface -type Writer interface { - ObjectWriter +// GetObjectMetadata returns metadata for a given object in a bucket +func (d donut) GetObjectMetadata(bucketName, object string) (map[string]string, error) { + if bucket, ok := d.buckets[bucketName]; ok { + nodes, err := bucket.GetNodes() + if err != nil { + return nil, err + } + if node, ok := d.nodes[nodes[0]]; ok { + bucketID := bucketName + ":0:0" + metadata, err := node.GetMetadata(bucketID, object) + if err != nil { + return nil, err + } + donutMetadata, err := node.GetDonutMetadata(bucketID, object) + if err != nil { + return nil, err + } + metadata["sys.created"] = donutMetadata["created"] + metadata["sys.md5"] = donutMetadata["md5"] + metadata["sys.size"] = donutMetadata["size"] + return metadata, nil + } + return nil, errors.New("Cannot connect to node: " + nodes[0]) + } + return nil, 
errors.New("Bucket not found") +} - GetDonutMetadata() (map[string]string, error) - SetDonutMetadata(map[string]string) error +// ListObjects - list all the available objects in a bucket +func (d donut) ListObjects(bucketName string) ([]string, error) { + if bucket, ok := d.buckets[bucketName]; ok { + nodes, err := bucket.GetNodes() + if err != nil { + return nil, err + } + if node, ok := d.nodes[nodes[0]]; ok { + return node.ListObjects(bucketName + ":0:0") + } + } + return nil, errors.New("Bucket not found") } diff --git a/pkg/storage/donut/donutdriver_test.go b/pkg/storage/donut/donut_test.go similarity index 92% rename from pkg/storage/donut/donutdriver_test.go rename to pkg/storage/donut/donut_test.go index 19cde193e..fb7d3727c 100644 --- a/pkg/storage/donut/donutdriver_test.go +++ b/pkg/storage/donut/donut_test.go @@ -1,14 +1,14 @@ package donut import ( - "testing" - "bytes" - . "gopkg.in/check.v1" "io" "io/ioutil" "os" + "testing" "time" + + . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } @@ -21,7 +21,7 @@ func (s *MySuite) TestEmptyBucket(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) // check buckets are empty buckets, err := donut.ListBuckets() @@ -33,7 +33,7 @@ func (s *MySuite) TestBucketWithoutNameFails(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) // fail to create new bucket without a name err = donut.CreateBucket("") c.Assert(err, Not(IsNil)) @@ -46,7 +46,7 @@ func (s *MySuite) TestCreateBucketAndList(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) // create bucket err = donut.CreateBucket("foo") c.Assert(err, IsNil) @@ -61,7 +61,7 @@ func (s *MySuite) TestCreateBucketWithSameNameFails(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) err = donut.CreateBucket("foo") c.Assert(err, IsNil) @@ -73,7 +73,7 @@ func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) // add a second bucket err = donut.CreateBucket("foo") c.Assert(err, IsNil) @@ -97,7 +97,7 @@ func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) writer, err := donut.GetObjectWriter("foo", "obj") c.Assert(err, Not(IsNil)) @@ -108,7 +108,7 @@ func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) writer, err := donut.GetObjectWriter("foo", "") c.Assert(err, Not(IsNil)) @@ -123,7 +123,7 @@ func (s *MySuite) TestNewObjectCanBeWritten(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) err = donut.CreateBucket("foo") c.Assert(err, IsNil) @@ -153,7 +153,7 @@ func (s *MySuite) TestNewObjectCanBeWritten(c *C) { c.Assert(err, IsNil) - reader, err := donut.GetObject("foo", 
"obj") + reader, err := donut.GetObjectReader("foo", "obj") c.Assert(err, IsNil) var actualData bytes.Buffer @@ -175,7 +175,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) c.Assert(donut.CreateBucket("foo"), IsNil) writer, err := donut.GetObjectWriter("foo", "obj1") @@ -190,7 +190,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { // c.Skip("not complete") - reader, err := donut.GetObject("foo", "obj1") + reader, err := donut.GetObjectReader("foo", "obj1") c.Assert(err, IsNil) var readerBuffer1 bytes.Buffer _, err = io.Copy(&readerBuffer1, reader) @@ -198,7 +198,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) { // c.Skip("Not Implemented") c.Assert(readerBuffer1.Bytes(), DeepEquals, []byte("one")) - reader, err = donut.GetObject("foo", "obj2") + reader, err = donut.GetObjectReader("foo", "obj2") c.Assert(err, IsNil) var readerBuffer2 bytes.Buffer _, err = io.Copy(&readerBuffer2, reader) @@ -215,7 +215,7 @@ func (s *MySuite) TestSysPrefixShouldFail(c *C) { root, err := ioutil.TempDir(os.TempDir(), "donut-") c.Assert(err, IsNil) defer os.RemoveAll(root) - donut := NewDonutDriver(root) + donut := NewDonut(root) c.Assert(donut.CreateBucket("foo"), IsNil) writer, err := donut.GetObjectWriter("foo", "obj1") diff --git a/pkg/storage/donut/donutdriver.go b/pkg/storage/donut/donutdriver.go deleted file mode 100644 index f06555a75..000000000 --- a/pkg/storage/donut/donutdriver.go +++ /dev/null @@ -1,155 +0,0 @@ -package donut - -import ( - "errors" - "io" - "sort" - "strconv" - "strings" -) - -type donutDriver struct { - buckets map[string]Bucket - nodes map[string]Node -} - -// NewDonutDriver - instantiate new donut driver -func NewDonutDriver(root string) Donut { - nodes := make(map[string]Node) - nodes["localhost"] = localDirectoryNode{root: root} - driver := donutDriver{ - buckets: make(map[string]Bucket), - nodes: nodes, - } - return driver -} - -func (driver donutDriver) CreateBucket(bucketName string) error { - if _, ok := driver.buckets[bucketName]; ok == false { - bucketName = strings.TrimSpace(bucketName) - if bucketName == "" { - return errors.New("Cannot create bucket with no name") - } - // assign nodes - // TODO assign other nodes - nodes := make([]string, 16) - for i := 0; i < 16; i++ { - nodes[i] = "localhost" - if node, ok := driver.nodes["localhost"]; ok { - node.CreateBucket(bucketName + ":0:" + strconv.Itoa(i)) - } - } - bucket := bucketDriver{ - nodes: nodes, - } - driver.buckets[bucketName] = bucket - return nil - } - return errors.New("Bucket exists") -} - -func (driver donutDriver) ListBuckets() ([]string, error) { - var buckets []string - for bucket := range driver.buckets { - buckets = append(buckets, bucket) - } - sort.Strings(buckets) - return buckets, nil -} - -func (driver donutDriver) GetObjectWriter(bucketName, objectName string) (ObjectWriter, error) { - if bucket, ok := driver.buckets[bucketName]; ok == true { - writers := make([]Writer, 16) - nodes, err := bucket.GetNodes() - if err != nil { - return nil, err - } - for i, nodeID := range nodes { - if node, ok := driver.nodes[nodeID]; ok == true { - writer, err := node.GetWriter(bucketName+":0:"+strconv.Itoa(i), objectName) - if err != nil { - for _, writerToClose := range writers { - if writerToClose != nil { - writerToClose.CloseWithError(err) - } - } - return nil, err - } - writers[i] = writer - } - } - return newErasureWriter(writers), nil - } - 
return nil, errors.New("Bucket not found") -} - -func (driver donutDriver) GetObject(bucketName, objectName string) (io.ReadCloser, error) { - r, w := io.Pipe() - if bucket, ok := driver.buckets[bucketName]; ok == true { - readers := make([]io.ReadCloser, 16) - nodes, err := bucket.GetNodes() - if err != nil { - return nil, err - } - var metadata map[string]string - for i, nodeID := range nodes { - if node, ok := driver.nodes[nodeID]; ok == true { - bucketID := bucketName + ":0:" + strconv.Itoa(i) - reader, err := node.GetReader(bucketID, objectName) - if err != nil { - return nil, err - } - readers[i] = reader - if metadata == nil { - metadata, err = node.GetDonutMetadata(bucketID, objectName) - if err != nil { - return nil, err - } - } - } - } - go erasureReader(readers, metadata, w) - return r, nil - } - return nil, errors.New("Bucket not found") -} - -// GetObjectMetadata returns metadata for a given object in a bucket -func (driver donutDriver) GetObjectMetadata(bucketName, object string) (map[string]string, error) { - if bucket, ok := driver.buckets[bucketName]; ok { - nodes, err := bucket.GetNodes() - if err != nil { - return nil, err - } - if node, ok := driver.nodes[nodes[0]]; ok { - bucketID := bucketName + ":0:0" - metadata, err := node.GetMetadata(bucketID, object) - if err != nil { - return nil, err - } - donutMetadata, err := node.GetDonutMetadata(bucketID, object) - if err != nil { - return nil, err - } - metadata["sys.created"] = donutMetadata["created"] - metadata["sys.md5"] = donutMetadata["md5"] - metadata["sys.size"] = donutMetadata["size"] - return metadata, nil - } - return nil, errors.New("Cannot connect to node: " + nodes[0]) - } - return nil, errors.New("Bucket not found") -} - -func (driver donutDriver) ListObjects(bucketName string) ([]string, error) { - if bucket, ok := driver.buckets[bucketName]; ok { - nodes, err := bucket.GetNodes() - if err != nil { - return nil, err - } - if node, ok := driver.nodes[nodes[0]]; ok { - return node.ListObjects(bucketName + ":0:0") - } - } - return nil, errors.New("Bucket not found") -} diff --git a/pkg/storage/donut/erasure.go b/pkg/storage/donut/erasure.go index ef89ed77b..be56968ea 100644 --- a/pkg/storage/donut/erasure.go +++ b/pkg/storage/donut/erasure.go @@ -14,6 +14,7 @@ import ( "strings" ) +// erasureReader - returns aligned streaming reads over a PipeWriter func erasureReader(readers []io.ReadCloser, donutMetadata map[string]string, writer *io.PipeWriter) { // TODO handle errors totalChunks, _ := strconv.Atoi(donutMetadata["chunkCount"]) @@ -70,6 +71,7 @@ type erasureWriter struct { isClosed <-chan bool } +// newErasureWriter - get a new writer func newErasureWriter(writers []Writer) ObjectWriter { r, w := io.Pipe() isClosed := make(chan bool) diff --git a/pkg/storage/donut/interfaces.go b/pkg/storage/donut/interfaces.go new file mode 100644 index 000000000..12e9e1da2 --- /dev/null +++ b/pkg/storage/donut/interfaces.go @@ -0,0 +1,50 @@ +package donut + +import ( + "io" +) + +// Collection of Donut specification interfaces + +// Donut interface +type Donut interface { + CreateBucket(bucket string) error + GetObjectReader(bucket, object string) (io.ReadCloser, error) + GetObjectWriter(bucket, object string) (ObjectWriter, error) + GetObjectMetadata(bucket, object string) (map[string]string, error) + ListBuckets() ([]string, error) + ListObjects(bucket string) ([]string, error) +} + +// Bucket interface +type Bucket interface { + GetNodes() ([]string, error) +} + +// Node interface +type Node interface { + 
CreateBucket(bucket string) error + GetBuckets() ([]string, error) + GetDonutMetadata(bucket, object string) (map[string]string, error) + GetMetadata(bucket, object string) (map[string]string, error) + GetReader(bucket, object string) (io.ReadCloser, error) + GetWriter(bucket, object string) (Writer, error) + ListObjects(bucket string) ([]string, error) +} + +// ObjectWriter interface +type ObjectWriter interface { + Close() error + CloseWithError(error) error + GetMetadata() (map[string]string, error) + SetMetadata(map[string]string) error + Write([]byte) (int, error) +} + +// Writer interface +type Writer interface { + ObjectWriter + + GetDonutMetadata() (map[string]string, error) + SetDonutMetadata(map[string]string) error +} diff --git a/pkg/storage/donut/local.go b/pkg/storage/donut/node_local.go similarity index 98% rename from pkg/storage/donut/local.go rename to pkg/storage/donut/node_local.go index c1fcff915..6a9ed0708 100644 --- a/pkg/storage/donut/local.go +++ b/pkg/storage/donut/node_local.go @@ -31,7 +31,7 @@ func (d localDirectoryNode) GetWriter(bucket, object string) (Writer, error) { if err != nil { return nil, err } - return newDonutFileWriter(objectPath) + return newDonutObjectWriter(objectPath) } func (d localDirectoryNode) GetReader(bucket, object string) (io.ReadCloser, error) { diff --git a/pkg/storage/donut/donutwriter.go b/pkg/storage/donut/object_writer.go similarity index 70% rename from pkg/storage/donut/donutwriter.go rename to pkg/storage/donut/object_writer.go index ebf24befe..858a50ffb 100644 --- a/pkg/storage/donut/donutwriter.go +++ b/pkg/storage/donut/object_writer.go @@ -7,12 +7,12 @@ import ( "path" ) -func newDonutFileWriter(objectDir string) (Writer, error) { +func newDonutObjectWriter(objectDir string) (Writer, error) { dataFile, err := os.OpenFile(path.Join(objectDir, "data"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return nil, err } - return donutFileWriter{ + return donutObjectWriter{ root: objectDir, file: dataFile, metadata: make(map[string]string), @@ -20,7 +20,7 @@ func newDonutFileWriter(objectDir string) (Writer, error) { }, nil } -type donutFileWriter struct { +type donutObjectWriter struct { root string file *os.File metadata map[string]string @@ -28,11 +28,11 @@ type donutFileWriter struct { err error } -func (d donutFileWriter) Write(data []byte) (int, error) { +func (d donutObjectWriter) Write(data []byte) (int, error) { return d.file.Write(data) } -func (d donutFileWriter) Close() error { +func (d donutObjectWriter) Close() error { if d.err != nil { return d.err } @@ -44,14 +44,14 @@ func (d donutFileWriter) Close() error { return d.file.Close() } -func (d donutFileWriter) CloseWithError(err error) error { +func (d donutObjectWriter) CloseWithError(err error) error { if d.err != nil { d.err = err } return d.Close() } -func (d donutFileWriter) SetMetadata(metadata map[string]string) error { +func (d donutObjectWriter) SetMetadata(metadata map[string]string) error { for k := range d.metadata { delete(d.metadata, k) } @@ -61,7 +61,7 @@ func (d donutFileWriter) SetMetadata(metadata map[string]string) error { return nil } -func (d donutFileWriter) GetMetadata() (map[string]string, error) { +func (d donutObjectWriter) GetMetadata() (map[string]string, error) { metadata := make(map[string]string) for k, v := range d.metadata { metadata[k] = v @@ -69,7 +69,7 @@ func (d donutFileWriter) GetMetadata() (map[string]string, error) { return metadata, nil } -func (d donutFileWriter) SetDonutMetadata(metadata map[string]string) error { 
+func (d donutObjectWriter) SetDonutMetadata(metadata map[string]string) error {
 	for k := range d.donutMetadata {
 		delete(d.donutMetadata, k)
 	}
@@ -79,7 +79,7 @@ func (d donutFileWriter) SetDonutMetadata(metadata map[string]string) error {
 	return nil
 }
 
-func (d donutFileWriter) GetDonutMetadata() (map[string]string, error) {
+func (d donutObjectWriter) GetDonutMetadata() (map[string]string, error) {
 	donutMetadata := make(map[string]string)
 	for k, v := range d.donutMetadata {
 		donutMetadata[k] = v
diff --git a/pkg/storage/donut/objectwriter.go b/pkg/storage/donut/objectwriter.go
deleted file mode 100644
index cfba213f7..000000000
--- a/pkg/storage/donut/objectwriter.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package donut
-
-import (
-	"errors"
-)
-
-type objectWriter struct {
-	metadata map[string]string
-}
-
-func (obj objectWriter) Write(data []byte) (length int, err error) {
-	return 11, nil
-}
-
-func (obj objectWriter) Close() error {
-	return nil
-}
-
-func (obj objectWriter) CloseWithError(err error) error {
-	return errors.New("Not Implemented")
-}
-
-func (obj objectWriter) SetMetadata(metadata map[string]string) error {
-	for k := range obj.metadata {
-		delete(obj.metadata, k)
-	}
-	for k, v := range metadata {
-		obj.metadata[k] = v
-	}
-	return nil
-}
-
-func (obj objectWriter) GetMetadata() (map[string]string, error) {
-	ret := make(map[string]string)
-	for k, v := range obj.metadata {
-		ret[k] = v
-	}
-	return ret, nil
-}
diff --git a/pkg/storage/file/file_bucket.go b/pkg/storage/file/file_bucket.go
deleted file mode 100644
index 5ac2b1ef0..000000000
--- a/pkg/storage/file/file_bucket.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Minimalist Object Storage, (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package file
-
-import (
-	"os"
-	"path"
-	"sort"
-	"strings"
-
-	"io/ioutil"
-	"path/filepath"
-
-	mstorage "github.com/minio-io/minio/pkg/storage"
-)
-
-/// Bucket Operations
-
-// ListBuckets - Get service
-func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
-	files, err := ioutil.ReadDir(storage.root)
-	if err != nil {
-		return []mstorage.BucketMetadata{}, mstorage.EmbedError("bucket", "", err)
-	}
-
-	var metadataList []mstorage.BucketMetadata
-	for _, file := range files {
-		// Skip policy files
-		if strings.HasSuffix(file.Name(), "_policy.json") {
-			continue
-		}
-		if !file.IsDir() {
-			return []mstorage.BucketMetadata{}, mstorage.BackendCorrupted{Path: storage.root}
-		}
-		metadata := mstorage.BucketMetadata{
-			Name:    file.Name(),
-			Created: file.ModTime(), // TODO - provide real created time
-		}
-		metadataList = append(metadataList, metadata)
-	}
-	return metadataList, nil
-}
-
-// CreateBucket - PUT Bucket
-func (storage *Storage) CreateBucket(bucket string) error {
-	storage.lock.Lock()
-	defer storage.lock.Unlock()
-
-	// verify bucket path legal
-	if mstorage.IsValidBucket(bucket) == false {
-		return mstorage.BucketNameInvalid{Bucket: bucket}
-	}
-
-	// get bucket path
-	bucketDir := path.Join(storage.root, bucket)
-
-	// check if bucket exists
-	if _, err := os.Stat(bucketDir); err == nil {
-		return mstorage.BucketExists{
-			Bucket: bucket,
-		}
-	}
-
-	// make bucket
-	err := os.Mkdir(bucketDir, 0700)
-	if err != nil {
-		return mstorage.EmbedError(bucket, "", err)
-	}
-	return nil
-}
-
-// ListObjects - GET bucket (list objects)
-func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
-	p := bucketDir{}
-	p.files = make(map[string]os.FileInfo)
-
-	if mstorage.IsValidBucket(bucket) == false {
-		return []mstorage.ObjectMetadata{}, resources, mstorage.BucketNameInvalid{Bucket: bucket}
-	}
-	if resources.Prefix != "" && mstorage.IsValidObject(resources.Prefix) == false {
-		return []mstorage.ObjectMetadata{}, resources, mstorage.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix}
-	}
-
-	rootPrefix := path.Join(storage.root, bucket)
-	// check bucket exists
-	if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
-		return []mstorage.ObjectMetadata{}, resources, mstorage.BucketNotFound{Bucket: bucket}
-	}
-
-	p.root = rootPrefix
-	err := filepath.Walk(rootPrefix, p.getAllFiles)
-	if err != nil {
-		return []mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
-	}
-
-	var metadataList []mstorage.ObjectMetadata
-	var metadata mstorage.ObjectMetadata
-
-	// Populate filtering mode
-	resources.Mode = mstorage.GetMode(resources)
-
-	for name, file := range p.files {
-		if len(metadataList) >= resources.Maxkeys {
-			resources.IsTruncated = true
-			goto ret
-		}
-		metadata, resources, err = storage.filter(bucket, name, file, resources)
-		if err != nil {
-			return []mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
-		}
-		if metadata.Bucket != "" {
-			metadataList = append(metadataList, metadata)
-		}
-	}
-
-ret:
-	sort.Sort(byObjectKey(metadataList))
-	return metadataList, resources, nil
-}
diff --git a/pkg/storage/file/file_policy.go b/pkg/storage/file/file_policy.go
deleted file mode 100644
index b762e52cc..000000000
--- a/pkg/storage/file/file_policy.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Minimalist Object Storage, (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package file
-
-import (
-	"os"
-	"path"
-
-	"encoding/json"
-	mstorage "github.com/minio-io/minio/pkg/storage"
-)
-
-// GetBucketPolicy - GET bucket policy
-func (storage *Storage) GetBucketPolicy(bucket string) (mstorage.BucketPolicy, error) {
-	storage.lock.Lock()
-	defer storage.lock.Unlock()
-
-	var p mstorage.BucketPolicy
-	// verify bucket path legal
-	if mstorage.IsValidBucket(bucket) == false {
-		return mstorage.BucketPolicy{}, mstorage.BucketNameInvalid{Bucket: bucket}
-	}
-
-	// get bucket path
-	bucketDir := path.Join(storage.root, bucket)
-	// check if bucket exists
-	if _, err := os.Stat(bucketDir); err != nil {
-		return mstorage.BucketPolicy{}, mstorage.BucketNotFound{Bucket: bucket}
-	}
-
-	// get policy path
-	bucketPolicy := path.Join(storage.root, bucket+"_policy.json")
-	filestat, err := os.Stat(bucketPolicy)
-
-	if os.IsNotExist(err) {
-		return mstorage.BucketPolicy{}, mstorage.BucketPolicyNotFound{Bucket: bucket}
-	}
-
-	if filestat.IsDir() {
-		return mstorage.BucketPolicy{}, mstorage.BackendCorrupted{Path: bucketPolicy}
-	}
-
-	file, err := os.OpenFile(bucketPolicy, os.O_RDONLY, 0666)
-	defer file.Close()
-	if err != nil {
-		return mstorage.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
-	}
-	encoder := json.NewDecoder(file)
-	err = encoder.Decode(&p)
-	if err != nil {
-		return mstorage.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
-	}
-
-	return p, nil
-
-}
-
-// CreateBucketPolicy - PUT bucket policy
-func (storage *Storage) CreateBucketPolicy(bucket string, p mstorage.BucketPolicy) error {
-	storage.lock.Lock()
-	defer storage.lock.Unlock()
-
-	// verify bucket path legal
-	if mstorage.IsValidBucket(bucket) == false {
-		return mstorage.BucketNameInvalid{Bucket: bucket}
-	}
-
-	// get bucket path
-	bucketDir := path.Join(storage.root, bucket)
-	// check if bucket exists
-	if _, err := os.Stat(bucketDir); err != nil {
-		return mstorage.BucketNotFound{
-			Bucket: bucket,
-		}
-	}
-
-	// get policy path
-	bucketPolicy := path.Join(storage.root, bucket+"_policy.json")
-	filestat, ret := os.Stat(bucketPolicy)
-	if !os.IsNotExist(ret) {
-		if filestat.IsDir() {
-			return mstorage.BackendCorrupted{Path: bucketPolicy}
-		}
-	}
-
-	file, err := os.OpenFile(bucketPolicy, os.O_WRONLY|os.O_CREATE, 0600)
-	defer file.Close()
-	if err != nil {
-		return mstorage.EmbedError(bucket, "", err)
-	}
-	encoder := json.NewEncoder(file)
-	err = encoder.Encode(p)
-	if err != nil {
-		return mstorage.EmbedError(bucket, "", err)
-	}
-	return nil
-}
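The ObjectWriter/Writer split introduced above keeps per-object metadata and donut-level metadata behind separate accessors, and node_local.go now hands out a donutObjectWriter for both. A minimal caller-side sketch of the intended flow follows; it assumes only the signatures added in this diff, and the bucketNode interface, the putObject helper, and its parameters are illustrative rather than part of the change. One point worth a follow-up: the nil check in donutObjectWriter.CloseWithError assigns d.err only when d.err is already non-nil, so the first error passed in on a clean writer appears to be dropped; the condition looks inverted.

package example

import (
	"github.com/minio-io/minio/pkg/storage/donut"
)

// bucketNode is an illustrative subset of the node interface added above;
// localDirectoryNode satisfies it, but its constructor is not part of this diff.
type bucketNode interface {
	CreateBucket(bucket string) error
	GetWriter(bucket, object string) (donut.Writer, error)
}

// putObject sketches one write cycle against the new interfaces.
func putObject(node bucketNode, bucket, object string, data []byte, objectMeta, donutMeta map[string]string) error {
	if err := node.CreateBucket(bucket); err != nil {
		return err
	}
	w, err := node.GetWriter(bucket, object)
	if err != nil {
		return err
	}
	if _, err := w.Write(data); err != nil {
		// Abort on failure; CloseWithError delegates to Close.
		return w.CloseWithError(err)
	}
	if err := w.SetMetadata(objectMeta); err != nil {
		return w.CloseWithError(err)
	}
	// Writer extends ObjectWriter with the donut-level metadata pair.
	if err := w.SetDonutMetadata(donutMeta); err != nil {
		return w.CloseWithError(err)
	}
	return w.Close()
}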
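The file driver's bucket operations deleted above define the listing contract a replacement driver is expected to keep: typed errors such as BucketNameInvalid, BucketExists and BucketNotFound, prefix filtering, and Maxkeys truncation reported through IsTruncated. A rough sketch of driving that contract follows; the lister interface, the listWithPrefix helper and the 1000-key cap are assumptions for illustration, while the method signature and metadata fields come from the removed code.

package example

import (
	"fmt"

	mstorage "github.com/minio-io/minio/pkg/storage"
)

// lister is an illustrative subset of the removed ListObjects contract.
type lister interface {
	ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error)
}

// listWithPrefix lists a bucket under a prefix and reports truncation.
func listWithPrefix(s lister, bucket, prefix string) error {
	resources := mstorage.BucketResourcesMetadata{
		Prefix:  prefix,
		Maxkeys: 1000, // beyond this the removed implementation stops collecting keys and sets IsTruncated
	}
	objects, resources, err := s.ListObjects(bucket, resources)
	if err != nil {
		// Typed failures: BucketNameInvalid, ObjectNameInvalid, BucketNotFound, or an embedded error.
		return err
	}
	fmt.Printf("%d objects under %s/%s (truncated=%v)\n", len(objects), bucket, prefix, resources.IsTruncated)
	return nil
}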
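One detail worth carrying forward from the removed file_policy.go: in both GetBucketPolicy and CreateBucketPolicy, file.Close is deferred before the os.OpenFile error is checked, the json.Decoder in GetBucketPolicy is named encoder, and filestat.IsDir() can be reached with a nil filestat when os.Stat fails for a reason other than not-exist. A sketch of the conventional ordering for the read path follows, using only types from the removed file; the readBucketPolicy name and its signature are made up for the example.

package example

import (
	"encoding/json"
	"os"

	mstorage "github.com/minio-io/minio/pkg/storage"
)

// readBucketPolicy shows the usual ordering: check the OpenFile error before
// deferring Close, then decode with a decoder named as such.
func readBucketPolicy(bucket, policyPath string) (mstorage.BucketPolicy, error) {
	var p mstorage.BucketPolicy
	file, err := os.OpenFile(policyPath, os.O_RDONLY, 0666)
	if err != nil {
		return mstorage.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
	}
	defer file.Close()

	decoder := json.NewDecoder(file)
	if err := decoder.Decode(&p); err != nil {
		return mstorage.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
	}
	return p, nil
}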