From 7fd1cc073c9066e137d6dc2ce772a54a4f2671a5 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 8 Apr 2015 17:13:25 -0700 Subject: [PATCH] Add everything back into one project hood, still missing iodine --- Godeps/Godeps.json | 12 +- .../minio-io/donut/Godeps/Godeps.json | 30 -- .../github.com/minio-io/donut/Godeps/Readme | 5 - .../src/github.com/minio-io/donut/Makefile | 69 --- .../minio-io/donut/buildscripts/checkdeps.sh | 201 --------- .../donut/buildscripts/git-commit-id.sh | 18 - .../github.com/minio-io/erasure/.gitignore | 6 - .../src/github.com/minio-io/iodine/iodine.go | 19 +- .../github.com/minio-io/iodine/iodine_test.go | 8 +- Makefile | 1 - pkg/erasure/.gitignore | 1 + .../minio-io => pkg}/erasure/BUILDDEPS.md | 0 .../minio-io => pkg}/erasure/CONTRIBUTING.md | 0 .../minio-io => pkg}/erasure/LICENSE.INTEL | 0 .../minio-io => pkg}/erasure/LICENSE.MINIO | 0 .../minio-io => pkg}/erasure/README.md | 0 .../erasure/RELEASE-NOTES.INTEL | 0 .../erasure/RELEASE-NOTES.MINIO | 0 .../minio-io => pkg}/erasure/cauchy_test.go | 0 .../minio-io => pkg}/erasure/ctypes.go | 0 .../minio-io => pkg}/erasure/doc.go | 0 .../erasure/docs/isa-l_open_src_2.10.pdf | 0 .../erasure/docs/isa-l_open_src_2.13.pdf | Bin .../minio-io => pkg}/erasure/ec_base.c | 0 .../minio-io => pkg}/erasure/ec_base.h | 0 .../minio-io => pkg}/erasure/ec_code.h | 0 .../erasure/ec_highlevel_func.c | 0 .../minio-io => pkg}/erasure/ec_isal-l.h | 0 .../erasure/ec_minio_common.h | 0 .../erasure/ec_minio_decode.c | 0 .../erasure/ec_minio_encode.c | 0 .../erasure/ec_multibinary.asm | 0 .../minio-io => pkg}/erasure/ec_reg_sizes.asm | 0 .../minio-io => pkg}/erasure/ec_types.h | 0 .../erasure/erasure_decode.go | 0 .../erasure/erasure_encode.go | 0 .../erasure/erasure_yasm_darwin.go | 0 .../erasure/erasure_yasm_linux.go | 0 .../erasure/erasure_yasm_windows.go | 0 .../erasure/gf_2vect_dot_prod_avx.asm | 0 .../erasure/gf_2vect_dot_prod_avx2.asm | 0 .../erasure/gf_2vect_dot_prod_sse.asm | 0 .../erasure/gf_2vect_mad_avx.asm | 0 .../erasure/gf_2vect_mad_avx2.asm | 0 .../erasure/gf_2vect_mad_sse.asm | 0 .../erasure/gf_3vect_dot_prod_avx.asm | 0 .../erasure/gf_3vect_dot_prod_avx2.asm | 0 .../erasure/gf_3vect_dot_prod_sse.asm | 0 .../erasure/gf_3vect_mad_avx.asm | 0 .../erasure/gf_3vect_mad_avx2.asm | 0 .../erasure/gf_3vect_mad_sse.asm | 0 .../erasure/gf_4vect_dot_prod_avx.asm | 0 .../erasure/gf_4vect_dot_prod_avx2.asm | 0 .../erasure/gf_4vect_dot_prod_sse.asm | 0 .../erasure/gf_4vect_mad_avx.asm | 0 .../erasure/gf_4vect_mad_avx2.asm | 0 .../erasure/gf_4vect_mad_sse.asm | 0 .../erasure/gf_5vect_dot_prod_avx.asm | 0 .../erasure/gf_5vect_dot_prod_avx2.asm | 0 .../erasure/gf_5vect_dot_prod_sse.asm | 0 .../erasure/gf_5vect_mad_avx.asm | 0 .../erasure/gf_5vect_mad_avx2.asm | 0 .../erasure/gf_5vect_mad_sse.asm | 0 .../erasure/gf_6vect_dot_prod_avx.asm | 0 .../erasure/gf_6vect_dot_prod_avx2.asm | 0 .../erasure/gf_6vect_dot_prod_sse.asm | 0 .../erasure/gf_6vect_mad_avx.asm | 0 .../erasure/gf_6vect_mad_avx2.asm | 0 .../erasure/gf_6vect_mad_sse.asm | 0 .../erasure/gf_vect_dot_prod_avx.asm | 0 .../erasure/gf_vect_dot_prod_avx2.asm | 0 .../erasure/gf_vect_dot_prod_sse.asm | 0 .../erasure/gf_vect_mad_avx.asm | 0 .../erasure/gf_vect_mad_avx2.asm | 0 .../erasure/gf_vect_mad_sse.asm | 0 .../minio-io => pkg}/erasure/gf_vect_mul.h | 0 .../erasure/gf_vect_mul_avx.asm | 0 .../erasure/gf_vect_mul_sse.asm | 0 .../minio-io => pkg}/erasure/stdint.go | 0 .../erasure/vandermonde_test.go | 0 .../minio-io => pkg/storage}/donut/.gitignore | 0 .../minio-io => 
pkg/storage}/donut/LICENSE | 0 .../minio-io => pkg/storage}/donut/README.md | 0 .../minio-io => pkg/storage}/donut/donut.go | 0 .../storage}/donut/donut_bucket.go | 0 .../storage}/donut/donut_bucket_internal.go | 0 .../storage}/donut/donut_common.go | 0 .../storage}/donut/donut_disk.go | 0 .../storage}/donut/donut_disk_internal.go | 0 .../storage}/donut/donut_encoder.go | 2 +- .../donut/donut_internal_interfaces.go | 0 .../storage}/donut/donut_node.go | 0 .../storage}/donut/donut_object.go | 0 .../storage}/donut/donut_public_interfaces.go | 0 .../storage}/donut/donut_rebalance.go | 0 .../storage}/donut/donut_test.go | 0 .../storage}/donut/management.go | 0 .../storage}/donut/objectstorage.go | 0 .../storage}/donut/objectstorage_internal.go | 0 pkg/storage/drivers | 1 - pkg/storage/drivers/LICENSE | 202 +++++++++ pkg/storage/drivers/README.md | 2 + pkg/storage/drivers/api_testsuite.go | 423 ++++++++++++++++++ pkg/storage/drivers/bucket_policy.go | 199 ++++++++ pkg/storage/drivers/bucket_policy_compat.go | 52 +++ pkg/storage/drivers/date.go | 78 ++++ pkg/storage/drivers/donut/donut.go | 333 ++++++++++++++ pkg/storage/drivers/donut/donut_test.go | 53 +++ pkg/storage/drivers/driver.go | 151 +++++++ pkg/storage/drivers/errors.go | 163 +++++++ pkg/storage/drivers/file/file.go | 41 ++ pkg/storage/drivers/file/file_bucket.go | 146 ++++++ pkg/storage/drivers/file/file_common.go | 89 ++++ pkg/storage/drivers/file/file_filter.go | 100 +++++ pkg/storage/drivers/file/file_object.go | 283 ++++++++++++ pkg/storage/drivers/file/file_policy.go | 112 +++++ pkg/storage/drivers/file/file_test.go | 52 +++ pkg/storage/drivers/memory/memory.go | 287 ++++++++++++ pkg/storage/drivers/memory/memory_test.go | 38 ++ pkg/storage/drivers/mocks/Driver.go | 135 ++++++ 120 files changed, 2963 insertions(+), 349 deletions(-) delete mode 100644 Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Godeps.json delete mode 100644 Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Readme delete mode 100644 Godeps/_workspace/src/github.com/minio-io/donut/Makefile delete mode 100644 Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/checkdeps.sh delete mode 100644 Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/git-commit-id.sh delete mode 100644 Godeps/_workspace/src/github.com/minio-io/erasure/.gitignore create mode 100644 pkg/erasure/.gitignore rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/BUILDDEPS.md (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/CONTRIBUTING.md (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/LICENSE.INTEL (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/LICENSE.MINIO (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/README.md (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/RELEASE-NOTES.INTEL (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/RELEASE-NOTES.MINIO (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/cauchy_test.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ctypes.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/doc.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/docs/isa-l_open_src_2.10.pdf (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/docs/isa-l_open_src_2.13.pdf (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_base.c (100%) rename 
{Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_base.h (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_code.h (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_highlevel_func.c (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_isal-l.h (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_minio_common.h (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_minio_decode.c (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_minio_encode.c (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_multibinary.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_reg_sizes.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/ec_types.h (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/erasure_decode.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/erasure_encode.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/erasure_yasm_darwin.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/erasure_yasm_linux.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/erasure_yasm_windows.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_2vect_dot_prod_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_2vect_dot_prod_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_2vect_dot_prod_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_2vect_mad_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_2vect_mad_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_2vect_mad_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_3vect_dot_prod_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_3vect_dot_prod_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_3vect_dot_prod_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_3vect_mad_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_3vect_mad_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_3vect_mad_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_4vect_dot_prod_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_4vect_dot_prod_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_4vect_dot_prod_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_4vect_mad_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_4vect_mad_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_4vect_mad_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_5vect_dot_prod_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_5vect_dot_prod_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_5vect_dot_prod_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_5vect_mad_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => 
pkg}/erasure/gf_5vect_mad_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_5vect_mad_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_6vect_dot_prod_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_6vect_dot_prod_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_6vect_dot_prod_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_6vect_mad_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_6vect_mad_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_6vect_mad_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_dot_prod_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_dot_prod_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_dot_prod_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_mad_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_mad_avx2.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_mad_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_mul.h (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_mul_avx.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/gf_vect_mul_sse.asm (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/stdint.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg}/erasure/vandermonde_test.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/.gitignore (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/LICENSE (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/README.md (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_bucket.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_bucket_internal.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_common.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_disk.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_disk_internal.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_encoder.go (98%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_internal_interfaces.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_node.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_object.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_public_interfaces.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_rebalance.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/donut_test.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/management.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => pkg/storage}/donut/objectstorage.go (100%) rename {Godeps/_workspace/src/github.com/minio-io => 
pkg/storage}/donut/objectstorage_internal.go (100%) delete mode 160000 pkg/storage/drivers create mode 100644 pkg/storage/drivers/LICENSE create mode 100644 pkg/storage/drivers/README.md create mode 100644 pkg/storage/drivers/api_testsuite.go create mode 100644 pkg/storage/drivers/bucket_policy.go create mode 100644 pkg/storage/drivers/bucket_policy_compat.go create mode 100644 pkg/storage/drivers/date.go create mode 100644 pkg/storage/drivers/donut/donut.go create mode 100644 pkg/storage/drivers/donut/donut_test.go create mode 100644 pkg/storage/drivers/driver.go create mode 100644 pkg/storage/drivers/errors.go create mode 100644 pkg/storage/drivers/file/file.go create mode 100644 pkg/storage/drivers/file/file_bucket.go create mode 100644 pkg/storage/drivers/file/file_common.go create mode 100644 pkg/storage/drivers/file/file_filter.go create mode 100644 pkg/storage/drivers/file/file_object.go create mode 100644 pkg/storage/drivers/file/file_policy.go create mode 100644 pkg/storage/drivers/file/file_test.go create mode 100644 pkg/storage/drivers/memory/memory.go create mode 100644 pkg/storage/drivers/memory/memory_test.go create mode 100644 pkg/storage/drivers/mocks/Driver.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 4279baa45..a8865410c 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/minio-io/minio", - "GoVersion": "go1.4.2", + "GoVersion": "go1.4", "Packages": [ "./..." ], @@ -22,17 +22,9 @@ "Comment": "1.2.0-102-gecb385c", "Rev": "ecb385c3fefd53678e3b6beba6a608fb7c8dfac1" }, - { - "ImportPath": "github.com/minio-io/donut", - "Rev": "107c0aff49fb961c2c7bfd1e5d1c26c958accbdc" - }, - { - "ImportPath": "github.com/minio-io/erasure", - "Rev": "8a72b14991a6835b4d30403e7cb201f373b7cb3a" - }, { "ImportPath": "github.com/minio-io/iodine", - "Rev": "55cc4d4256c68fbd6f0775f1a25e37e6a2f6457e" + "Rev": "9a63d02ce3934e159d00732f15e096e4f86a6dbb" }, { "ImportPath": "github.com/stretchr/objx", diff --git a/Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Godeps.json deleted file mode 100644 index 4df40bb72..000000000 --- a/Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Godeps.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "ImportPath": "github.com/minio-io/donut", - "GoVersion": "go1.4", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "github.com/minio-io/check", - "Rev": "bc4e66da8cd7ff58a4b9b84301f906352b8f2c94" - }, - { - "ImportPath": "github.com/minio-io/cli", - "Comment": "1.2.0-102-gecb385c", - "Rev": "ecb385c3fefd53678e3b6beba6a608fb7c8dfac1" - }, - { - "ImportPath": "github.com/minio-io/erasure", - "Rev": "8a72b14991a6835b4d30403e7cb201f373b7cb3a" - }, - { - "ImportPath": "github.com/minio-io/iodine", - "Rev": "55cc4d4256c68fbd6f0775f1a25e37e6a2f6457e" - }, - { - "ImportPath": "github.com/minio-io/minio/pkg/utils/split", - "Rev": "936520e6e0fc5dd4ce8d04504ee991084555e57a" - } - ] -} diff --git a/Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Readme b/Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Readme deleted file mode 100644 index 4cdaa53d5..000000000 --- a/Godeps/_workspace/src/github.com/minio-io/donut/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. 
diff --git a/Godeps/_workspace/src/github.com/minio-io/donut/Makefile b/Godeps/_workspace/src/github.com/minio-io/donut/Makefile deleted file mode 100644 index 13c10ad12..000000000 --- a/Godeps/_workspace/src/github.com/minio-io/donut/Makefile +++ /dev/null @@ -1,69 +0,0 @@ -MINIOPATH=$(GOPATH)/src/github.com/minio-io/donut - -all: getdeps install - -checkdeps: - @echo "Checking deps:" - @(env bash $(PWD)/buildscripts/checkdeps.sh) - -checkgopath: - @echo "Checking if project is at ${MINIOPATH}" - @if [ ! -d ${MINIOPATH} ]; then echo "Project not found in $GOPATH, please follow instructions provided at https://github.com/Minio-io/minio/blob/master/CONTRIBUTING.md#setup-your-minio-github-repository" && exit 1; fi - -getdeps: checkdeps checkgopath - @go get github.com/minio-io/godep && echo "Installed godep:" - @go get github.com/golang/lint/golint && echo "Installed golint:" - @go get golang.org/x/tools/cmd/vet && echo "Installed vet:" - @go get github.com/fzipp/gocyclo && echo "Installed gocyclo:" - -verifiers: getdeps vet fmt lint cyclo - -vet: - @echo "Running $@:" - @go vet ./... -fmt: - @echo "Running $@:" - @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \ - echo "+ please format Go code with 'gofmt -s'" -lint: - @echo "Running $@:" - @test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" - -cyclo: - @echo "Running $@:" - @test -z "$$(gocyclo -over 15 . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" - -pre-build: - @echo "Running pre-build:" - @(env bash $(PWD)/buildscripts/git-commit-id.sh) - -build-all: getdeps verifiers - @echo "Building Libraries:" - @godep go generate github.com/minio-io/erasure - @godep go generate ./... - @godep go build -a ./... # have no stale packages - -test-all: pre-build build-all - @echo "Running Test Suites:" - @godep go test -race ./... - -save: restore - @godep save ./... - -restore: - @godep restore - -env: - @godep go env - -docs-deploy: - @mkdocs gh-deploy --clean - -install: test-all - @echo "Installing donut tool:" - @godep go install -a github.com/minio-io/donut/cmd/donut - @mkdir -p $(HOME)/.minio/donut - -clean: - @rm -fv cover.out - @rm -fv build-constants.go diff --git a/Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/checkdeps.sh b/Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/checkdeps.sh deleted file mode 100644 index 3d78c0449..000000000 --- a/Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/checkdeps.sh +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env bash -# -# Minio Commander, (C) 2015 Minio, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -_init() { - ## Minimum required versions for build dependencies - GCC_VERSION="4.0" - CLANG_VERSION="3.5" - YASM_VERSION="1.2.0" - GIT_VERSION="1.0" - GO_VERSION="1.4" - OSX_VERSION="10.8" - UNAME=$(uname -sm) - - ## Check all dependencies are present - MISSING="" -} - -### -# -# Takes two arguments -# arg1: version number in `x.x.x` format -# arg2: version number in `x.x.x` format -# -# example: check_version "$version1" "$version2" -# -# returns: -# 0 - Installed version is equal to required -# 1 - Installed version is greater than required -# 2 - Installed version is lesser than required -# 3 - If args have length zero -# -#### -check_version () { - ## validate args - [[ -z "$1" ]] && return 3 - [[ -z "$2" ]] && return 3 - - if [[ $1 == $2 ]]; then - return 0 - fi - - local IFS=. - local i ver1=($1) ver2=($2) - # fill empty fields in ver1 with zeros - for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do - ver1[i]=0 - done - for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]]; then - # fill empty fields in ver2 with zeros - ver2[i]=0 - fi - if ((10#${ver1[i]} > 10#${ver2[i]})); then - - return 1 - fi - if ((10#${ver1[i]} < 10#${ver2[i]})); then - ## Installed version is lesser than required - Bad condition - return 2 - fi - done - return 0 -} - -check_golang_env() { - echo ${GOROOT:?} 2>&1 >/dev/null - if [ $? -eq 1 ]; then - echo "ERROR" - echo "GOROOT environment variable missing, please refer to Go installation document" - echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13" - exit 1 - fi - - echo ${GOPATH:?} 2>&1 >/dev/null - if [ $? -eq 1 ]; then - echo "ERROR" - echo "GOPATH environment variable missing, please refer to Go installation document" - echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13" - exit 1 - fi -} - -is_supported_os() { - case ${UNAME%% *} in - "Linux") - os="linux" - ;; - "Darwin") - osx_host_version=$(env sw_vers -productVersion) - check_version "${osx_host_version}" "${OSX_VERSION}" - [[ $? -ge 2 ]] && die "Minimum OSX version supported is ${OSX_VERSION}" - ;; - "*") - echo "Exiting.. unsupported operating system found" - exit 1; - esac -} - -is_supported_arch() { - local supported - case ${UNAME##* } in - "x86_64") - supported=1 - ;; - *) - supported=0 - ;; - esac - if [ $supported -eq 0 ]; then - echo "Invalid arch: ${UNAME} not supported, please use x86_64/amd64" - exit 1; - fi -} - -check_deps() { - check_version "$(env go version 2>/dev/null | sed 's/^.* go\([0-9.]*\).*$/\1/')" "${GO_VERSION}" - if [ $? -ge 2 ]; then - MISSING="${MISSING} golang(1.4)" - fi - - check_version "$(env git --version 2>/dev/null | sed -e 's/^.* \([0-9.\].*\).*$/\1/' -e 's/^\([0-9.\]*\).*/\1/g')" "${GIT_VERSION}" - if [ $? -ge 2 ]; then - MISSING="${MISSING} git" - fi - - case ${UNAME%% *} in - "Linux") - check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${GCC_VERSION}" - if [ $? -ge 2 ]; then - MISSING="${MISSING} build-essential" - fi - ;; - "Darwin") - check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${CLANG_VERSION}" - if [ $? -ge 2 ]; then - MISSING="${MISSING} xcode-cli" - fi - ;; - "*") - ;; - esac - - check_version "$(env yasm --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${YASM_VERSION}" - if [ $? -ge 2 ]; then - MISSING="${MISSING} yasm(1.2.0)" - fi - - env mkdocs help >/dev/null 2>&1 - if [ $? -ne 0 ]; then - MISSING="${MISSING} mkdocs" - fi -} - -main() { - echo -n "Check for supported arch.. 
" - is_supported_arch - - echo -n "Check for supported os.. " - is_supported_os - - echo -n "Checking if proper environment variables are set.. " - check_golang_env - - echo "Done" - echo "Using GOPATH=${GOPATH} and GOROOT=${GOROOT}" - - echo -n "Checking dependencies for Minio.. " - check_deps - - ## If dependencies are missing, warn the user and abort - if [ "x${MISSING}" != "x" ]; then - echo "ERROR" - echo - echo "The following build tools are missing:" - echo - echo "** ${MISSING} **" - echo - echo "Please install them " - echo "${MISSING}" - echo - echo "Follow https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md for further instructions" - exit 1 - fi - echo "Done" -} - -_init && main "$@" diff --git a/Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/git-commit-id.sh b/Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/git-commit-id.sh deleted file mode 100644 index f0e597483..000000000 --- a/Godeps/_workspace/src/github.com/minio-io/donut/buildscripts/git-commit-id.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -CONST_FILE=${PWD}/cmd/donut/build-constants.go - -cat > $CONST_FILE < 12 { + err = errors.New("Invalid 0000-00-000 style DATE string: " + str) + return + } + if n, err = strconv.Atoi(str[8:10]); err != nil { + return + } + if n < 1 || n > 31 { + err = errors.New("Invalid 0000-00-000 style DATE string: " + str) + return + } + d.Year = int16(y) + d.Month = byte(m) + d.Day = byte(n) + return +} diff --git a/pkg/storage/drivers/donut/donut.go b/pkg/storage/drivers/donut/donut.go new file mode 100644 index 000000000..c3207c30b --- /dev/null +++ b/pkg/storage/drivers/donut/donut.go @@ -0,0 +1,333 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package donut + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "io" + "os" + "path" + "sort" + "strconv" + "strings" + "time" + + "io/ioutil" + + "github.com/minio-io/iodine" + "github.com/minio-io/minio/pkg/storage/donut" + "github.com/minio-io/minio/pkg/storage/drivers" + "github.com/minio-io/minio/pkg/utils/log" +) + +// donutDriver - creates a new single disk drivers driver using donut +type donutDriver struct { + donut donut.Donut +} + +const ( + blockSize = 10 * 1024 * 1024 +) + +// This is a dummy nodeDiskMap which is going to be deprecated soon +// once the Management API is standardized, this map is useful for now +// to show multi disk API correctness behavior. 
+// +// This should be obtained from donut configuration file +func createNodeDiskMap(p string) map[string][]string { + nodes := make(map[string][]string) + nodes["localhost"] = make([]string, 16) + for i := 0; i < len(nodes["localhost"]); i++ { + diskPath := path.Join(p, strconv.Itoa(i)) + if _, err := os.Stat(diskPath); err != nil { + if os.IsNotExist(err) { + os.MkdirAll(diskPath, 0700) + } + } + nodes["localhost"][i] = diskPath + } + return nodes +} + +// Start a single disk subsystem +func Start(path string) (chan<- string, <-chan error, drivers.Driver) { + ctrlChannel := make(chan string) + errorChannel := make(chan error) + errParams := map[string]string{"path": path} + + // Soon to be user configurable, when Management API + // is finished we remove "default" to something + // which is passed down from configuration + donut, err := donut.NewDonut("default", createNodeDiskMap(path)) + if err != nil { + err = iodine.New(err, errParams) + log.Error.Println(err) + } + + s := new(donutDriver) + s.donut = donut + + go start(ctrlChannel, errorChannel, s) + return ctrlChannel, errorChannel, s +} + +func start(ctrlChannel <-chan string, errorChannel chan<- error, s *donutDriver) { + close(errorChannel) +} + +// byBucketName is a type for sorting bucket metadata by bucket name +type byBucketName []drivers.BucketMetadata + +func (b byBucketName) Len() int { return len(b) } +func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +// ListBuckets returns a list of buckets +func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error) { + buckets, err := d.donut.ListBuckets() + if err != nil { + return nil, err + } + for _, name := range buckets { + result := drivers.BucketMetadata{ + Name: name, + // TODO Add real created date + Created: time.Now(), + } + results = append(results, result) + } + sort.Sort(byBucketName(results)) + return results, nil +} + +// CreateBucket creates a new bucket +func (d donutDriver) CreateBucket(bucketName string) error { + if drivers.IsValidBucket(bucketName) && !strings.Contains(bucketName, ".") { + return d.donut.MakeBucket(bucketName) + } + return iodine.New(errors.New("Invalid bucket"), map[string]string{"bucket": bucketName}) +} + +// GetBucketMetadata retrieves an bucket's metadata +func (d donutDriver) GetBucketMetadata(bucketName string) (drivers.BucketMetadata, error) { + if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") { + return drivers.BucketMetadata{}, drivers.BucketNameInvalid{Bucket: bucketName} + } + metadata, err := d.donut.GetBucketMetadata(bucketName) + if err != nil { + return drivers.BucketMetadata{}, drivers.BucketNotFound{Bucket: bucketName} + } + created, err := time.Parse(time.RFC3339Nano, metadata["created"]) + if err != nil { + return drivers.BucketMetadata{}, iodine.New(err, nil) + } + bucketMetadata := drivers.BucketMetadata{ + Name: metadata["name"], + Created: created, + } + return bucketMetadata, nil +} + +// CreateBucketPolicy sets a bucket's access policy +func (d donutDriver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error { + return iodine.New(errors.New("Not Implemented"), nil) +} + +// GetBucketPolicy returns a bucket's access policy +func (d donutDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) { + return drivers.BucketPolicy{}, iodine.New(errors.New("Not Implemented"), nil) +} + +// GetObject retrieves an object and writes it to a writer +func (d donutDriver) 
GetObject(target io.Writer, bucketName, objectName string) (int64, error) { + errParams := map[string]string{ + "bucketName": bucketName, + "objectName": objectName, + } + if bucketName == "" || strings.TrimSpace(bucketName) == "" { + return 0, iodine.New(errors.New("invalid argument"), errParams) + } + if objectName == "" || strings.TrimSpace(objectName) == "" { + return 0, iodine.New(errors.New("invalid argument"), errParams) + } + reader, size, err := d.donut.GetObject(bucketName, objectName) + if err != nil { + return 0, drivers.ObjectNotFound{ + Bucket: bucketName, + Object: objectName, + } + } + n, err := io.CopyN(target, reader, size) + return n, iodine.New(err, errParams) +} + +// GetPartialObject retrieves an object range and writes it to a writer +func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) { + // TODO more efficient get partial object with proper donut support + errParams := map[string]string{ + "bucketName": bucketName, + "objectName": objectName, + "start": strconv.FormatInt(start, 10), + "length": strconv.FormatInt(length, 10), + } + if bucketName == "" || strings.TrimSpace(bucketName) == "" { + return 0, iodine.New(errors.New("invalid argument"), errParams) + } + if objectName == "" || strings.TrimSpace(objectName) == "" { + return 0, iodine.New(errors.New("invalid argument"), errParams) + } + if start < 0 { + return 0, iodine.New(errors.New("invalid argument"), errParams) + } + reader, size, err := d.donut.GetObject(bucketName, objectName) + defer reader.Close() + if err != nil { + return 0, drivers.ObjectNotFound{ + Bucket: bucketName, + Object: objectName, + } + } + if start > size || (start+length-1) > size { + return 0, iodine.New(errors.New("invalid range"), errParams) + } + _, err = io.CopyN(ioutil.Discard, reader, start) + if err != nil { + return 0, iodine.New(err, errParams) + } + n, err := io.CopyN(w, reader, length) + if err != nil { + return 0, iodine.New(err, errParams) + } + return n, nil +} + +// GetObjectMetadata retrieves an object's metadata +func (d donutDriver) GetObjectMetadata(bucketName, objectName, prefixName string) (drivers.ObjectMetadata, error) { + errParams := map[string]string{ + "bucketName": bucketName, + "objectName": objectName, + "prefixName": prefixName, + } + metadata, err := d.donut.GetObjectMetadata(bucketName, objectName) + if err != nil { + return drivers.ObjectMetadata{}, drivers.ObjectNotFound{ + Bucket: bucketName, + Object: objectName, + } + } + created, err := time.Parse(time.RFC3339Nano, metadata["created"]) + if err != nil { + return drivers.ObjectMetadata{}, iodine.New(err, errParams) + } + size, err := strconv.ParseInt(metadata["size"], 10, 64) + if err != nil { + return drivers.ObjectMetadata{}, iodine.New(err, errParams) + } + objectMetadata := drivers.ObjectMetadata{ + Bucket: bucketName, + Key: objectName, + + ContentType: metadata["contentType"], + Created: created, + Md5: metadata["md5"], + Size: size, + } + return objectMetadata, nil +} + +type byObjectKey []drivers.ObjectMetadata + +func (b byObjectKey) Len() int { return len(b) } +func (b byObjectKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key } + +// ListObjects - returns list of objects +func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + errParams := map[string]string{ + "bucketName": bucketName, + } + 
actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName, + resources.Prefix, + resources.Marker, + resources.Delimiter, + resources.Maxkeys) + if err != nil { + return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams) + } + resources.CommonPrefixes = commonPrefixes + resources.IsTruncated = isTruncated + + var results []drivers.ObjectMetadata + for _, objectName := range actualObjects { + objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName) + if err != nil { + return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams) + } + t, err := time.Parse(time.RFC3339Nano, objectMetadata["created"]) + if err != nil { + return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil) + } + size, err := strconv.ParseInt(objectMetadata["size"], 10, 64) + if err != nil { + return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil) + } + metadata := drivers.ObjectMetadata{ + Key: objectName, + Created: t, + Size: size, + } + results = append(results, metadata) + } + sort.Sort(byObjectKey(results)) + return results, resources, nil +} + +// CreateObject creates a new object +func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMD5Sum string, reader io.Reader) error { + errParams := map[string]string{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": contentType, + } + if bucketName == "" || strings.TrimSpace(bucketName) == "" { + return iodine.New(errors.New("invalid argument"), errParams) + } + if objectName == "" || strings.TrimSpace(objectName) == "" { + return iodine.New(errors.New("invalid argument"), errParams) + } + if strings.TrimSpace(contentType) == "" { + contentType = "application/octet-stream" + } + metadata := make(map[string]string) + metadata["contentType"] = strings.TrimSpace(contentType) + + if strings.TrimSpace(expectedMD5Sum) != "" { + expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) + if err != nil { + return iodine.New(err, nil) + } + expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) + } + + err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, ioutil.NopCloser(reader), metadata) + if err != nil { + return iodine.New(err, errParams) + } + return nil +} diff --git a/pkg/storage/drivers/donut/donut_test.go b/pkg/storage/drivers/donut/donut_test.go new file mode 100644 index 000000000..6dcb74341 --- /dev/null +++ b/pkg/storage/drivers/donut/donut_test.go @@ -0,0 +1,53 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package donut + +import ( + "io/ioutil" + "os" + "testing" + + . 
"github.com/minio-io/check" + "github.com/minio-io/minio/pkg/storage/drivers" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func (s *MySuite) TestAPISuite(c *C) { + // c.Skip("Not Implemented") + var storageList []string + create := func() drivers.Driver { + path, err := ioutil.TempDir(os.TempDir(), "minio-fs-") + c.Check(err, IsNil) + storageList = append(storageList, path) + _, _, store := Start(path) // TODO Make InMemory driver + return store + } + drivers.APITestSuite(c, create) + removeRoots(c, storageList) +} + +func removeRoots(c *C, roots []string) { + for _, root := range roots { + err := os.RemoveAll(root) + c.Check(err, IsNil) + } +} diff --git a/pkg/storage/drivers/driver.go b/pkg/storage/drivers/driver.go new file mode 100644 index 000000000..baecf4707 --- /dev/null +++ b/pkg/storage/drivers/driver.go @@ -0,0 +1,151 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package drivers + +import ( + "io" + "regexp" + "time" + "unicode/utf8" +) + +// Driver - generic API interface for various drivers - donut, file, memory +type Driver interface { + // Bucket Operations + ListBuckets() ([]BucketMetadata, error) + CreateBucket(bucket string) error + GetBucketMetadata(bucket string) (BucketMetadata, error) + CreateBucketPolicy(bucket string, p BucketPolicy) error + GetBucketPolicy(bucket string) (BucketPolicy, error) + + // Object Operations + GetObject(w io.Writer, bucket, object string) (int64, error) + GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) + GetObjectMetadata(bucket string, object string, prefix string) (ObjectMetadata, error) + ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) + CreateObject(bucket string, key string, contentType string, md5sum string, data io.Reader) error +} + +// BucketMetadata - name and create date +type BucketMetadata struct { + Name string + Created time.Time +} + +// ObjectMetadata - object key and its relevant metadata +type ObjectMetadata struct { + Bucket string + Key string + + ContentType string + Created time.Time + Md5 string + Size int64 +} + +// FilterMode type +type FilterMode int + +// FilterMode list +const ( + DelimiterPrefixMode FilterMode = iota + DelimiterMode + PrefixMode + DefaultMode +) + +// BucketResourcesMetadata - various types of bucket resources +type BucketResourcesMetadata struct { + Prefix string + Marker string + Maxkeys int + Delimiter string + IsTruncated bool + CommonPrefixes []string + Mode FilterMode + + Policy bool + // TODO + Logging string + Notification string +} + +// GetMode - Populate filter mode +func GetMode(resources BucketResourcesMetadata) FilterMode { + var f FilterMode + switch true { + case resources.Delimiter != "" && resources.Prefix != "": + f = DelimiterPrefixMode + case resources.Delimiter != "" && resources.Prefix == "": + f = DelimiterMode + case 
resources.Delimiter == "" && resources.Prefix != "": + f = PrefixMode + case resources.Delimiter == "" && resources.Prefix == "": + f = DefaultMode + } + + return f +} + +// IsDelimiterPrefixSet Delimiter and Prefix set +func (b BucketResourcesMetadata) IsDelimiterPrefixSet() bool { + return b.Mode == DelimiterPrefixMode +} + +// IsDelimiterSet Delimiter set +func (b BucketResourcesMetadata) IsDelimiterSet() bool { + return b.Mode == DelimiterMode +} + +// IsPrefixSet Prefix set +func (b BucketResourcesMetadata) IsPrefixSet() bool { + return b.Mode == PrefixMode +} + +// IsDefault No query values +func (b BucketResourcesMetadata) IsDefault() bool { + return b.Mode == DefaultMode +} + +// IsValidBucket - verify bucket name in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func IsValidBucket(bucket string) bool { + if len(bucket) < 3 || len(bucket) > 63 { + return false + } + if bucket[0] == '.' || bucket[len(bucket)-1] == '.' { + return false + } + if match, _ := regexp.MatchString("\\.\\.", bucket); match == true { + return false + } + // We don't support buckets with '.' in them + match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket) + return match +} + +// IsValidObject - verify object name in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func IsValidObject(object string) bool { + if len(object) > 1024 || len(object) == 0 { + return false + } + if !utf8.ValidString(object) { + return false + } + return true +} diff --git a/pkg/storage/drivers/errors.go b/pkg/storage/drivers/errors.go new file mode 100644 index 000000000..bdede15d9 --- /dev/null +++ b/pkg/storage/drivers/errors.go @@ -0,0 +1,163 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package drivers + +// BackendError - generic disk backend error +type BackendError struct { + Path string +} + +// BackendCorrupted - path has corrupted data +type BackendCorrupted BackendError + +// APINotImplemented - generic API not implemented error +type APINotImplemented struct { + API string +} + +// GenericBucketError - generic bucket error +type GenericBucketError struct { + Bucket string +} + +// GenericObjectError - generic object error +type GenericObjectError struct { + Bucket string + Object string +} + +// ImplementationError - generic implementation error +type ImplementationError struct { + Bucket string + Object string + Err error +} + +// DigestError - Generic Md5 error +type DigestError struct { + Bucket string + Key string + Md5 string +} + +/// Bucket related errors + +// BucketPolicyNotFound - missing bucket policy +type BucketPolicyNotFound GenericBucketError + +// BucketNameInvalid - bucketname provided is invalid +type BucketNameInvalid GenericBucketError + +// BucketExists - bucket already exists +type BucketExists GenericBucketError + +// BucketNotFound - requested bucket not found +type BucketNotFound GenericBucketError + +/// Object related errors + +// ObjectNotFound - requested object not found +type ObjectNotFound GenericObjectError + +// ObjectExists - object already exists +type ObjectExists GenericObjectError + +// ObjectNameInvalid - object name provided is invalid +type ObjectNameInvalid GenericObjectError + +// BadDigest - md5 mismatch from data received +type BadDigest DigestError + +// InvalidDigest - md5 in request header invalid +type InvalidDigest DigestError + +// Return string an error formatted as the given text +func (e ImplementationError) Error() string { + error := "" + if e.Bucket != "" { + error = error + "Bucket: " + e.Bucket + " " + } + if e.Object != "" { + error = error + "Object: " + e.Object + " " + } + error = error + "Error: " + e.Err.Error() + return error +} + +// EmbedError - wrapper function for error object +func EmbedError(bucket, object string, err error) ImplementationError { + return ImplementationError{ + Bucket: bucket, + Object: object, + Err: err, + } +} + +// Return string an error formatted as the given text +func (e BucketPolicyNotFound) Error() string { + return "Bucket policy not found for: " + e.Bucket +} + +// Return string an error formatted as the given text +func (e ObjectNotFound) Error() string { + return "Object not Found: " + e.Bucket + "#" + e.Object +} + +// Return string an error formatted as the given text +func (e APINotImplemented) Error() string { + return "Api not implemented: " + e.API +} + +// Return string an error formatted as the given text +func (e ObjectExists) Error() string { + return "Object exists: " + e.Bucket + "#" + e.Object +} + +// Return string an error formatted as the given text +func (e BucketNameInvalid) Error() string { + return "Bucket name invalid: " + e.Bucket +} + +// Return string an error formatted as the given text +func (e BucketExists) Error() string { + return "Bucket exists: " + e.Bucket +} + +// Return string an error formatted as the given text +func (e BucketNotFound) Error() string { + return "Bucket not Found: " + e.Bucket +} + +// Return string an error formatted as the given text +func (e ObjectNameInvalid) Error() string { + return "Object name invalid: " + e.Bucket + "#" + e.Object +} + +// Return string an error formatted as the given text +func (e BackendCorrupted) Error() string { + return "Backend corrupted: " + e.Path +} + +// Return string 
an error formatted as the given text +func (e BadDigest) Error() string { + return "Md5 provided " + e.Md5 + " mismatches for: " + e.Bucket + "#" + e.Key +} + +// Return string an error formatted as the given text +func (e InvalidDigest) Error() string { + return "Md5 provided " + e.Md5 + " is invalid" +} diff --git a/pkg/storage/drivers/file/file.go b/pkg/storage/drivers/file/file.go new file mode 100644 index 000000000..2dc03d0f3 --- /dev/null +++ b/pkg/storage/drivers/file/file.go @@ -0,0 +1,41 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package file + +import ( + "os" + "sync" + + "github.com/minio-io/minio/pkg/storage/drivers" +) + +// Start filesystem channel +func Start(root string) (chan<- string, <-chan error, drivers.Driver) { + ctrlChannel := make(chan string) + errorChannel := make(chan error) + s := new(fileDriver) + s.root = root + s.lock = new(sync.Mutex) + go start(ctrlChannel, errorChannel, s) + return ctrlChannel, errorChannel, s +} + +func start(ctrlChannel <-chan string, errorChannel chan<- error, s *fileDriver) { + err := os.MkdirAll(s.root, 0700) + errorChannel <- err + close(errorChannel) +} diff --git a/pkg/storage/drivers/file/file_bucket.go b/pkg/storage/drivers/file/file_bucket.go new file mode 100644 index 000000000..def84f6aa --- /dev/null +++ b/pkg/storage/drivers/file/file_bucket.go @@ -0,0 +1,146 @@ +/* + * Minimalist Object File, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package file + +import ( + "os" + "path" + "sort" + "strings" + + "io/ioutil" + "path/filepath" + + "github.com/minio-io/minio/pkg/storage/drivers" +) + +/// Bucket Operations + +// GetBucketMetadata - head +func (file *fileDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { + st, err := os.Stat(path.Join(file.root, bucket)) + if err != nil { + return drivers.BucketMetadata{}, drivers.BucketNotFound{Bucket: bucket} + } + bucketMetadata := drivers.BucketMetadata{ + Name: st.Name(), + Created: st.ModTime(), + } + return bucketMetadata, nil +} + +// ListBuckets - Get service +func (file *fileDriver) ListBuckets() ([]drivers.BucketMetadata, error) { + files, err := ioutil.ReadDir(file.root) + if err != nil { + return []drivers.BucketMetadata{}, drivers.EmbedError("bucket", "", err) + } + + var metadataList []drivers.BucketMetadata + for _, fileName := range files { + // Skip policy files + if strings.HasSuffix(fileName.Name(), "_policy.json") { + continue + } + if !fileName.IsDir() { + return []drivers.BucketMetadata{}, drivers.BackendCorrupted{Path: file.root} + } + metadata := drivers.BucketMetadata{ + Name: fileName.Name(), + Created: fileName.ModTime(), // TODO - provide real created time + } + metadataList = append(metadataList, metadata) + } + return metadataList, nil +} + +// CreateBucket - PUT Bucket +func (file *fileDriver) CreateBucket(bucket string) error { + file.lock.Lock() + defer file.lock.Unlock() + + // verify bucket path legal + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketNameInvalid{Bucket: bucket} + } + + // get bucket path + bucketDir := path.Join(file.root, bucket) + + // check if bucket exists + if _, err := os.Stat(bucketDir); err == nil { + return drivers.BucketExists{ + Bucket: bucket, + } + } + + // make bucket + err := os.Mkdir(bucketDir, 0700) + if err != nil { + return drivers.EmbedError(bucket, "", err) + } + return nil +} + +// ListObjects - GET bucket (list objects) +func (file *fileDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + p := bucketDir{} + p.files = make(map[string]os.FileInfo) + + if drivers.IsValidBucket(bucket) == false { + return []drivers.ObjectMetadata{}, resources, drivers.BucketNameInvalid{Bucket: bucket} + } + if resources.Prefix != "" && drivers.IsValidObject(resources.Prefix) == false { + return []drivers.ObjectMetadata{}, resources, drivers.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix} + } + + rootPrefix := path.Join(file.root, bucket) + // check bucket exists + if _, err := os.Stat(rootPrefix); os.IsNotExist(err) { + return []drivers.ObjectMetadata{}, resources, drivers.BucketNotFound{Bucket: bucket} + } + + p.root = rootPrefix + err := filepath.Walk(rootPrefix, p.getAllFiles) + if err != nil { + return []drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + + var metadataList []drivers.ObjectMetadata + var metadata drivers.ObjectMetadata + + // Populate filtering mode + resources.Mode = drivers.GetMode(resources) + + for name, f := range p.files { + if len(metadataList) >= resources.Maxkeys { + resources.IsTruncated = true + goto ret + } + metadata, resources, err = file.filter(bucket, name, f, resources) + if err != nil { + return []drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + if metadata.Bucket != "" { + metadataList = append(metadataList, metadata) + } + } + +ret: + sort.Sort(byObjectKey(metadataList)) + return 
metadataList, resources, nil +} diff --git a/pkg/storage/drivers/file/file_common.go b/pkg/storage/drivers/file/file_common.go new file mode 100644 index 000000000..f623ae4d8 --- /dev/null +++ b/pkg/storage/drivers/file/file_common.go @@ -0,0 +1,89 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package file + +import ( + "bufio" + "bytes" + "os" + "strings" + "sync" + + "github.com/minio-io/minio/pkg/storage/drivers" +) + +// fileDriver - file local variables +type fileDriver struct { + root string + lock *sync.Mutex +} + +// fileMetadata - carries metadata about object +type fileMetadata struct { + Md5sum []byte + ContentType string +} + +func appendUniq(slice []string, i string) []string { + for _, ele := range slice { + if ele == i { + return slice + } + } + return append(slice, i) +} + +type bucketDir struct { + files map[string]os.FileInfo + root string +} + +func (p *bucketDir) getAllFiles(object string, fl os.FileInfo, err error) error { + if err != nil { + return err + } + if fl.Mode().IsRegular() { + if strings.HasSuffix(object, "$metadata") { + return nil + } + _p := strings.Split(object, p.root+"/") + if len(_p) > 1 { + p.files[_p[1]] = fl + } + } + return nil +} + +func delimiter(object, delimiter string) string { + readBuffer := bytes.NewBufferString(object) + reader := bufio.NewReader(readBuffer) + stringReader := strings.NewReader(delimiter) + delimited, _ := stringReader.ReadByte() + delimitedStr, _ := reader.ReadString(delimited) + return delimitedStr +} + +type byObjectKey []drivers.ObjectMetadata + +// Len +func (b byObjectKey) Len() int { return len(b) } + +// Swap +func (b byObjectKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +// Less +func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key } diff --git a/pkg/storage/drivers/file/file_filter.go b/pkg/storage/drivers/file/file_filter.go new file mode 100644 index 000000000..4294aa947 --- /dev/null +++ b/pkg/storage/drivers/file/file_filter.go @@ -0,0 +1,100 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package file + +import ( + "os" + "strings" + + "github.com/minio-io/minio/pkg/storage/drivers" +) + +func (file *fileDriver) filterDelimiterPrefix(bucket, name, fname, delimitedName string, resources drivers.BucketResourcesMetadata) (drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + var err error + var metadata drivers.ObjectMetadata + switch true { + case name == resources.Prefix: + // Use resources.Prefix to filter out delimited files + metadata, err = file.GetObjectMetadata(bucket, name, resources.Prefix) + if err != nil { + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + case delimitedName == fname: + // Use resources.Prefix to filter out delimited files + metadata, err = file.GetObjectMetadata(bucket, name, resources.Prefix) + if err != nil { + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + case delimitedName != "": + resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName) + } + return metadata, resources, nil +} + +// TODO handle resources.Marker +func (file *fileDriver) filter(bucket, name string, f os.FileInfo, resources drivers.BucketResourcesMetadata) (drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + var err error + var metadata drivers.ObjectMetadata + + switch true { + // Both delimiter and Prefix is present + case resources.IsDelimiterPrefixSet(): + if strings.HasPrefix(name, resources.Prefix) { + trimmedName := strings.TrimPrefix(name, resources.Prefix) + delimitedName := delimiter(trimmedName, resources.Delimiter) + metadata, resources, err = file.filterDelimiterPrefix(bucket, name, f.Name(), delimitedName, resources) + if err != nil { + return drivers.ObjectMetadata{}, resources, err + } + } + // Delimiter present and Prefix is absent + case resources.IsDelimiterSet(): + delimitedName := delimiter(name, resources.Delimiter) + switch true { + case delimitedName == "": + // Do not strip prefix object output + metadata, err = file.GetObjectMetadata(bucket, name, "") + if err != nil { + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + case delimitedName == f.Name(): + // Do not strip prefix object output + metadata, err = file.GetObjectMetadata(bucket, name, "") + if err != nil { + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + case delimitedName != "": + resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName) + } + // Delimiter is absent and only Prefix is present + case resources.IsPrefixSet(): + if strings.HasPrefix(name, resources.Prefix) { + // Do not strip prefix object output + metadata, err = file.GetObjectMetadata(bucket, name, "") + if err != nil { + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + } + case resources.IsDefault(): + metadata, err = file.GetObjectMetadata(bucket, name, "") + if err != nil { + return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err) + } + } + + return metadata, resources, nil +} diff --git a/pkg/storage/drivers/file/file_object.go b/pkg/storage/drivers/file/file_object.go new file mode 100644 index 000000000..799049e24 --- /dev/null +++ b/pkg/storage/drivers/file/file_object.go @@ -0,0 +1,283 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package file + +import ( + "bytes" + "io" + "os" + "path" + "strings" + + "github.com/minio-io/minio/pkg/storage/drivers" + + "crypto/md5" + "encoding/base64" + "encoding/gob" + "encoding/hex" +) + +/// Object Operations + +// GetPartialObject - GET object from range +func (file *fileDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { + // validate bucket + if drivers.IsValidBucket(bucket) == false { + return 0, drivers.BucketNameInvalid{Bucket: bucket} + } + + // validate object + if drivers.IsValidObject(object) == false { + return 0, drivers.ObjectNameInvalid{Bucket: bucket, Object: object} + } + + objectPath := path.Join(file.root, bucket, object) + filestat, err := os.Stat(objectPath) + switch err := err.(type) { + case nil: + { + if filestat.IsDir() { + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} + } + } + default: + { + if os.IsNotExist(err) { + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} + } + return 0, drivers.EmbedError(bucket, object, err) + } + } + f, err := os.Open(objectPath) + defer f.Close() + if err != nil { + return 0, drivers.EmbedError(bucket, object, err) + } + + _, err = f.Seek(start, os.SEEK_SET) + if err != nil { + return 0, drivers.EmbedError(bucket, object, err) + } + + count, err := io.CopyN(w, f, length) + if err != nil { + return count, drivers.EmbedError(bucket, object, err) + } + + return count, nil +} + +// GetObject - GET object from key +func (file *fileDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) { + // validate bucket + if drivers.IsValidBucket(bucket) == false { + return 0, drivers.BucketNameInvalid{Bucket: bucket} + } + + // check bucket exists + if _, err := os.Stat(path.Join(file.root, bucket)); os.IsNotExist(err) { + return 0, drivers.BucketNotFound{Bucket: bucket} + } + + // validate object + if drivers.IsValidObject(object) == false { + return 0, drivers.ObjectNameInvalid{Bucket: bucket, Object: object} + } + + objectPath := path.Join(file.root, bucket, object) + + filestat, err := os.Stat(objectPath) + switch err := err.(type) { + case nil: + { + if filestat.IsDir() { + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} + } + } + default: + { + if os.IsNotExist(err) { + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} + } + return 0, drivers.EmbedError(bucket, object, err) + } + } + f, err := os.Open(objectPath) + defer f.Close() + if err != nil { + return 0, drivers.EmbedError(bucket, object, err) + } + + count, err := io.Copy(w, f) + if err != nil { + return count, drivers.EmbedError(bucket, object, err) + } + return count, nil +} + +// GetObjectMetadata - HEAD object +func (file *fileDriver) GetObjectMetadata(bucket, object, prefix string) (drivers.ObjectMetadata, error) { + if drivers.IsValidBucket(bucket) == false { + return drivers.ObjectMetadata{}, drivers.BucketNameInvalid{Bucket: bucket} + } + if drivers.IsValidObject(object) == false { + return drivers.ObjectMetadata{}, drivers.ObjectNameInvalid{Bucket: bucket, Object: bucket} + } + // check bucket exists + if _, 
err := os.Stat(path.Join(file.root, bucket)); os.IsNotExist(err) { + return drivers.ObjectMetadata{}, drivers.BucketNotFound{Bucket: bucket} + } + // Do not use path.Join() since path.Join strips off any object names with '/', use them as is + // in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat() + objectPath := file.root + "/" + bucket + "/" + object + stat, err := os.Stat(objectPath) + if os.IsNotExist(err) { + return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: object} + } + + _, err = os.Stat(objectPath + "$metadata") + if os.IsNotExist(err) { + return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: object} + } + + f, err := os.Open(objectPath + "$metadata") + defer f.Close() + if err != nil { + return drivers.ObjectMetadata{}, drivers.EmbedError(bucket, object, err) + } + + var deserializedMetadata fileMetadata + decoder := gob.NewDecoder(f) + err = decoder.Decode(&deserializedMetadata) + if err != nil { + return drivers.ObjectMetadata{}, drivers.EmbedError(bucket, object, err) + } + + contentType := "application/octet-stream" + if deserializedMetadata.ContentType != "" { + contentType = deserializedMetadata.ContentType + } + contentType = strings.TrimSpace(contentType) + + etag := bucket + "#" + path.Base(object) + if len(deserializedMetadata.Md5sum) != 0 { + etag = hex.EncodeToString(deserializedMetadata.Md5sum) + } + trimmedObject := strings.TrimPrefix(object, prefix) + metadata := drivers.ObjectMetadata{ + Bucket: bucket, + Key: trimmedObject, + Created: stat.ModTime(), + Size: stat.Size(), + Md5: etag, + ContentType: contentType, + } + + return metadata, nil +} + +// CreateObject - PUT object +func (file *fileDriver) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error { + // TODO Commits should stage then move instead of writing directly + file.lock.Lock() + defer file.lock.Unlock() + + // check bucket name valid + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketNameInvalid{Bucket: bucket} + } + + // check bucket exists + if _, err := os.Stat(path.Join(file.root, bucket)); os.IsNotExist(err) { + return drivers.BucketNotFound{Bucket: bucket} + } + + // verify object path legal + if drivers.IsValidObject(key) == false { + return drivers.ObjectNameInvalid{Bucket: bucket, Object: key} + } + + // verify content type + if contentType == "" { + contentType = "application/octet-stream" + } + contentType = strings.TrimSpace(contentType) + + // get object path + objectPath := path.Join(file.root, bucket, key) + objectDir := path.Dir(objectPath) + if _, err := os.Stat(objectDir); os.IsNotExist(err) { + err = os.MkdirAll(objectDir, 0700) + if err != nil { + return drivers.EmbedError(bucket, key, err) + } + } + + // check if object exists + if _, err := os.Stat(objectPath); !os.IsNotExist(err) { + return drivers.ObjectExists{ + Bucket: bucket, + Object: key, + } + } + + // write object + f, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600) + defer f.Close() + if err != nil { + return drivers.EmbedError(bucket, key, err) + } + + h := md5.New() + mw := io.MultiWriter(f, h) + + _, err = io.Copy(mw, data) + if err != nil { + return drivers.EmbedError(bucket, key, err) + } + + // + f, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600) + defer f.Close() + if err != nil { + return drivers.EmbedError(bucket, key, err) + } + + metadata := &fileMetadata{ + ContentType: contentType, + Md5sum: h.Sum(nil), + } + // serialize metadata to 
gob + encoder := gob.NewEncoder(f) + err = encoder.Encode(metadata) + if err != nil { + return drivers.EmbedError(bucket, key, err) + } + + // Verify data received to be correct, Content-MD5 received + if md5sum != "" { + var data []byte + data, err = base64.StdEncoding.DecodeString(md5sum) + if err != nil { + return drivers.InvalidDigest{Bucket: bucket, Key: key, Md5: md5sum} + } + if !bytes.Equal(metadata.Md5sum, data) { + return drivers.BadDigest{Bucket: bucket, Key: key, Md5: md5sum} + } + } + return nil +} diff --git a/pkg/storage/drivers/file/file_policy.go b/pkg/storage/drivers/file/file_policy.go new file mode 100644 index 000000000..ed0cb5b82 --- /dev/null +++ b/pkg/storage/drivers/file/file_policy.go @@ -0,0 +1,112 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package file + +import ( + "os" + "path" + + "github.com/minio-io/minio/pkg/storage/drivers" + + "encoding/json" +) + +// GetBucketPolicy - GET bucket policy +func (file *fileDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) { + file.lock.Lock() + defer file.lock.Unlock() + + var p drivers.BucketPolicy + // verify bucket path legal + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketPolicy{}, drivers.BucketNameInvalid{Bucket: bucket} + } + + // get bucket path + bucketDir := path.Join(file.root, bucket) + // check if bucket exists + if _, err := os.Stat(bucketDir); err != nil { + return drivers.BucketPolicy{}, drivers.BucketNotFound{Bucket: bucket} + } + + // get policy path + bucketPolicy := path.Join(file.root, bucket+"_policy.json") + filestat, err := os.Stat(bucketPolicy) + + if os.IsNotExist(err) { + return drivers.BucketPolicy{}, drivers.BucketPolicyNotFound{Bucket: bucket} + } + + if filestat.IsDir() { + return drivers.BucketPolicy{}, drivers.BackendCorrupted{Path: bucketPolicy} + } + + f, err := os.OpenFile(bucketPolicy, os.O_RDONLY, 0666) + defer f.Close() + if err != nil { + return drivers.BucketPolicy{}, drivers.EmbedError(bucket, "", err) + } + encoder := json.NewDecoder(f) + err = encoder.Decode(&p) + if err != nil { + return drivers.BucketPolicy{}, drivers.EmbedError(bucket, "", err) + } + + return p, nil + +} + +// CreateBucketPolicy - PUT bucket policy +func (file *fileDriver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error { + file.lock.Lock() + defer file.lock.Unlock() + + // verify bucket path legal + if drivers.IsValidBucket(bucket) == false { + return drivers.BucketNameInvalid{Bucket: bucket} + } + + // get bucket path + bucketDir := path.Join(file.root, bucket) + // check if bucket exists + if _, err := os.Stat(bucketDir); err != nil { + return drivers.BucketNotFound{ + Bucket: bucket, + } + } + + // get policy path + bucketPolicy := path.Join(file.root, bucket+"_policy.json") + filestat, ret := os.Stat(bucketPolicy) + if !os.IsNotExist(ret) { + if filestat.IsDir() { + return drivers.BackendCorrupted{Path: bucketPolicy} + } + } + + f, err := os.OpenFile(bucketPolicy, 
os.O_WRONLY|os.O_CREATE, 0600) + defer f.Close() + if err != nil { + return drivers.EmbedError(bucket, "", err) + } + encoder := json.NewEncoder(f) + err = encoder.Encode(p) + if err != nil { + return drivers.EmbedError(bucket, "", err) + } + return nil +} diff --git a/pkg/storage/drivers/file/file_test.go b/pkg/storage/drivers/file/file_test.go new file mode 100644 index 000000000..d29c434fd --- /dev/null +++ b/pkg/storage/drivers/file/file_test.go @@ -0,0 +1,52 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package file + +import ( + "io/ioutil" + "os" + "testing" + + . "github.com/minio-io/check" + "github.com/minio-io/minio/pkg/storage/drivers" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func (s *MySuite) TestAPISuite(c *C) { + var storageList []string + create := func() drivers.Driver { + path, err := ioutil.TempDir(os.TempDir(), "minio-file-") + c.Check(err, IsNil) + storageList = append(storageList, path) + _, _, store := Start(path) + return store + } + drivers.APITestSuite(c, create) + removeRoots(c, storageList) +} + +func removeRoots(c *C, roots []string) { + for _, root := range roots { + err := os.RemoveAll(root) + c.Check(err, IsNil) + } +} diff --git a/pkg/storage/drivers/memory/memory.go b/pkg/storage/drivers/memory/memory.go new file mode 100644 index 000000000..0b0bb061d --- /dev/null +++ b/pkg/storage/drivers/memory/memory.go @@ -0,0 +1,287 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package memory + +import ( + "bufio" + "bytes" + "io" + "sort" + "strings" + "sync" + "time" + + "github.com/minio-io/minio/pkg/storage/drivers" + + "crypto/md5" + "encoding/hex" + + "io/ioutil" +) + +// memoryDriver - local variables +type memoryDriver struct { + bucketdata map[string]storedBucket + objectdata map[string]storedObject + lock *sync.RWMutex +} + +type storedBucket struct { + metadata drivers.BucketMetadata + // owner string // TODO + // id string // TODO +} + +type storedObject struct { + metadata drivers.ObjectMetadata + data []byte +} + +// Start memory object server +func Start() (chan<- string, <-chan error, drivers.Driver) { + ctrlChannel := make(chan string) + errorChannel := make(chan error) + + memory := new(memoryDriver) + memory.bucketdata = make(map[string]storedBucket) + memory.objectdata = make(map[string]storedObject) + memory.lock = new(sync.RWMutex) + + go start(ctrlChannel, errorChannel) + return ctrlChannel, errorChannel, memory +} + +func start(ctrlChannel <-chan string, errorChannel chan<- error) { + close(errorChannel) +} + +// GetObject - GET object from memory buffer +func (memory memoryDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) { + if _, ok := memory.bucketdata[bucket]; ok == false { + return 0, drivers.BucketNotFound{Bucket: bucket} + } + // get object + key := object + if val, ok := memory.objectdata[key]; ok { + objectBuffer := bytes.NewBuffer(val.data) + written, err := io.Copy(w, objectBuffer) + return written, err + } + return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object} +} + +// GetPartialObject - GET object from memory buffer range +func (memory memoryDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { + var sourceBuffer bytes.Buffer + if _, err := memory.GetObject(&sourceBuffer, bucket, object); err != nil { + return 0, err + } + if _, err := io.CopyN(ioutil.Discard, &sourceBuffer, start); err != nil { + return 0, err + } + return io.CopyN(w, &sourceBuffer, length) +} + +// GetBucketMetadata - +func (memory memoryDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { + if _, ok := memory.bucketdata[bucket]; ok == false { + return drivers.BucketMetadata{}, drivers.BucketNotFound{Bucket: bucket} + } + return memory.bucketdata[bucket].metadata, nil +} + +// CreateBucketPolicy - Not implemented +func (memory memoryDriver) CreateBucketPolicy(bucket string, policy drivers.BucketPolicy) error { + return drivers.APINotImplemented{API: "PutBucketPolicy"} +} + +// GetBucketPolicy - Not implemented +func (memory memoryDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) { + return drivers.BucketPolicy{}, drivers.APINotImplemented{API: "GetBucketPolicy"} +} + +// CreateObject - PUT object to memory buffer +func (memory memoryDriver) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error { + memory.lock.Lock() + defer memory.lock.Unlock() + + if _, ok := memory.bucketdata[bucket]; ok == false { + return drivers.BucketNotFound{Bucket: bucket} + } + + if _, ok := memory.objectdata[key]; ok == true { + return drivers.ObjectExists{Bucket: bucket, Object: key} + } + + if contentType == "" { + contentType = "application/octet-stream" + } + + contentType = strings.TrimSpace(contentType) + + var bytesBuffer bytes.Buffer + var newObject = storedObject{} + if _, ok := io.Copy(&bytesBuffer, data); ok == nil { + size := bytesBuffer.Len() + md5SumBytes := md5.Sum(bytesBuffer.Bytes()) + md5Sum := 
hex.EncodeToString(md5SumBytes[:]) + newObject.metadata = drivers.ObjectMetadata{ + Bucket: bucket, + Key: key, + + ContentType: contentType, + Created: time.Now(), + Md5: md5Sum, + Size: int64(size), + } + newObject.data = bytesBuffer.Bytes() + } + memory.objectdata[key] = newObject + return nil +} + +// CreateBucket - create bucket in memory +func (memory memoryDriver) CreateBucket(bucketName string) error { + memory.lock.Lock() + defer memory.lock.Unlock() + if !drivers.IsValidBucket(bucketName) { + return drivers.BucketNameInvalid{Bucket: bucketName} + } + + if _, ok := memory.bucketdata[bucketName]; ok == true { + return drivers.BucketExists{Bucket: bucketName} + } + + var newBucket = storedBucket{} + newBucket.metadata = drivers.BucketMetadata{} + newBucket.metadata.Name = bucketName + newBucket.metadata.Created = time.Now() + memory.bucketdata[bucketName] = newBucket + + return nil +} + +func delimiter(object, delimiter string) string { + readBuffer := bytes.NewBufferString(object) + reader := bufio.NewReader(readBuffer) + stringReader := strings.NewReader(delimiter) + delimited, _ := stringReader.ReadByte() + delimitedStr, _ := reader.ReadString(delimited) + return delimitedStr +} + +func appendUniq(slice []string, i string) []string { + for _, ele := range slice { + if ele == i { + return slice + } + } + return append(slice, i) +} + +func (memory memoryDriver) filterDelimiterPrefix(keys []string, key, delimitedName string, resources drivers.BucketResourcesMetadata) (drivers.BucketResourcesMetadata, []string) { + switch true { + case key == resources.Prefix: + keys = appendUniq(keys, key) + // DelimitedName - requires resources.Prefix as it was trimmed off earlier in the flow + case key == resources.Prefix+delimitedName: + keys = appendUniq(keys, key) + case delimitedName != "": + resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName) + } + return resources, keys +} + +// ListObjects - list objects from memory +func (memory memoryDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + if _, ok := memory.bucketdata[bucket]; ok == false { + return []drivers.ObjectMetadata{}, drivers.BucketResourcesMetadata{IsTruncated: false}, drivers.BucketNotFound{Bucket: bucket} + } + var results []drivers.ObjectMetadata + var keys []string + for key := range memory.objectdata { + switch true { + // Prefix absent, delimit object key based on delimiter + case resources.IsDelimiterSet(): + delimitedName := delimiter(key, resources.Delimiter) + switch true { + case delimitedName == "" || delimitedName == key: + keys = appendUniq(keys, key) + case delimitedName != "": + resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName) + } + // Prefix present, delimit object key with prefix key based on delimiter + case resources.IsDelimiterPrefixSet(): + if strings.HasPrefix(key, resources.Prefix) { + trimmedName := strings.TrimPrefix(key, resources.Prefix) + delimitedName := delimiter(trimmedName, resources.Delimiter) + resources, keys = memory.filterDelimiterPrefix(keys, key, delimitedName, resources) + } + // Prefix present, nothing to delimit + case resources.IsPrefixSet(): + keys = appendUniq(keys, key) + // Prefix and delimiter absent + case resources.IsDefault(): + keys = appendUniq(keys, key) + } + } + sort.Strings(keys) + for _, key := range keys { + if len(results) == resources.Maxkeys { + return results, 
drivers.BucketResourcesMetadata{IsTruncated: true}, nil + } + object := memory.objectdata[key] + if bucket == object.metadata.Bucket { + results = append(results, object.metadata) + } + } + return results, resources, nil +} + +// ByBucketName is a type for sorting bucket metadata by bucket name +type ByBucketName []drivers.BucketMetadata + +// Len of bucket name +func (b ByBucketName) Len() int { return len(b) } + +// Swap bucket i, j +func (b ByBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +// Less +func (b ByBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +// ListBuckets - List buckets from memory +func (memory memoryDriver) ListBuckets() ([]drivers.BucketMetadata, error) { + var results []drivers.BucketMetadata + for _, bucket := range memory.bucketdata { + results = append(results, bucket.metadata) + } + sort.Sort(ByBucketName(results)) + return results, nil +} + +// GetObjectMetadata - get object metadata from memory +func (memory memoryDriver) GetObjectMetadata(bucket, key, prefix string) (drivers.ObjectMetadata, error) { + // check if bucket exists + if _, ok := memory.bucketdata[bucket]; ok == false { + return drivers.ObjectMetadata{}, drivers.BucketNotFound{Bucket: bucket} + } + if object, ok := memory.objectdata[key]; ok == true { + return object.metadata, nil + } + return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: key} +} diff --git a/pkg/storage/drivers/memory/memory_test.go b/pkg/storage/drivers/memory/memory_test.go new file mode 100644 index 000000000..b0b8f177d --- /dev/null +++ b/pkg/storage/drivers/memory/memory_test.go @@ -0,0 +1,38 @@ +/* + * Minimalist Object Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package memory + +import ( + "testing" + + . 
"github.com/minio-io/check" + "github.com/minio-io/minio/pkg/storage/drivers" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func (s *MySuite) TestAPISuite(c *C) { + create := func() drivers.Driver { + _, _, store := Start() + return store + } + drivers.APITestSuite(c, create) +} diff --git a/pkg/storage/drivers/mocks/Driver.go b/pkg/storage/drivers/mocks/Driver.go new file mode 100644 index 000000000..1d9bd9ebf --- /dev/null +++ b/pkg/storage/drivers/mocks/Driver.go @@ -0,0 +1,135 @@ +package mocks + +import ( + "bytes" + "io" + + "github.com/minio-io/iodine" + "github.com/minio-io/minio/pkg/storage/drivers" + "github.com/stretchr/testify/mock" +) + +// Driver is a mock +type Driver struct { + mock.Mock + + ObjectWriterData map[string][]byte +} + +// ListBuckets is a mock +func (m *Driver) ListBuckets() ([]drivers.BucketMetadata, error) { + ret := m.Called() + + r0 := ret.Get(0).([]drivers.BucketMetadata) + r1 := ret.Error(1) + + return r0, r1 +} + +// CreateBucket is a mock +func (m *Driver) CreateBucket(bucket string) error { + ret := m.Called(bucket) + + r0 := ret.Error(0) + + return r0 +} + +// GetBucketMetadata is a mock +func (m *Driver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { + ret := m.Called(bucket) + r0 := ret.Get(0).(drivers.BucketMetadata) + r1 := ret.Error(1) + + return r0, r1 +} + +// CreateBucketPolicy is a mock +func (m *Driver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error { + ret := m.Called(bucket, p) + + r0 := ret.Error(0) + + return r0 +} + +// GetBucketPolicy is a mock +func (m *Driver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) { + ret := m.Called(bucket) + + r0 := ret.Get(0).(drivers.BucketPolicy) + r1 := ret.Error(1) + + return r0, r1 +} + +// SetGetObjectWriter is a mock +func (m *Driver) SetGetObjectWriter(bucket, object string, data []byte) { + m.ObjectWriterData[bucket+":"+object] = data + // println(string(m.ObjectWriterData["bucket:object"])) +} + +// GetObject is a mock +func (m *Driver) GetObject(w io.Writer, bucket string, object string) (int64, error) { + ret := m.Called(w, bucket, object) + r0 := ret.Get(0).(int64) + r1 := ret.Error(1) + if r1 == nil { + if obj, ok := m.ObjectWriterData[bucket+":"+object]; ok { + n, _ := io.Copy(w, bytes.NewBuffer(obj)) + r0 = n + } + } + return r0, r1 +} + +// GetPartialObject is a mock +func (m *Driver) GetPartialObject(w io.Writer, bucket string, object string, start int64, length int64) (int64, error) { + ret := m.Called(w, bucket, object, start, length) + + r0 := ret.Get(0).(int64) + r1 := ret.Error(1) + + if r1 == nil { + if obj, ok := m.ObjectWriterData[bucket+":"+object]; ok { + source := bytes.NewBuffer(obj) + var nilSink bytes.Buffer + io.CopyN(&nilSink, source, start) + n, _ := io.CopyN(w, source, length) + r0 = n + } + } + r1 = iodine.New(r1, nil) + + return r0, r1 +} + +// GetObjectMetadata is a mock +func (m *Driver) GetObjectMetadata(bucket string, object string, prefix string) (drivers.ObjectMetadata, error) { + ret := m.Called(bucket, object, prefix) + + r0 := ret.Get(0).(drivers.ObjectMetadata) + r1 := ret.Error(1) + + return r0, r1 +} + +// ListObjects is a mock +func (m *Driver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { + ret := m.Called(bucket, resources) + + r0 := ret.Get(0).([]drivers.ObjectMetadata) + r1 := ret.Get(1).(drivers.BucketResourcesMetadata) + r2 := ret.Error(2) + + return 
r0, r1, r2 +} + +// CreateObject is a mock +func (m *Driver) CreateObject(bucket string, key string, contentType string, md5sum string, data io.Reader) error { + ret := m.Called(bucket, key, contentType, md5sum, data) + + r0 := ret.Error(0) + + return r0 +}
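
For reviewers, a small standalone sketch of the delimiter semantics that both the file and memory drivers now rely on when computing CommonPrefixes during ListObjects. The helper below is copied from the unexported delimiter function added in this patch; the sample object names are illustrative only.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// delimiter mirrors the unexported helper introduced for the file and memory
// drivers: it returns the object name up to and including the first occurrence
// of the delimiter, or the whole name when no delimiter is present.
func delimiter(object, delim string) string {
	readBuffer := bytes.NewBufferString(object)
	reader := bufio.NewReader(readBuffer)
	stringReader := strings.NewReader(delim)
	delimited, _ := stringReader.ReadByte()
	delimitedStr, _ := reader.ReadString(delimited)
	return delimitedStr
}

func main() {
	fmt.Println(delimiter("photos/2015/january.png", "/")) // "photos/"    -> reported as a CommonPrefix
	fmt.Println(delimiter("readme.txt", "/"))              // "readme.txt" -> listed as a regular key
}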
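
A minimal usage sketch of the new memory driver through the drivers.Driver interface, assuming only the Start signature and methods added in this patch; the bucket and object names are illustrative, and the control and error channels returned by Start are ignored here.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/minio-io/minio/pkg/storage/drivers/memory"
)

func main() {
	// Start returns a control channel, an error channel and the
	// drivers.Driver implementation; only the driver is needed here.
	_, _, store := memory.Start()

	if err := store.CreateBucket("testbucket"); err != nil {
		log.Fatal(err)
	}

	// An empty md5sum means the driver stores the object without
	// verifying a client-supplied checksum.
	body := bytes.NewBufferString("hello, minio")
	if err := store.CreateObject("testbucket", "greeting.txt", "text/plain", "", body); err != nil {
		log.Fatal(err)
	}

	// Read the object back through the same interface.
	var buf bytes.Buffer
	n, err := store.GetObject(&buf, "testbucket", "greeting.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf.String())
}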
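
And a sketch of how the new mocks.Driver could be wired up in handler tests, assuming the standard testify expectation API (On, Return, AssertExpectations); the bucket, object and payload below are made up for illustration.

package mocks_test

import (
	"bytes"
	"testing"

	"github.com/minio-io/minio/pkg/storage/drivers/mocks"
	"github.com/stretchr/testify/mock"
)

func TestMockGetObject(t *testing.T) {
	// Preload the bytes the mock should stream back for bucket/object.
	driver := &mocks.Driver{ObjectWriterData: make(map[string][]byte)}
	driver.SetGetObjectWriter("bucket", "object", []byte("hello from the mock"))

	// The writer argument is matched loosely; the returned length of 0 is
	// overridden inside the mock once the preloaded data is copied out.
	driver.On("GetObject", mock.Anything, "bucket", "object").Return(int64(0), nil)

	var buf bytes.Buffer
	n, err := driver.GetObject(&buf, "bucket", "object")
	if err != nil || n == 0 || buf.String() != "hello from the mock" {
		t.Fatalf("unexpected result: n=%d err=%v body=%q", n, err, buf.String())
	}
	driver.AssertExpectations(t)
}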