parent
c4c67581dc
commit
2571342451
@ -1,42 +0,0 @@ |
||||
/* |
||||
* Mini Object Storage, (C) 2015 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package filesystem |
||||
|
||||
import ( |
||||
"os" |
||||
"sync" |
||||
|
||||
"github.com/minio/minio/pkg/storage/drivers" |
||||
) |
||||
|
||||
// fsDriver implements a storage driver backed by the local filesystem.
// Buckets are directories directly under root; objects are files inside
// those directories.
type fsDriver struct {
	root       string      // base directory under which all buckets live
	lock       *sync.Mutex // coarse lock serializing every driver operation
	multiparts *Multiparts // in-memory view of active multipart upload sessions
}
||||
|
||||
// NewDriver instantiate a new filesystem driver
|
||||
func NewDriver(root string) (drivers.Driver, error) { |
||||
fs := new(fsDriver) |
||||
fs.root = root |
||||
fs.lock = new(sync.Mutex) |
||||
// internal related to multiparts
|
||||
fs.multiparts = new(Multiparts) |
||||
fs.multiparts.ActiveSession = make(map[string]*MultipartSession) |
||||
err := os.MkdirAll(fs.root, 0700) |
||||
return fs, err |
||||
} |
@ -1,205 +0,0 @@ |
||||
/* |
||||
* Mini Object Fs, (C) 2015 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package filesystem |
||||
|
||||
import ( |
||||
"os" |
||||
"sort" |
||||
|
||||
"io/ioutil" |
||||
"path/filepath" |
||||
|
||||
"github.com/minio/minio/pkg/iodine" |
||||
"github.com/minio/minio/pkg/storage/drivers" |
||||
) |
||||
|
||||
/// Bucket Operations
|
||||
|
||||
// ListBuckets - Get service
|
||||
func (fs *fsDriver) ListBuckets() ([]drivers.BucketMetadata, error) { |
||||
files, err := ioutil.ReadDir(fs.root) |
||||
if err != nil { |
||||
return []drivers.BucketMetadata{}, iodine.New(err, nil) |
||||
} |
||||
|
||||
var metadataList []drivers.BucketMetadata |
||||
for _, file := range files { |
||||
if !file.IsDir() { |
||||
// if files found ignore them
|
||||
continue |
||||
} |
||||
if file.IsDir() { |
||||
// if directories found with odd names, skip them too
|
||||
if !drivers.IsValidBucket(file.Name()) { |
||||
continue |
||||
} |
||||
} |
||||
|
||||
metadata := drivers.BucketMetadata{ |
||||
Name: file.Name(), |
||||
Created: file.ModTime(), |
||||
} |
||||
metadataList = append(metadataList, metadata) |
||||
} |
||||
return metadataList, nil |
||||
} |
||||
|
||||
// CreateBucket - PUT Bucket
|
||||
func (fs *fsDriver) CreateBucket(bucket, acl string) error { |
||||
fs.lock.Lock() |
||||
defer fs.lock.Unlock() |
||||
|
||||
// verify bucket path legal
|
||||
if drivers.IsValidBucket(bucket) == false { |
||||
return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
|
||||
// get bucket path
|
||||
bucketDir := filepath.Join(fs.root, bucket) |
||||
|
||||
// check if bucket exists
|
||||
if _, err := os.Stat(bucketDir); err == nil { |
||||
return iodine.New(drivers.BucketExists{ |
||||
Bucket: bucket, |
||||
}, nil) |
||||
} |
||||
|
||||
// make bucket
|
||||
err := os.Mkdir(bucketDir, 0700) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// GetBucketMetadata -
|
||||
func (fs *fsDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) { |
||||
fs.lock.Lock() |
||||
defer fs.lock.Unlock() |
||||
if !drivers.IsValidBucket(bucket) { |
||||
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
// get bucket path
|
||||
bucketDir := filepath.Join(fs.root, bucket) |
||||
bucketMetadata := drivers.BucketMetadata{} |
||||
fi, err := os.Stat(bucketDir) |
||||
// check if bucket exists
|
||||
if os.IsNotExist(err) { |
||||
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) |
||||
} |
||||
if err != nil { |
||||
return drivers.BucketMetadata{}, iodine.New(err, nil) |
||||
} |
||||
|
||||
bucketMetadata.Name = fi.Name() |
||||
bucketMetadata.Created = fi.ModTime() |
||||
// TODO convert os.FileMode to meaningful ACL's
|
||||
bucketMetadata.ACL = drivers.BucketACL("private") |
||||
return bucketMetadata, nil |
||||
} |
||||
|
||||
// aclToPerm - convert acl to filesystem mode
|
||||
func aclToPerm(acl string) os.FileMode { |
||||
switch acl { |
||||
case "private": |
||||
return os.FileMode(0700) |
||||
case "public-read": |
||||
return os.FileMode(0500) |
||||
case "public-read-write": |
||||
return os.FileMode(0777) |
||||
case "authenticated-read": |
||||
return os.FileMode(0770) |
||||
default: |
||||
return os.FileMode(0700) |
||||
} |
||||
} |
||||
|
||||
// SetBucketMetadata -
|
||||
func (fs *fsDriver) SetBucketMetadata(bucket, acl string) error { |
||||
fs.lock.Lock() |
||||
defer fs.lock.Unlock() |
||||
if !drivers.IsValidBucket(bucket) { |
||||
return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
if !drivers.IsValidBucketACL(acl) { |
||||
return iodine.New(drivers.InvalidACL{ACL: acl}, nil) |
||||
} |
||||
// get bucket path
|
||||
bucketDir := filepath.Join(fs.root, bucket) |
||||
err := os.Chmod(bucketDir, aclToPerm(acl)) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// ListObjects - GET bucket (list objects)
//
// Walks the bucket directory collecting every user-visible file (internal
// $metadata/$multiparts/$N files are filtered by getAllFiles), then applies
// marker/prefix/delimiter filtering via filterObjects, truncating the
// result at resources.Maxkeys. Results are returned sorted by object key.
func (fs *fsDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
	p := bucketDir{}
	p.files = make(map[string]os.FileInfo)

	if drivers.IsValidBucket(bucket) == false {
		return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}
	if resources.Prefix != "" && drivers.IsValidObjectName(resources.Prefix) == false {
		return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix}, nil)
	}

	rootPrefix := filepath.Join(fs.root, bucket)
	// check bucket exists
	if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
		return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}

	// collect every regular file under the bucket into p.files,
	// keyed by path relative to the bucket root
	p.root = rootPrefix
	err := filepath.Walk(rootPrefix, p.getAllFiles)
	if err != nil {
		return []drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
	}

	var metadataList []drivers.ObjectMetadata
	var metadata drivers.ObjectMetadata

	// Populate filtering mode
	resources.Mode = drivers.GetMode(resources)

	// sort names so truncation and NextMarker are deterministic
	var fileNames []string
	for name := range p.files {
		fileNames = append(fileNames, name)
	}
	sort.Strings(fileNames)
	for _, name := range fileNames {
		if len(metadataList) >= resources.Maxkeys {
			resources.IsTruncated = true
			if resources.IsTruncated && resources.IsDelimiterSet() {
				// NOTE(review): if Maxkeys were 0 this indexes an empty
				// slice and panics — presumably callers guarantee
				// Maxkeys > 0; verify upstream.
				resources.NextMarker = metadataList[len(metadataList)-1].Key
			}
			break
		}
		// only keys strictly after the marker are considered
		if name > resources.Marker {
			metadata, resources, err = fs.filterObjects(bucket, name, p.files[name], resources)
			if err != nil {
				return []drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
			}
			// filterObjects returns a zero-value metadata (empty Bucket)
			// when the entry was folded into CommonPrefixes or filtered out
			if metadata.Bucket != "" {
				metadataList = append(metadataList, metadata)
			}
		}
	}
	sort.Sort(byObjectKey(metadataList))
	return metadataList, resources, nil
}
@ -1,88 +0,0 @@ |
||||
/* |
||||
* Mini Object Storage, (C) 2015 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package filesystem |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"os" |
||||
"regexp" |
||||
"strings" |
||||
|
||||
"github.com/minio/minio/pkg/storage/drivers" |
||||
) |
||||
|
||||
// Metadata - carries metadata about object as persisted in the object's
// "$metadata" sidecar file (JSON encoded).
type Metadata struct {
	Md5sum      []byte // raw (unencoded) MD5 digest of the object contents
	ContentType string // MIME type recorded at upload time
}
||||
|
||||
// appendUniq returns slice with i appended, unless slice already
// contains i, in which case slice is returned unchanged.
func appendUniq(slice []string, i string) []string {
	found := false
	for _, ele := range slice {
		if ele == i {
			found = true
			break
		}
	}
	if found {
		return slice
	}
	return append(slice, i)
}
||||
|
||||
type bucketDir struct { |
||||
files map[string]os.FileInfo |
||||
root string |
||||
} |
||||
|
||||
func (p *bucketDir) getAllFiles(object string, fl os.FileInfo, err error) error { |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if fl.Mode().IsRegular() { |
||||
if strings.HasSuffix(object, "$metadata") { |
||||
return nil |
||||
} |
||||
if strings.HasSuffix(object, "$multiparts") { |
||||
return nil |
||||
} |
||||
matched, err := regexp.MatchString("\\$[0-9].*$", object) |
||||
if err != nil { |
||||
return nil |
||||
} |
||||
if matched { |
||||
return nil |
||||
} |
||||
_p := strings.Split(object, p.root+"/") |
||||
if len(_p) > 1 { |
||||
p.files[_p[1]] = fl |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// delimiter returns the prefix of object up to and including the first
// occurrence of the delimiter's first byte; when that byte does not occur,
// the whole object name is returned unchanged.
//
// The original built three readers (bytes.Buffer, bufio.Reader,
// strings.Reader) and ignored their errors to do what a single
// strings.IndexByte does; behavior is preserved exactly, including the
// empty-delimiter case (which degenerates to searching for a NUL byte).
func delimiter(object, delimiter string) string {
	var d byte // zero byte when delimiter is empty, matching ReadByte on ""
	if len(delimiter) > 0 {
		d = delimiter[0]
	}
	if i := strings.IndexByte(object, d); i >= 0 {
		return object[:i+1]
	}
	return object
}
||||
|
||||
// byObjectKey is a sort.Interface over ObjectMetadata, ordering
// lexicographically by object Key.
type byObjectKey []drivers.ObjectMetadata

func (b byObjectKey) Len() int           { return len(b) }
func (b byObjectKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
@ -1,88 +0,0 @@ |
||||
/* |
||||
* Mini Object Storage, (C) 2015 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package filesystem |
||||
|
||||
import ( |
||||
"os" |
||||
"strings" |
||||
|
||||
"github.com/minio/minio/pkg/iodine" |
||||
"github.com/minio/minio/pkg/storage/drivers" |
||||
) |
||||
|
||||
// filterObjects decides, for a single walked file, whether it should be
// returned as an object (metadata populated), folded into
// resources.CommonPrefixes (delimiter handling), or dropped (zero-value
// metadata returned). The caller distinguishes the cases by checking
// metadata.Bucket != "".
func (fs *fsDriver) filterObjects(bucket, name string, file os.FileInfo, resources drivers.BucketResourcesMetadata) (drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
	var err error
	var metadata drivers.ObjectMetadata

	switch true {
	// Both delimiter and Prefix is present
	case resources.IsDelimiterPrefixSet():
		if strings.HasPrefix(name, resources.Prefix) {
			// delimit only the portion of the name after the prefix
			trimmedName := strings.TrimPrefix(name, resources.Prefix)
			delimitedName := delimiter(trimmedName, resources.Delimiter)
			switch true {
			case name == resources.Prefix:
				// Use resources.Prefix to filter out delimited files
				metadata, err = fs.GetObjectMetadata(bucket, name)
				if err != nil {
					return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
				}
			case delimitedName == file.Name():
				// Use resources.Prefix to filter out delimited files
				metadata, err = fs.GetObjectMetadata(bucket, name)
				if err != nil {
					return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
				}
			case delimitedName != "":
				// name extends past the delimiter: report the common prefix
				resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
			}
		}
	// Delimiter present and Prefix is absent
	case resources.IsDelimiterSet():
		delimitedName := delimiter(name, resources.Delimiter)
		switch true {
		case delimitedName == "":
			metadata, err = fs.GetObjectMetadata(bucket, name)
			if err != nil {
				return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
			}
		case delimitedName == file.Name():
			// delimiter only appears at the very end (or not at all in
			// the basename): treat as a plain object
			metadata, err = fs.GetObjectMetadata(bucket, name)
			if err != nil {
				return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
			}
		case delimitedName != "":
			resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
		}
	// Delimiter is absent and only Prefix is present
	case resources.IsPrefixSet():
		if strings.HasPrefix(name, resources.Prefix) {
			// Do not strip prefix object output
			metadata, err = fs.GetObjectMetadata(bucket, name)
			if err != nil {
				return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
			}
		}
	case resources.IsDefault():
		// no filtering requested: every object is returned
		metadata, err = fs.GetObjectMetadata(bucket, name)
		if err != nil {
			return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
		}
	}

	return metadata, resources, nil
}
@ -1,618 +0,0 @@ |
||||
/* |
||||
* Mini Object Storage, (C) 2015 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package filesystem |
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/md5" |
||||
"crypto/sha512" |
||||
"encoding/base64" |
||||
"encoding/hex" |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"math/rand" |
||||
"os" |
||||
"path/filepath" |
||||
"sort" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/minio/minio/pkg/iodine" |
||||
"github.com/minio/minio/pkg/storage/drivers" |
||||
) |
||||
|
||||
// MultipartSession holds active session information for one in-progress
// multipart upload; it is persisted as JSON in the object's "$multiparts"
// sidecar file.
type MultipartSession struct {
	TotalParts int                     // number of parts uploaded so far
	UploadID   string                  // opaque id handed to the client at initiation
	Initiated  time.Time               // UTC time the upload was started
	Parts      []*drivers.PartMetadata // per-part metadata, kept sorted by part number
}

// Multiparts collection of many parts, keyed by object key. Persisted as
// JSON in the bucket-level "$activeSession" file.
type Multiparts struct {
	ActiveSession map[string]*MultipartSession
}
||||
|
||||
// loadActiveSessions merges multipart sessions previously persisted in the
// bucket's "$activeSession" file into fs.multiparts.ActiveSession.
// Deliberately best-effort: any stat/open/decode failure returns silently,
// leaving the in-memory state untouched.
func (fs *fsDriver) loadActiveSessions(bucket string) {
	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	if err != nil {
		return
	}
	// the session file lives next to (not inside) the bucket directory,
	// named "<bucketPath>$activeSession"
	activeSessionFile, err := os.OpenFile(bucketPath+"$activeSession", os.O_RDONLY, 0600)
	if err != nil {
		return
	}
	defer activeSessionFile.Close()
	var deserializedActiveSession map[string]*MultipartSession
	decoder := json.NewDecoder(activeSessionFile)
	err = decoder.Decode(&deserializedActiveSession)
	if err != nil {
		return
	}
	// merge on top of any sessions already in memory
	for key, value := range deserializedActiveSession {
		fs.multiparts.ActiveSession[key] = value
	}
	return
}
||||
|
||||
func (fs *fsDriver) isValidUploadID(key, uploadID string) bool { |
||||
s, ok := fs.multiparts.ActiveSession[key] |
||||
if !ok { |
||||
return false |
||||
} |
||||
if uploadID == s.UploadID { |
||||
return true |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// writePart streams exactly size bytes from data into the part file
// "<objectPath>$<partID>", computing the MD5 of what was written, and
// returns the resulting part metadata (ETag, size, mtime).
func (fs *fsDriver) writePart(objectPath string, partID int, size int64, data io.Reader) (drivers.PartMetadata, error) {
	partPath := objectPath + fmt.Sprintf("$%d", partID)
	// write part
	// NOTE(review): opened without O_TRUNC — re-uploading a smaller part
	// over an existing larger one would leave stale trailing bytes, and
	// the Stat below would report the stale size. Confirm whether part
	// re-upload is possible in practice.
	partFile, err := os.OpenFile(partPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return drivers.PartMetadata{}, iodine.New(err, nil)
	}
	defer partFile.Close()

	// tee the stream through an MD5 hasher while writing to disk
	h := md5.New()
	mw := io.MultiWriter(partFile, h)

	_, err = io.CopyN(mw, data, size)
	if err != nil {
		return drivers.PartMetadata{}, iodine.New(err, nil)
	}

	// size and mtime are taken from the on-disk file, not the request
	fi, err := os.Stat(partPath)
	if err != nil {
		return drivers.PartMetadata{}, iodine.New(err, nil)
	}
	partMetadata := drivers.PartMetadata{}
	partMetadata.ETag = hex.EncodeToString(h.Sum(nil))
	partMetadata.PartNumber = partID
	partMetadata.Size = fi.Size()
	partMetadata.LastModified = fi.ModTime()
	return partMetadata, nil
}
||||
|
||||
// byKey is a sortable interface for UploadMetadata slice, ordering
// lexicographically by object Key.
type byKey []*drivers.UploadMetadata

func (a byKey) Len() int           { return len(a) }
func (a byKey) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
||||
|
||||
// ListMultipartUploads lists in-progress multipart uploads in the bucket,
// filtered by resources.Prefix and paginated via KeyMarker/UploadIDMarker,
// truncating at resources.MaxUploads. Sessions are first reloaded from the
// bucket's persisted "$activeSession" file.
func (fs *fsDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !drivers.IsValidBucket(bucket) {
		return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}
	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.InternalError{}, nil)
	}
	// load from disk
	fs.loadActiveSessions(bucket)

	var uploads []*drivers.UploadMetadata
	// NOTE(review): map iteration order is random; truncation below may
	// pick a different NextKeyMarker across calls. Sessions are only
	// sorted after collection.
	for key, session := range fs.multiparts.ActiveSession {
		if strings.HasPrefix(key, resources.Prefix) {
			// NOTE(review): "> MaxUploads" lets MaxUploads+1 entries
			// accumulate before truncating — possibly intended to be >=.
			if len(uploads) > resources.MaxUploads {
				sort.Sort(byKey(uploads))
				resources.Upload = uploads
				resources.NextKeyMarker = key
				resources.NextUploadIDMarker = session.UploadID
				resources.IsTruncated = true
				return resources, nil
			}
			// uploadIDMarker is ignored if KeyMarker is empty
			switch {
			case resources.KeyMarker != "" && resources.UploadIDMarker == "":
				// resume strictly after the key marker
				if key > resources.KeyMarker {
					upload := new(drivers.UploadMetadata)
					upload.Key = key
					upload.UploadID = session.UploadID
					upload.Initiated = session.Initiated
					uploads = append(uploads, upload)
				}
			case resources.KeyMarker != "" && resources.UploadIDMarker != "":
				// resume after both markers
				if session.UploadID > resources.UploadIDMarker {
					if key >= resources.KeyMarker {
						upload := new(drivers.UploadMetadata)
						upload.Key = key
						upload.UploadID = session.UploadID
						upload.Initiated = session.Initiated
						uploads = append(uploads, upload)
					}
				}
			default:
				// no markers: include every matching session
				upload := new(drivers.UploadMetadata)
				upload.Key = key
				upload.UploadID = session.UploadID
				upload.Initiated = session.Initiated
				uploads = append(uploads, upload)
			}
		}
	}
	sort.Sort(byKey(uploads))
	resources.Upload = uploads
	return resources, nil
}
||||
|
||||
func (fs *fsDriver) concatParts(parts map[int]string, objectPath string, mw io.Writer) error { |
||||
for i := 1; i <= len(parts); i++ { |
||||
recvMD5 := parts[i] |
||||
partFile, err := os.OpenFile(objectPath+fmt.Sprintf("$%d", i), os.O_RDONLY, 0600) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
obj, err := ioutil.ReadAll(partFile) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
calcMD5Bytes := md5.Sum(obj) |
||||
// complete multi part request header md5sum per part is hex encoded
|
||||
recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\"")) |
||||
if err != nil { |
||||
return iodine.New(drivers.InvalidDigest{Md5: recvMD5}, nil) |
||||
} |
||||
if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { |
||||
return iodine.New(drivers.BadDigest{Md5: recvMD5}, nil) |
||||
} |
||||
_, err = io.Copy(mw, bytes.NewBuffer(obj)) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// NewMultipartUpload initiates a multipart upload for bucket/key: it
// generates a fresh upload id, records a new MultipartSession in memory,
// and persists it to both the object's "$multiparts" sidecar and the
// bucket's "$activeSession" file. Fails if the object already exists.
// contentType is currently accepted but not recorded.
func (fs *fsDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !drivers.IsValidBucket(bucket) {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}
	if !drivers.IsValidObjectName(key) {
		return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}
	// create intermediate directories for keys containing path separators
	objectPath := filepath.Join(bucketPath, key)
	objectDir := filepath.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		err = os.MkdirAll(objectDir, 0700)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}

	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}

	// open (or create) the bucket-level session registry
	// NOTE(review): appending a whole fresh JSON document with O_APPEND on
	// every initiation means the file accumulates documents, while
	// loadActiveSessions decodes only the first — confirm intended.
	var activeSessionFile *os.File
	_, err = os.Stat(bucketPath + "$activeSession")
	switch {
	case os.IsNotExist(err):
		activeSessionFile, err = os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_CREATE, 0600)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	default:
		activeSessionFile, err = os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_APPEND, 0600)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}
	defer activeSessionFile.Close()

	// derive an upload id from randomness, the object coordinates and the
	// current time, hashed and base64url-encoded, truncated to 47 chars
	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer multiPartfile.Close()

	// record the new session in memory and on disk
	mpartSession := new(MultipartSession)
	mpartSession.TotalParts = 0
	mpartSession.UploadID = uploadID
	mpartSession.Initiated = time.Now().UTC()
	var parts []*drivers.PartMetadata
	mpartSession.Parts = parts
	fs.multiparts.ActiveSession[key] = mpartSession

	encoder := json.NewEncoder(multiPartfile)
	err = encoder.Encode(mpartSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	encoder = json.NewEncoder(activeSessionFile)
	err = encoder.Encode(fs.multiparts.ActiveSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	return uploadID, nil
}
||||
|
||||
// partNumber is a sortable interface for Part slice, ordering by ascending
// PartNumber.
type partNumber []*drivers.PartMetadata

func (a partNumber) Len() int           { return len(a) }
func (a partNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
||||
|
||||
// CreateObjectPart uploads one part of an active multipart upload: writes
// the data to "<objectPath>$<partID>", optionally verifies it against the
// client-supplied base64 MD5, appends the part to the persisted
// "$multiparts" session, and returns the part's ETag (hex MD5).
func (fs *fsDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	if partID <= 0 {
		return "", iodine.New(errors.New("invalid part id, cannot be zero or less than zero"), nil)
	}
	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	if !fs.isValidUploadID(key, uploadID) {
		return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
	}

	// normalize the client digest: it arrives base64-encoded (S3 style)
	// and is re-encoded as hex for comparison against the computed ETag
	if strings.TrimSpace(expectedMD5Sum) != "" {
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil)
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)

	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}

	// create intermediate directories for keys containing path separators
	objectPath := filepath.Join(bucketPath, key)
	objectDir := filepath.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		err = os.MkdirAll(objectDir, 0700)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}

	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}
	partMetadata, err := fs.writePart(objectPath, partID, size, data)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), partMetadata.ETag); err != nil {
			return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
		}
	}

	// re-read the persisted session, record the new part, and write the
	// session back
	// NOTE(review): file opened with O_APPEND, so the re-encoded session is
	// appended after the old document rather than replacing it; the decoder
	// above reads only the first document. Confirm intended.
	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR|os.O_APPEND, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer multiPartfile.Close()

	var deserializedMultipartSession MultipartSession
	decoder := json.NewDecoder(multiPartfile)
	err = decoder.Decode(&deserializedMultipartSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, &partMetadata)
	deserializedMultipartSession.TotalParts++
	fs.multiparts.ActiveSession[key] = &deserializedMultipartSession

	// keep parts ordered by part number for later concatenation
	sort.Sort(partNumber(deserializedMultipartSession.Parts))
	encoder := json.NewEncoder(multiPartfile)
	err = encoder.Encode(&deserializedMultipartSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	return partMetadata.ETag, nil
}
||||
|
||||
// CompleteMultipartUpload assembles the final object from its uploaded
// parts: concatenates and MD5-verifies each part into the object file,
// deletes the part files and the "$multiparts" sidecar, writes the
// "$metadata" sidecar, rewrites the bucket's "$activeSession" registry,
// and returns the hex MD5 of the assembled object.
// parts maps part number -> client-supplied MD5 (possibly quoted hex).
func (fs *fsDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	if !fs.isValidUploadID(key, uploadID) {
		return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}

	objectPath := filepath.Join(bucketPath, key)
	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}

	// stream all parts into the final object file while hashing
	file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()
	h := md5.New()
	mw := io.MultiWriter(file, h)
	err = fs.concatParts(parts, objectPath, mw)
	if err != nil {
		// NOTE(review): a partially-written object file is left behind on
		// failure here — confirm whether cleanup is handled elsewhere.
		return "", iodine.New(err, nil)
	}
	md5sum := hex.EncodeToString(h.Sum(nil))

	// the upload is complete: drop the session and its on-disk artifacts
	delete(fs.multiparts.ActiveSession, key)
	for partNumber := range parts {
		err = os.Remove(objectPath + fmt.Sprintf("$%d", partNumber))
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}
	err = os.Remove(objectPath + "$multiparts")
	if err != nil {
		return "", iodine.New(err, nil)
	}

	// write the object's "$metadata" sidecar
	// (reusing the file variable; the earlier defer closes the object
	// file since the receiver was captured at defer time)
	file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()

	metadata := &Metadata{
		ContentType: "application/octet-stream",
		Md5sum:      h.Sum(nil),
	}
	// serialize metadata to json
	encoder := json.NewEncoder(file)
	err = encoder.Encode(metadata)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	// truncate and rewrite the bucket-level session registry
	activeSessionFile, err := os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer activeSessionFile.Close()
	encoder = json.NewEncoder(activeSessionFile)
	err = encoder.Encode(fs.multiparts.ActiveSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	return md5sum, nil
}
||||
|
||||
// ListObjectParts - list parts already uploaded for the multipart session
// identified by resources.UploadID, starting at resources.PartNumberMarker
// (0 means "from the first part") and returning a page of parts.  Sets
// IsTruncated and NextPartNumberMarker when more parts remain.
func (fs *fsDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// load from disk - refresh fs.multiparts.ActiveSession for this bucket
	fs.loadActiveSessions(bucket)

	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	// the upload id must belong to a known active session for this key
	if !fs.isValidUploadID(key, resources.UploadID) {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InvalidUploadID{UploadID: resources.UploadID}, nil)
	}

	objectResourcesMetadata := resources
	objectResourcesMetadata.Bucket = bucket
	objectResourcesMetadata.Key = key
	var startPartNumber int
	switch {
	case objectResourcesMetadata.PartNumberMarker == 0:
		// part numbers are 1-based; marker 0 means start at the beginning
		startPartNumber = 1
	default:
		startPartNumber = objectResourcesMetadata.PartNumberMarker
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InternalError{}, nil)
	}

	// the "$multiparts" sidecar file holds the serialized MultipartSession
	objectPath := filepath.Join(bucketPath, key)
	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDONLY, 0600)
	if err != nil {
		return drivers.ObjectResourcesMetadata{}, iodine.New(err, nil)
	}
	defer multiPartfile.Close()

	var deserializedMultipartSession MultipartSession
	decoder := json.NewDecoder(multiPartfile)
	err = decoder.Decode(&deserializedMultipartSession)
	if err != nil {
		return drivers.ObjectResourcesMetadata{}, iodine.New(err, nil)
	}
	var parts []*drivers.PartMetadata
	for i := startPartNumber; i <= deserializedMultipartSession.TotalParts; i++ {
		// NOTE(review): this guard fires only once len(parts) already
		// exceeds MaxParts, so up to MaxParts+1 entries are returned —
		// looks like an off-by-one; confirm against the S3 ListParts
		// contract before changing.
		if len(parts) > objectResourcesMetadata.MaxParts {
			sort.Sort(partNumber(parts))
			objectResourcesMetadata.IsTruncated = true
			objectResourcesMetadata.Part = parts
			// next page resumes at the first part not yet returned
			objectResourcesMetadata.NextPartNumberMarker = i
			return objectResourcesMetadata, nil
		}
		// Parts slice is 0-indexed while part numbers are 1-indexed
		parts = append(parts, deserializedMultipartSession.Parts[i-1])
	}
	sort.Sort(partNumber(parts))
	objectResourcesMetadata.Part = parts
	return objectResourcesMetadata, nil
}
||||
|
||||
func (fs *fsDriver) AbortMultipartUpload(bucket, key, uploadID string) error { |
||||
fs.lock.Lock() |
||||
defer fs.lock.Unlock() |
||||
|
||||
// check bucket name valid
|
||||
if drivers.IsValidBucket(bucket) == false { |
||||
return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
|
||||
// verify object path legal
|
||||
if drivers.IsValidObjectName(key) == false { |
||||
return iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil) |
||||
} |
||||
|
||||
if !fs.isValidUploadID(key, uploadID) { |
||||
return iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil) |
||||
} |
||||
|
||||
bucketPath := filepath.Join(fs.root, bucket) |
||||
_, err := os.Stat(bucketPath) |
||||
// check bucket exists
|
||||
if os.IsNotExist(err) { |
||||
return iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) |
||||
} |
||||
if err != nil { |
||||
return iodine.New(drivers.InternalError{}, nil) |
||||
} |
||||
|
||||
objectPath := filepath.Join(bucketPath, key) |
||||
multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR, 0600) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
|
||||
var deserializedMultipartSession MultipartSession |
||||
decoder := json.NewDecoder(multiPartfile) |
||||
err = decoder.Decode(&deserializedMultipartSession) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
multiPartfile.Close() // close it right here, since we will delete it subsequently
|
||||
|
||||
delete(fs.multiparts.ActiveSession, key) |
||||
for _, part := range deserializedMultipartSession.Parts { |
||||
err = os.RemoveAll(objectPath + fmt.Sprintf("$%d", part.PartNumber)) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
} |
||||
err = os.RemoveAll(objectPath + "$multiparts") |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
return nil |
||||
} |
@ -1,295 +0,0 @@ |
||||
/* |
||||
* Mini Object Storage, (C) 2015 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package filesystem |
||||
|
||||
import ( |
||||
"bytes" |
||||
"io" |
||||
"os" |
||||
"path/filepath" |
||||
"strings" |
||||
|
||||
"crypto/md5" |
||||
"encoding/base64" |
||||
"encoding/hex" |
||||
"encoding/json" |
||||
"errors" |
||||
|
||||
"github.com/minio/minio/pkg/iodine" |
||||
"github.com/minio/minio/pkg/storage/drivers" |
||||
) |
||||
|
||||
/// Object Operations
|
||||
|
||||
// GetPartialObject - GET object from range
|
||||
func (fs *fsDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { |
||||
// validate bucket
|
||||
if drivers.IsValidBucket(bucket) == false { |
||||
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
|
||||
// validate object
|
||||
if drivers.IsValidObjectName(object) == false { |
||||
return 0, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
|
||||
objectPath := filepath.Join(fs.root, bucket, object) |
||||
filestat, err := os.Stat(objectPath) |
||||
switch err := err.(type) { |
||||
case nil: |
||||
{ |
||||
if filestat.IsDir() { |
||||
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
} |
||||
default: |
||||
{ |
||||
if os.IsNotExist(err) { |
||||
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
return 0, iodine.New(err, nil) |
||||
} |
||||
} |
||||
file, err := os.Open(objectPath) |
||||
if err != nil { |
||||
return 0, iodine.New(err, nil) |
||||
} |
||||
defer file.Close() |
||||
|
||||
_, err = file.Seek(start, os.SEEK_SET) |
||||
if err != nil { |
||||
return 0, iodine.New(err, nil) |
||||
} |
||||
|
||||
count, err := io.CopyN(w, file, length) |
||||
if err != nil { |
||||
return count, iodine.New(err, nil) |
||||
} |
||||
|
||||
return count, nil |
||||
} |
||||
|
||||
// GetObject - GET object from key
|
||||
func (fs *fsDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) { |
||||
// validate bucket
|
||||
if drivers.IsValidBucket(bucket) == false { |
||||
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
|
||||
// validate object
|
||||
if drivers.IsValidObjectName(object) == false { |
||||
return 0, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
objectPath := filepath.Join(fs.root, bucket, object) |
||||
filestat, err := os.Stat(objectPath) |
||||
switch err := err.(type) { |
||||
case nil: |
||||
{ |
||||
if filestat.IsDir() { |
||||
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
} |
||||
default: |
||||
{ |
||||
if os.IsNotExist(err) { |
||||
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
return 0, iodine.New(err, nil) |
||||
} |
||||
} |
||||
file, err := os.Open(objectPath) |
||||
defer file.Close() |
||||
if err != nil { |
||||
return 0, drivers.EmbedError(bucket, object, err) |
||||
} |
||||
|
||||
count, err := io.Copy(w, file) |
||||
if err != nil { |
||||
return count, iodine.New(err, nil) |
||||
} |
||||
return count, nil |
||||
} |
||||
|
||||
// GetObjectMetadata - HEAD object
|
||||
func (fs *fsDriver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) { |
||||
if drivers.IsValidBucket(bucket) == false { |
||||
return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
|
||||
if drivers.IsValidObjectName(object) == false { |
||||
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: bucket}, nil) |
||||
} |
||||
|
||||
// Do not use filepath.Join() since filepath.Join strips off any object names with '/', use them as is
|
||||
// in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat()
|
||||
objectPath := fs.root + "/" + bucket + "/" + object |
||||
stat, err := os.Stat(objectPath) |
||||
if os.IsNotExist(err) { |
||||
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
|
||||
_, err = os.Stat(objectPath + "$metadata") |
||||
if os.IsNotExist(err) { |
||||
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) |
||||
} |
||||
|
||||
file, err := os.Open(objectPath + "$metadata") |
||||
defer file.Close() |
||||
if err != nil { |
||||
return drivers.ObjectMetadata{}, iodine.New(err, nil) |
||||
} |
||||
|
||||
var deserializedMetadata Metadata |
||||
decoder := json.NewDecoder(file) |
||||
err = decoder.Decode(&deserializedMetadata) |
||||
if err != nil { |
||||
return drivers.ObjectMetadata{}, iodine.New(err, nil) |
||||
} |
||||
|
||||
contentType := "application/octet-stream" |
||||
if deserializedMetadata.ContentType != "" { |
||||
contentType = deserializedMetadata.ContentType |
||||
} |
||||
contentType = strings.TrimSpace(contentType) |
||||
|
||||
etag := bucket + "#" + filepath.Base(object) |
||||
if len(deserializedMetadata.Md5sum) != 0 { |
||||
etag = hex.EncodeToString(deserializedMetadata.Md5sum) |
||||
} |
||||
|
||||
metadata := drivers.ObjectMetadata{ |
||||
Bucket: bucket, |
||||
Key: object, |
||||
Created: stat.ModTime(), |
||||
Size: stat.Size(), |
||||
Md5: etag, |
||||
ContentType: contentType, |
||||
} |
||||
|
||||
return metadata, nil |
||||
} |
||||
|
||||
// isMD5SumEqual - returns error if md5sum mismatches, success its `nil`
|
||||
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error { |
||||
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" { |
||||
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum) |
||||
if err != nil { |
||||
return iodine.New(err, nil) |
||||
} |
||||
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) { |
||||
return iodine.New(errors.New("bad digest, md5sum mismatch"), nil) |
||||
} |
||||
return nil |
||||
} |
||||
return iodine.New(errors.New("invalid argument"), nil) |
||||
} |
||||
|
||||
// CreateObject - PUT object
|
||||
func (fs *fsDriver) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { |
||||
fs.lock.Lock() |
||||
defer fs.lock.Unlock() |
||||
|
||||
// check bucket name valid
|
||||
if drivers.IsValidBucket(bucket) == false { |
||||
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) |
||||
} |
||||
|
||||
// check bucket exists
|
||||
if _, err := os.Stat(filepath.Join(fs.root, bucket)); os.IsNotExist(err) { |
||||
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) |
||||
} |
||||
|
||||
// verify object path legal
|
||||
if drivers.IsValidObjectName(key) == false { |
||||
return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil) |
||||
} |
||||
|
||||
// verify content type
|
||||
if contentType == "" { |
||||
contentType = "application/octet-stream" |
||||
} |
||||
contentType = strings.TrimSpace(contentType) |
||||
|
||||
// get object path
|
||||
objectPath := filepath.Join(fs.root, bucket, key) |
||||
objectDir := filepath.Dir(objectPath) |
||||
if _, err := os.Stat(objectDir); os.IsNotExist(err) { |
||||
err = os.MkdirAll(objectDir, 0700) |
||||
if err != nil { |
||||
return "", iodine.New(err, nil) |
||||
} |
||||
} |
||||
|
||||
// check if object exists
|
||||
if _, err := os.Stat(objectPath); !os.IsNotExist(err) { |
||||
return "", iodine.New(drivers.ObjectExists{ |
||||
Bucket: bucket, |
||||
Object: key, |
||||
}, nil) |
||||
} |
||||
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" { |
||||
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) |
||||
if err != nil { |
||||
// pro-actively close the connection
|
||||
return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) |
||||
} |
||||
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) |
||||
} |
||||
|
||||
// write object
|
||||
file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600) |
||||
if err != nil { |
||||
return "", iodine.New(err, nil) |
||||
} |
||||
defer file.Close() |
||||
|
||||
h := md5.New() |
||||
mw := io.MultiWriter(file, h) |
||||
|
||||
_, err = io.CopyN(mw, data, size) |
||||
if err != nil { |
||||
return "", iodine.New(err, nil) |
||||
} |
||||
|
||||
file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600) |
||||
if err != nil { |
||||
return "", iodine.New(err, nil) |
||||
} |
||||
defer file.Close() |
||||
|
||||
metadata := &Metadata{ |
||||
ContentType: contentType, |
||||
Md5sum: h.Sum(nil), |
||||
} |
||||
// serialize metadata to json
|
||||
encoder := json.NewEncoder(file) |
||||
err = encoder.Encode(metadata) |
||||
|
||||
md5Sum := hex.EncodeToString(metadata.Md5sum) |
||||
// Verify if the written object is equal to what is expected, only if it is requested as such
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" { |
||||
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { |
||||
return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) |
||||
} |
||||
} |
||||
return md5Sum, nil |
||||
} |
@ -1,54 +0,0 @@ |
||||
/* |
||||
* Mini Object Storage, (C) 2015 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package filesystem |
||||
|
||||
import ( |
||||
"io/ioutil" |
||||
"os" |
||||
"testing" |
||||
|
||||
. "github.com/minio/check" |
||||
|
||||
"github.com/minio/minio/pkg/storage/drivers" |
||||
) |
||||
|
||||
// Test hooks the gocheck suite runner into the standard "go test" framework.
func Test(t *testing.T) { TestingT(t) }
||||
|
||||
// MySuite is an empty gocheck suite fixture for the filesystem driver tests.
type MySuite struct{}

// Register the suite with gocheck so its Test* methods are discovered and run.
var _ = Suite(&MySuite{})
||||
|
||||
func (s *MySuite) TestAPISuite(c *C) { |
||||
var storageList []string |
||||
create := func() drivers.Driver { |
||||
path, err := ioutil.TempDir(os.TempDir(), "minio-fs-") |
||||
c.Check(err, IsNil) |
||||
storageList = append(storageList, path) |
||||
store, err := NewDriver(path) |
||||
c.Check(err, IsNil) |
||||
return store |
||||
} |
||||
drivers.APITestSuite(c, create) |
||||
defer removeRoots(c, storageList) |
||||
} |
||||
|
||||
func removeRoots(c *C, roots []string) { |
||||
for _, root := range roots { |
||||
err := os.RemoveAll(root) |
||||
c.Check(err, IsNil) |
||||
} |
||||
} |
Loading…
Reference in new issue