@ -20,8 +20,8 @@ import (
"crypto/md5"
"crypto/md5"
"encoding/hex"
"encoding/hex"
"encoding/json"
"encoding/json"
"fmt"
"io"
"io"
"path"
"path/filepath"
"path/filepath"
"strings"
"strings"
@ -29,7 +29,8 @@ import (
)
)
// Constants describing the on-disk layout of a multipart object: every
// part file carries the multipart suffix, and the metadata file sorts
// first among them ("00000" precedes any part number).
const (
	multipartSuffix   = ".minio.multipart"
	multipartMetaFile = "00000" + multipartSuffix
)
// xlObjects - Implements fs object layer.
// xlObjects - Implements fs object layer.
@ -72,25 +73,6 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// GetObject - get an object.
// GetObject - get an object.
func ( xl xlObjects ) GetObject ( bucket , object string , startOffset int64 ) ( io . ReadCloser , error ) {
func ( xl xlObjects ) GetObject ( bucket , object string , startOffset int64 ) ( io . ReadCloser , error ) {
findPartOffset := func ( parts completedParts ) ( partIndex int , partOffset int64 , err error ) {
partOffset = startOffset
for i , part := range parts {
partIndex = i
var fileInfo FileInfo
fileInfo , err = xl . storage . StatFile ( bucket , pathJoin ( object , fmt . Sprint ( part . PartNumber ) ) )
if err != nil {
return
}
if partOffset < fileInfo . Size {
return
}
partOffset -= fileInfo . Size
}
// Offset beyond the size of the object
err = errUnexpected
return
}
// Verify if bucket is valid.
// Verify if bucket is valid.
if ! IsValidBucketName ( bucket ) {
if ! IsValidBucketName ( bucket ) {
return nil , BucketNameInvalid { Bucket : bucket }
return nil , BucketNameInvalid { Bucket : bucket }
@ -111,18 +93,18 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64) (io.Read
return nil , toObjectErr ( err , bucket , object )
return nil , toObjectErr ( err , bucket , object )
}
}
fileReader , fileWriter := io . Pipe ( )
fileReader , fileWriter := io . Pipe ( )
parts , err := xl . getParts ( bucket , object )
info , err := xl . getMultipartObjectInfo ( bucket , object )
if err != nil {
if err != nil {
return nil , toObjectErr ( err , bucket , object )
return nil , toObjectErr ( err , bucket , object )
}
}
partIndex , offset , err := findPartOffset ( parts )
partIndex , offset , err := info . GetPartNumberOffset ( startOffset )
if err != nil {
if err != nil {
return nil , toObjectErr ( err , bucket , object )
return nil , toObjectErr ( err , bucket , object )
}
}
go func ( ) {
go func ( ) {
for ; partIndex < len ( parts ) ; partIndex ++ {
for ; partIndex < len ( info ) ; partIndex ++ {
part := parts [ partIndex ]
part := info [ partIndex ]
r , err := xl . storage . ReadFile ( bucket , pathJoin ( object , fmt . Sprint ( part . PartNumber ) ) , offset )
r , err := xl . storage . ReadFile ( bucket , pathJoin ( object , partNumToPartFileName ( part . PartNumber ) ) , offset )
if err != nil {
if err != nil {
fileWriter . CloseWithError ( err )
fileWriter . CloseWithError ( err )
return
return
@ -138,38 +120,19 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64) (io.Read
}
}
// Return the parts of a multipart upload.
// Return the parts of a multipart upload.
func ( xl xlObjects ) getParts ( bucket , object string ) ( parts completedParts , err error ) {
func ( xl xlObjects ) getMultipartObjectInfo ( bucket , object string ) ( info MultipartObjectInfo , err error ) {
offset := int64 ( 0 )
offset := int64 ( 0 )
r , err := xl . storage . ReadFile ( bucket , pathJoin ( object , multipartMetaFile ) , offset )
r , err := xl . storage . ReadFile ( bucket , pathJoin ( object , multipartMetaFile ) , offset )
if err != nil {
if err != nil {
return
return
}
}
// FIXME: what if multipart.json is > 4MB
decoder := json . NewDecoder ( r )
b := make ( [ ] byte , 4 * 1024 * 1024 )
err = decoder . Decode ( & info )
n , err := io . ReadFull ( r , b )
if err != nil && err != io . ErrUnexpectedEOF {
return
}
b = b [ : n ]
err = json . Unmarshal ( b , & parts )
if err != nil {
return
}
return
return
}
}
// GetObjectInfo - get object info.
// GetObjectInfo - get object info.
func ( xl xlObjects ) GetObjectInfo ( bucket , object string ) ( ObjectInfo , error ) {
func ( xl xlObjects ) GetObjectInfo ( bucket , object string ) ( ObjectInfo , error ) {
getMultpartFileSize := func ( parts completedParts ) ( size int64 ) {
for _ , part := range parts {
fi , err := xl . storage . StatFile ( bucket , pathJoin ( object , fmt . Sprint ( part . PartNumber ) ) )
if err != nil {
continue
}
size += fi . Size
}
return size
}
// Verify if bucket is valid.
// Verify if bucket is valid.
if ! IsValidBucketName ( bucket ) {
if ! IsValidBucketName ( bucket ) {
return ObjectInfo { } , BucketNameInvalid { Bucket : bucket }
return ObjectInfo { } , BucketNameInvalid { Bucket : bucket }
@ -180,11 +143,11 @@ func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
}
}
fi , err := xl . storage . StatFile ( bucket , object )
fi , err := xl . storage . StatFile ( bucket , object )
if err != nil {
if err != nil {
parts , err := xl . getParts ( bucket , object )
info , err := xl . getMultipartObjectInfo ( bucket , object )
if err != nil {
if err != nil {
return ObjectInfo { } , toObjectErr ( err , bucket , object )
return ObjectInfo { } , toObjectErr ( err , bucket , object )
}
}
fi . Size = getMultpartFileSize ( parts )
fi . Size = info . GetSize ( )
}
}
contentType := "application/octet-stream"
contentType := "application/octet-stream"
if objectExt := filepath . Ext ( object ) ; objectExt != "" {
if objectExt := filepath . Ext ( object ) ; objectExt != "" {
@ -287,6 +250,8 @@ func (xl xlObjects) DeleteObject(bucket, object string) error {
return nil
return nil
}
}
// TODO - support non-recursive case, figure out file size for files uploaded using multipart.
func ( xl xlObjects ) ListObjects ( bucket , prefix , marker , delimiter string , maxKeys int ) ( ListObjectsInfo , error ) {
func ( xl xlObjects ) ListObjects ( bucket , prefix , marker , delimiter string , maxKeys int ) ( ListObjectsInfo , error ) {
// Verify if bucket is valid.
// Verify if bucket is valid.
if ! IsValidBucketName ( bucket ) {
if ! IsValidBucketName ( bucket ) {
@ -311,21 +276,71 @@ func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
}
}
}
}
if maxKeys == 0 {
return ListObjectsInfo { } , nil
}
// Default is recursive, if delimiter is set then list non recursive.
// Default is recursive, if delimiter is set then list non recursive.
recursive := true
recursive := true
if delimiter == slashSeparator {
if delimiter == slashSeparator {
recursive = false
recursive = false
}
}
fileInfos , eof , err := xl . storage . ListFiles ( bucket , prefix , marker , recursive , maxKeys )
var allFileInfos , fileInfos [ ] FileInfo
if err != nil {
var eof bool
return ListObjectsInfo { } , toObjectErr ( err , bucket )
var err error
}
for {
if maxKeys == 0 {
fileInfos , eof , err = xl . storage . ListFiles ( bucket , prefix , marker , recursive , maxKeys )
return ListObjectsInfo { } , nil
if err != nil {
return ListObjectsInfo { } , toObjectErr ( err , bucket )
}
for _ , fileInfo := range fileInfos {
// FIXME: use fileInfo.Mode.IsDir() instead after fixing the bug in
// XL listing which is not reseting the Mode to 0 for leaf dirs.
if strings . HasSuffix ( fileInfo . Name , slashSeparator ) {
if isLeafDirectory ( xl . storage , bucket , fileInfo . Name ) {
fileInfo . Name = strings . TrimSuffix ( fileInfo . Name , slashSeparator )
// Set the Mode to a "regular" file.
fileInfo . Mode = 0
var info MultipartObjectInfo
info , err = xl . getMultipartObjectInfo ( bucket , fileInfo . Name )
if err != nil {
return ListObjectsInfo { } , toObjectErr ( err , bucket )
}
fileInfo . Size = info . GetSize ( )
allFileInfos = append ( allFileInfos , fileInfo )
maxKeys --
continue
}
}
if strings . HasSuffix ( fileInfo . Name , multipartMetaFile ) {
fileInfo . Name = path . Dir ( fileInfo . Name )
var info MultipartObjectInfo
info , err = xl . getMultipartObjectInfo ( bucket , fileInfo . Name )
if err != nil {
return ListObjectsInfo { } , toObjectErr ( err , bucket )
}
fileInfo . Size = info . GetSize ( )
allFileInfos = append ( allFileInfos , fileInfo )
maxKeys --
continue
}
if strings . HasSuffix ( fileInfo . Name , multipartSuffix ) {
continue
}
allFileInfos = append ( allFileInfos , fileInfo )
maxKeys --
}
if maxKeys == 0 {
break
}
if eof {
break
}
}
}
result := ListObjectsInfo { IsTruncated : ! eof }
result := ListObjectsInfo { IsTruncated : ! eof }
for _ , fileInfo := range fileInfos {
for _ , fileInfo := range allFileInfos {
// With delimiter set we fill in NextMarker and Prefixes.
// With delimiter set we fill in NextMarker and Prefixes.
if delimiter == slashSeparator {
if delimiter == slashSeparator {
result . NextMarker = fileInfo . Name
result . NextMarker = fileInfo . Name