@@ -39,19 +39,19 @@ const configDir = ".minio"
 const uploadIDSuffix = ".uploadid"
 
 func removeFileTree(fileName string, level string) error {
-	if err := os.Remove(fileName); err != nil {
-		return err
+	if e := os.Remove(fileName); e != nil {
+		return e
 	}
 	for fileDir := filepath.Dir(fileName); fileDir > level; fileDir = filepath.Dir(fileDir) {
-		if status, err := isDirEmpty(fileDir); err != nil {
-			return err
+		if status, e := isDirEmpty(fileDir); e != nil {
+			return e
 		} else if !status {
 			break
 		}
-		if err := os.Remove(fileDir); err != nil {
-			return err
+		if e := os.Remove(fileDir); e != nil {
+			return e
 		}
 	}
@@ -59,17 +59,17 @@ func removeFileTree(fileName string, level string) error {
 }
 
 func safeWrite(fileName string, data io.Reader, size int64, md5sum string) error {
-	tempFile, err := ioutil.TempFile(filepath.Dir(fileName), filepath.Base(fileName)+"-")
-	if err != nil {
-		return err
+	tempFile, e := ioutil.TempFile(filepath.Dir(fileName), filepath.Base(fileName)+"-")
+	if e != nil {
+		return e
 	}
 	md5Hasher := md5.New()
 	multiWriter := io.MultiWriter(md5Hasher, tempFile)
-	if _, err := io.CopyN(multiWriter, data, size); err != nil {
+	if _, e := io.CopyN(multiWriter, data, size); e != nil {
 		tempFile.Close()
 		os.Remove(tempFile.Name())
-		return err
+		return e
 	}
 	tempFile.Close()
@@ -79,22 +79,22 @@ func safeWrite(fileName string, data io.Reader, size int64, md5sum string) error {
 		return BadDigest{ExpectedMD5: md5sum, CalculatedMD5: dataMd5sum}
 	}
-	if err := os.Rename(tempFile.Name(), fileName); err != nil {
+	if e := os.Rename(tempFile.Name(), fileName); e != nil {
 		os.Remove(tempFile.Name())
-		return err
+		return e
 	}
 	return nil
 }
 
 func isFileExist(filename string) (bool, error) {
-	fi, err := os.Lstat(filename)
-	if err != nil {
-		if os.IsNotExist(err) {
+	fi, e := os.Lstat(filename)
+	if e != nil {
+		if os.IsNotExist(e) {
 			return false, nil
 		}
-		return false, err
+		return false, e
 	}
 	return fi.Mode().IsRegular(), nil
@@ -120,30 +120,30 @@ func (fs Filesystem) newUploadID(bucket, object string) (string, error) {
 	metaObjectDir := filepath.Join(fs.path, configDir, bucket, object)
 	// create metaObjectDir if not exist
-	if status, err := isDirExist(metaObjectDir); err != nil {
-		return "", err
+	if status, e := isDirExist(metaObjectDir); e != nil {
+		return "", e
 	} else if !status {
-		if err := os.MkdirAll(metaObjectDir, 0755); err != nil {
-			return "", err
+		if e := os.MkdirAll(metaObjectDir, 0755); e != nil {
+			return "", e
 		}
 	}
 	for {
-		uuid, err := uuid.New()
-		if err != nil {
-			return "", err
+		uuid, e := uuid.New()
+		if e != nil {
+			return "", e
 		}
 		uploadID := uuid.String()
 		uploadIDFile := filepath.Join(metaObjectDir, uploadID+uploadIDSuffix)
-		if _, err := os.Lstat(uploadIDFile); err != nil {
-			if !os.IsNotExist(err) {
-				return "", err
+		if _, e := os.Lstat(uploadIDFile); e != nil {
+			if !os.IsNotExist(e) {
+				return "", e
 			}
 			// uploadIDFile doesn't exist, so create empty file to reserve the name
-			if err := ioutil.WriteFile(uploadIDFile, []byte{}, 0644); err != nil {
-				return "", err
+			if e := ioutil.WriteFile(uploadIDFile, []byte{}, 0644); e != nil {
+				return "", e
 			}
 			return uploadID, nil
@@ -161,32 +161,32 @@ func (fs Filesystem) cleanupUploadID(bucket, object, uploadID string) error {
 	metaObjectDir := filepath.Join(fs.path, configDir, bucket, object)
 	uploadIDPrefix := uploadID + "."
-	names, err := filteredReaddirnames(metaObjectDir,
+	names, e := filteredReaddirnames(metaObjectDir,
 		func(name string) bool {
 			return strings.HasPrefix(name, uploadIDPrefix)
 		},
 	)
-	if err != nil {
-		return err
+	if e != nil {
+		return e
 	}
 	for _, name := range names {
-		if err := os.Remove(filepath.Join(metaObjectDir, name)); err != nil {
+		if e := os.Remove(filepath.Join(metaObjectDir, name)); e != nil {
 			//return InternalError{Err: err}
-			return err
+			return e
 		}
 	}
-	if status, err := isDirEmpty(metaObjectDir); err != nil {
+	if status, e := isDirEmpty(metaObjectDir); e != nil {
 		// TODO: add log than returning error
 		//return InternalError{Err: err}
-		return err
+		return e
 	} else if status {
-		if err := removeFileTree(metaObjectDir, filepath.Join(fs.path, configDir, bucket)); err != nil {
+		if e := removeFileTree(metaObjectDir, filepath.Join(fs.path, configDir, bucket)); e != nil {
 			// TODO: add log than returning error
 			//return InternalError{Err: err}
-			return err
+			return e
 		}
 	}
@@ -199,9 +199,9 @@ func (fs Filesystem) checkBucketArg(bucket string) (string, error) {
 	}
 	bucket = getActualBucketname(fs.path, bucket)
-	if status, err := isDirExist(filepath.Join(fs.path, bucket)); err != nil {
+	if status, e := isDirExist(filepath.Join(fs.path, bucket)); e != nil {
 		//return "", InternalError{Err: err}
-		return "", err
+		return "", e
 	} else if !status {
 		return "", BucketNotFound{Bucket: bucket}
 	}
@@ -210,13 +210,12 @@ func (fs Filesystem) checkBucketArg(bucket string) (string, error) {
 }
 
 func (fs Filesystem) checkDiskFree() error {
-	di, err := disk.GetInfo(fs.path)
-	if err != nil {
-		return err
+	di, e := disk.GetInfo(fs.path)
+	if e != nil {
+		return e
 	}
-	// Remove 5% from total space for cumulative disk space used for
-	// journalling, inodes etc.
+	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
 	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
 	if int64(availableDiskSpace) <= fs.minFreeDisk {
 		return RootPathFull{Path: fs.path}
@@ -226,9 +225,9 @@ func (fs Filesystem) checkDiskFree() error {
 }
 
 func (fs Filesystem) checkMultipartArgs(bucket, object string) (string, error) {
-	bucket, err := fs.checkBucketArg(bucket)
-	if err != nil {
-		return "", err
+	bucket, e := fs.checkBucketArg(bucket)
+	if e != nil {
+		return "", e
 	}
 	if !IsValidObjectName(object) {
@@ -240,19 +239,19 @@ func (fs Filesystem) checkMultipartArgs(bucket, object string) (string, error) {
 // NewMultipartUpload - initiate a new multipart session
 func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
-	if bucketDirName, err := fs.checkMultipartArgs(bucket, object); err == nil {
+	if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
 		bucket = bucketDirName
 	} else {
-		return "", probe.NewError(err)
+		return "", probe.NewError(e)
 	}
-	if err := fs.checkDiskFree(); err != nil {
-		return "", probe.NewError(err)
+	if e := fs.checkDiskFree(); e != nil {
+		return "", probe.NewError(e)
 	}
-	uploadID, err := fs.newUploadID(bucket, object)
-	if err != nil {
-		return "", probe.NewError(err)
+	uploadID, e := fs.newUploadID(bucket, object)
+	if e != nil {
+		return "", probe.NewError(e)
 	}
 	return uploadID, nil
@@ -260,15 +259,15 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
 // PutObjectPart - create a part in a multipart session
 func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partNumber int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) {
-	if bucketDirName, err := fs.checkMultipartArgs(bucket, object); err == nil {
+	if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
 		bucket = bucketDirName
 	} else {
-		return "", probe.NewError(err)
+		return "", probe.NewError(e)
 	}
-	if status, err := fs.isUploadIDExist(bucket, object, uploadID); err != nil {
+	if status, e := fs.isUploadIDExist(bucket, object, uploadID); e != nil {
 		//return "", probe.NewError(InternalError{Err: err})
-		return "", probe.NewError(err)
+		return "", probe.NewError(e)
 	} else if !status {
 		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
 	}
@@ -282,13 +281,13 @@ func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partNumber int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) {
 		return "", probe.NewError(errors.New("invalid part id, should be not more than 10000"))
 	}
-	if err := fs.checkDiskFree(); err != nil {
-		return "", probe.NewError(err)
+	if e := fs.checkDiskFree(); e != nil {
+		return "", probe.NewError(e)
 	}
 	partFile := filepath.Join(fs.path, configDir, bucket, object, uploadID+"."+strconv.Itoa(partNumber)+"."+md5Hex)
-	if err := safeWrite(partFile, data, size, md5Hex); err != nil {
-		return "", probe.NewError(err)
+	if e := safeWrite(partFile, data, size, md5Hex); e != nil {
+		return "", probe.NewError(e)
 	}
 	return md5Hex, nil
@@ -296,21 +295,21 @@ func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partNumber int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) {
 // AbortMultipartUpload - abort an incomplete multipart session
 func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
-	if bucketDirName, err := fs.checkMultipartArgs(bucket, object); err == nil {
+	if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
 		bucket = bucketDirName
 	} else {
-		return probe.NewError(err)
+		return probe.NewError(e)
 	}
-	if status, err := fs.isUploadIDExist(bucket, object, uploadID); err != nil {
+	if status, e := fs.isUploadIDExist(bucket, object, uploadID); e != nil {
 		//return probe.NewError(InternalError{Err: err})
-		return probe.NewError(err)
+		return probe.NewError(e)
 	} else if !status {
 		return probe.NewError(InvalidUploadID{UploadID: uploadID})
 	}
-	if err := fs.cleanupUploadID(bucket, object, uploadID); err != nil {
-		return probe.NewError(err)
+	if e := fs.cleanupUploadID(bucket, object, uploadID); e != nil {
+		return probe.NewError(e)
 	}
 	return nil
@@ -318,49 +317,48 @@ func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
 // CompleteMultipartUpload - complete a multipart upload and persist the data
 func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, parts []completePart) (ObjectInfo, *probe.Error) {
-	if bucketDirName, err := fs.checkMultipartArgs(bucket, object); err == nil {
+	if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
 		bucket = bucketDirName
 	} else {
-		return ObjectInfo{}, probe.NewError(err)
+		return ObjectInfo{}, probe.NewError(e)
 	}
-	if status, err := fs.isUploadIDExist(bucket, object, uploadID); err != nil {
+	if status, e := fs.isUploadIDExist(bucket, object, uploadID); e != nil {
 		//return probe.NewError(InternalError{Err: err})
-		return ObjectInfo{}, probe.NewError(err)
+		return ObjectInfo{}, probe.NewError(e)
 	} else if !status {
 		return ObjectInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
 	}
-	if err := fs.checkDiskFree(); err != nil {
-		return ObjectInfo{}, probe.NewError(err)
+	if e := fs.checkDiskFree(); e != nil {
+		return ObjectInfo{}, probe.NewError(e)
 	}
 	metaObjectDir := filepath.Join(fs.path, configDir, bucket, object)
-	var md5sums []string
+	var md5Sums []string
 	for _, part := range parts {
 		partNumber := part.PartNumber
 		md5sum := strings.Trim(part.ETag, "\"")
 		partFile := filepath.Join(metaObjectDir, uploadID+"."+strconv.Itoa(partNumber)+"."+md5sum)
 		if status, err := isFileExist(partFile); err != nil {
 			//return ObjectInfo{}, probe.NewError(InternalError{Err: err})
 			return ObjectInfo{}, probe.NewError(err)
 		} else if !status {
			return ObjectInfo{}, probe.NewError(InvalidPart{})
 		}
-		md5sums = append(md5sums, md5sum)
+		md5Sums = append(md5Sums, md5sum)
 	}
 	// Save the s3 md5.
-	s3MD5, perr := makeS3MD5(md5sums...)
-	if perr != nil {
-		return ObjectInfo{}, perr
+	s3MD5, err := makeS3MD5(md5Sums...)
+	if err != nil {
+		return ObjectInfo{}, err.Trace(md5Sums...)
 	}
-	tempFile, err := ioutil.TempFile(metaObjectDir, uploadID+".complete.")
-	if err != nil {
+	tempFile, e := ioutil.TempFile(metaObjectDir, uploadID+".complete.")
+	if e != nil {
 		//return ObjectInfo{}, probe.NewError(InternalError{Err: err})
-		return ObjectInfo{}, probe.NewError(err)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 	for _, part := range parts {
@@ -368,38 +366,38 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, parts []completePart) (ObjectInfo, *probe.Error) {
 		md5sum := strings.Trim(part.ETag, "\"")
 		partFile := filepath.Join(metaObjectDir, uploadID+"."+strconv.Itoa(partNumber)+"."+md5sum)
 		var f *os.File
-		f, err = os.Open(partFile)
-		if err != nil {
+		f, e = os.Open(partFile)
+		if e != nil {
 			tempFile.Close()
 			os.Remove(tempFile.Name())
 			//return ObjectInfo{}, probe.NewError(InternalError{Err: err})
-			return ObjectInfo{}, probe.NewError(err)
-		} else if _, err = io.Copy(tempFile, f); err != nil {
+			return ObjectInfo{}, probe.NewError(e)
+		} else if _, e = io.Copy(tempFile, f); e != nil {
 			tempFile.Close()
 			os.Remove(tempFile.Name())
 			//return ObjectInfo{}, probe.NewError(InternalError{Err: err})
-			return ObjectInfo{}, probe.NewError(err)
+			return ObjectInfo{}, probe.NewError(e)
 		}
 		f.Close()
 	}
 	tempFile.Close()
 	// fi is used later
-	fi, err := os.Stat(tempFile.Name())
-	if err != nil {
+	fi, e := os.Stat(tempFile.Name())
+	if e != nil {
 		os.Remove(tempFile.Name())
-		return ObjectInfo{}, probe.NewError(err)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 	bucketPath := filepath.Join(fs.path, bucket)
 	objectPath := filepath.Join(bucketPath, object)
-	if err = os.MkdirAll(filepath.Dir(objectPath), 0755); err != nil {
+	if e = os.MkdirAll(filepath.Dir(objectPath), 0755); e != nil {
 		os.Remove(tempFile.Name())
 		//return ObjectInfo{}, probe.NewError(InternalError{Err: err})
-		return ObjectInfo{}, probe.NewError(err)
+		return ObjectInfo{}, probe.NewError(e)
 	}
-	if err = os.Rename(tempFile.Name(), objectPath); err != nil {
+	if e = os.Rename(tempFile.Name(), objectPath); e != nil {
 		os.Remove(tempFile.Name())
-		return ObjectInfo{}, probe.NewError(err)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 	fs.cleanupUploadID(bucket, object, uploadID) // TODO: handle and log the error