@@ -17,7 +17,9 @@
package cmd
import (
"bytes"
"errors"
"path"
"strconv"
"testing"
"time"
@@ -25,6 +27,162 @@ import (
humanize "github.com/dustin/go-humanize"
)
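// ExecObjectLayerDiskAlteredTest is expected to run the given test against an
// XL (erasure-coded) object layer and pass in the backing disk paths, so the
// test can simulate disks going offline.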
// Tests for reading XL object info.
func TestXLReadStat(t *testing.T) {
	ExecObjectLayerDiskAlteredTest(t, testXLReadStat)
}
func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
	// Setup for the tests.
	bucketName := getRandomBucketName()
	objectName := "test-object"
	// Create bucket.
	err := obj.MakeBucket(bucketName)
	// Stop the test if creation of the bucket fails.
	if err != nil {
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
	// Set of byte data for PutObject.
	// The object has to be created before the metadata read tests can run;
	// an object must exist on disk before its xl.json can be read back, so
	// this upload is a prerequisite for the assertions below.
	bytesData := []struct {
		byteData []byte
	}{
		{generateBytesData(6 * humanize.MiByte)},
	}
	// Set of inputs for uploading the objects before the read tests are run.
	putObjectInputs := []struct {
		bucketName    string
		objectName    string
		contentLength int64
		textData      []byte
		metaData      map[string]string
	}{
		// case - 1.
		{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
	}
	sha256sum := ""
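	// An empty sha256sum is assumed to skip SHA256 content verification in PutObject.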
	// Iterate through the above set of inputs and upload the object.
	for i, input := range putObjectInputs {
		// Uploading the object.
		_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
		// If the object upload fails, stop the test.
		if err != nil {
			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
		}
	}
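	// readXLMetaStat reads back only the stat and metadata portions of the object's
	// xl.json; with all disks online this is expected to succeed.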
	_, _, err = obj.(*xlObjects).readXLMetaStat(bucketName, objectName)
	if err != nil {
		t.Fatal(err)
	}
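	// xl.json is replicated on every disk of the erasure set, so losing a single
	// disk is assumed to leave enough copies for the metadata read to still succeed.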
	// Remove one disk.
	removeDiskN(disks, 7)
	// Removing a disk shouldn't affect reading object info.
	_, _, err = obj.(*xlObjects).readXLMetaStat(bucketName, objectName)
	if err != nil {
		t.Fatal(err)
	}
	for _, disk := range disks {
		removeAll(path.Join(disk, bucketName))
	}
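	// With the bucket directory removed from every disk, the volume itself is gone,
	// so the read is expected to fail with errVolumeNotFound.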
	_, _, err = obj.(*xlObjects).readXLMetaStat(bucketName, objectName)
	if errorCause(err) != errVolumeNotFound {
		t.Fatal(err)
	}
}
// Tests for reading XL meta parts.
func TestXLReadMetaParts(t *testing.T) {
	ExecObjectLayerDiskAlteredTest(t, testXLReadMetaParts)
}
// testXLReadMetaParts - validates reading of object part metadata when disks go offline.
func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
	bucketNames := []string{"minio-bucket", "minio-2-bucket"}
	objectNames := []string{"minio-object-1.txt"}
	uploadIDs := []string{}
	// Uses bucketNames[0], objectNames[0] and uploadIDs[0].
	// Create bucket before initiating NewMultipartUpload.
	err := obj.MakeBucket(bucketNames[0])
	if err != nil {
		// Failed to create new bucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
	// Initiate Multipart Upload on the above created bucket.
	uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
	if err != nil {
		// Failed to create NewMultipartUpload, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
	uploadIDs = append(uploadIDs, uploadID)
	// Create multipart parts.
	// Parts need to be uploaded before the part metadata can be read back and tested.
	createPartCases := []struct {
		bucketName      string
		objName         string
		uploadID        string
		PartID          int
		inputReaderData string
		inputMd5        string
		inputDataSize   int64
		expectedMd5     string
	}{
		// Cases 1-4.
		// Creating a sequence of parts for the same uploadID.
		// Ensures that the part metadata read back covers all four parts uploaded below for the given upload ID.
		{bucketNames[0], objectNames[0], uploadIDs[0], 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), "e2fc714c4727ee9395f324cd2e7f331f"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh")), "1f7690ebdd9b4caf8fab49ca1757bf27"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("ijkl")), "09a0877d04abf8759f99adec02baf579"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("mnop")), "e132e96a5ddad6da8b07bba6f6131fef"},
	}
	sha256sum := ""
	// Iterate over createPartCases to generate multipart chunks.
	for _, testCase := range createPartCases {
		_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.inputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
		if perr != nil {
			t.Fatalf("%s : %s", instanceType, perr)
		}
	}
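	// Multipart uploads are tracked under minioMetaMultipartBucket at
	// <bucket>/<object>/<uploadID>, which is the path handed to readXLMetaParts below.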
	uploadIDPath := path.Join(bucketNames[0], objectNames[0], uploadIDs[0])
	_, err = obj.(*xlObjects).readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
	if err != nil {
		t.Fatal(err)
	}
	// Remove one disk.
	removeDiskN(disks, 7)
	// Removing a disk shouldn't affect reading object parts info.
	_, err = obj.(*xlObjects).readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
	if err != nil {
		t.Fatal(err)
	}
	for _, disk := range disks {
		removeAll(path.Join(disk, bucketNames[0]))
		removeAll(path.Join(disk, minioMetaMultipartBucket, bucketNames[0]))
	}
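	// Only the bucket's directory under the multipart volume is removed, not the
	// volume itself, so the expected failure is errFileNotFound rather than
	// errVolumeNotFound.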
	_, err = obj.(*xlObjects).readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
	if errorCause(err) != errFileNotFound {
		t.Fatal(err)
	}
}
// Test xlMetaV1.AddObjectPart()
func TestAddObjectPart(t *testing.T) {
	testCases := []struct {