tests: xl-v1-metadata.go, xl-v1-multipart-common.go - remove unused methods, add enhanced tests to improve code coverage. (#2260)

master
Krishna Srinivas 8 years ago committed by Harshavardhana
parent a7b5b8e63f
commit 303f216150
  1. 15
      fs-v1-multipart-common.go
  2. 25
      server_test.go
  3. 42
      xl-v1-metadata.go
  4. 26
      xl-v1-multipart-common.go

@ -19,7 +19,6 @@ package main
import (
"encoding/json"
"path"
"strings"
"time"
)
@ -29,20 +28,6 @@ func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool {
return err == nil
}
// listUploadsInfo - returns the list of all uploads recorded under the
// given multipart prefix path.
func (fs fsObjects) listUploadsInfo(prefixPath string) (uploads []uploadInfo, err error) {
	// prefixPath is "<mpartMetaPrefix>/<bucket>/<object...>"; peel off
	// bucket and object to locate the `uploads.json` entry.
	parts := strings.SplitN(prefixPath, "/", 3)
	uploadsJSON, jerr := readUploadsJSON(parts[1], parts[2], fs.storage)
	switch jerr {
	case nil:
		return uploadsJSON.Uploads, nil
	case errFileNotFound:
		// No `uploads.json` yet - report an empty upload list.
		return []uploadInfo{}, nil
	default:
		return nil, jerr
	}
}
// Checks whether bucket exists.
func (fs fsObjects) isBucketExist(bucket string) bool {
// Check whether bucket exists.

@ -1686,6 +1686,14 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
c.Assert(response.StatusCode, Equals, http.StatusOK)
objectName := "test-multipart-object"
// 1. Initiate 2 uploads for the same object
// 2. Upload 2 parts for the second upload
// 3. Abort the second upload.
// 4. Abort the first upload.
// This will test abort upload when there are more than one upload IDs
// and the case where there is only one upload ID.
// construct HTTP request to initiate a NewMultipart upload.
request, err = newTestSignedRequest("POST", getNewMultipartURL(s.endPoint, bucketName, objectName),
0, nil, s.accessKey, s.secretKey)
@ -1699,6 +1707,23 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
decoder := xml.NewDecoder(response.Body)
newResponse := &InitiateMultipartUploadResponse{}
err = decoder.Decode(newResponse)
c.Assert(err, IsNil)
c.Assert(len(newResponse.UploadID) > 0, Equals, true)
// construct HTTP request to initiate a NewMultipart upload.
request, err = newTestSignedRequest("POST", getNewMultipartURL(s.endPoint, bucketName, objectName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
// execute the HTTP request initiating the new multipart upload.
response, err = client.Do(request)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// parse the response body and obtain the new upload ID.
decoder = xml.NewDecoder(response.Body)
newResponse = &InitiateMultipartUploadResponse{}
err = decoder.Decode(newResponse)
c.Assert(err, IsNil)
c.Assert(len(newResponse.UploadID) > 0, Equals, true)

@ -65,24 +65,6 @@ type erasureInfo struct {
Checksum []checkSumInfo `json:"checksum,omitempty"`
}
// IsValid - reports whether the erasure info is sane: it must carry
// non-zero data and parity block counts and a non-empty distribution.
func (e erasureInfo) IsValid() bool {
	switch {
	case e.DataBlocks == 0:
		return false
	case e.ParityBlocks == 0:
		return false
	case len(e.Distribution) == 0:
		return false
	}
	return true
}
// pickValidErasureInfo - returns the first sane erasure info entry found
// in the given slice. If every entry is invalid this function panics,
// since callers have no way to proceed without valid erasure metadata.
func pickValidErasureInfo(eInfos []erasureInfo) erasureInfo {
	for i := range eInfos {
		if eInfos[i].IsValid() {
			return eInfos[i]
		}
	}
	panic("Unable to look for valid erasure info content")
}
// statInfo - carries stat information of the object.
type statInfo struct {
Size int64 `json:"size"` // Size of the object `xl.json`.
@ -249,30 +231,6 @@ func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err
return xlMetaV1{}, err
}
// Undo rename xl metadata, renames successfully renamed `xl.json` back to source location.
func (xl xlObjects) undoRenameXLMetadata(srcBucket, srcPrefix, dstBucket, dstPrefix string, errs []error) {
	srcJSONFile := path.Join(srcPrefix, xlMetaJSONFile)
	dstJSONFile := path.Join(dstPrefix, xlMetaJSONFile)

	var wg sync.WaitGroup
	// Rename `xl.json` back to its source location, in parallel, on every
	// disk where the forward rename previously succeeded.
	for index, disk := range xl.storageDisks {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			// A non-nil entry means the forward rename never happened
			// on this disk - nothing to undo here.
			if errs[index] != nil {
				return
			}
			// Best-effort undo; the result is intentionally ignored.
			_ = disk.RenameFile(dstBucket, dstJSONFile, srcBucket, srcJSONFile)
		}(index, disk)
	}
	wg.Wait()
}
// deleteXLMetadata - deletes `xl.json` on a single disk.
func deleteXLMetdata(disk StorageAPI, bucket, prefix string) error {
jsonFile := path.Join(prefix, xlMetaJSONFile)

@ -19,7 +19,6 @@ package main
import (
"encoding/json"
"path"
"strings"
"sync"
"time"
)
@ -198,31 +197,6 @@ func (xl xlObjects) isMultipartUpload(bucket, prefix string) bool {
return false
}
// listUploadsInfo - returns the list of all uploads recorded under the
// given multipart prefix path, reading from the first responsive disk.
func (xl xlObjects) listUploadsInfo(prefixPath string) (uploadsInfo []uploadInfo, err error) {
	// prefixPath is "<mpartMetaPrefix>/<bucket>/<object...>"; peel off
	// bucket and object once - it is the same for every disk.
	parts := strings.SplitN(prefixPath, "/", 3)
	for _, disk := range xl.getLoadBalancedDisks() {
		if disk == nil {
			continue
		}
		var uploadsJSON uploadsV1
		uploadsJSON, err = readUploadsJSON(parts[1], parts[2], disk)
		switch {
		case err == nil:
			return uploadsJSON.Uploads, nil
		case err == errFileNotFound:
			// No `uploads.json` yet - report an empty upload list.
			return []uploadInfo{}, nil
		case isErrIgnored(err, objMetadataOpIgnoredErrs):
			// Disk deleted or offline - try the next one.
			continue
		}
		// Unexpected error - stop probing and report it.
		break
	}
	return []uploadInfo{}, err
}
// isUploadIDExists - verify if a given uploadID exists and is valid.
func (xl xlObjects) isUploadIDExists(bucket, object, uploadID string) bool {
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)

Loading…
Cancel
Save