xl: Rename getOrderedDisks as shuffleDisks appropriately. (#3796)

This PR is a readability cleanup:

- Rename getOrderedDisks to shuffleDisks
- Rename getOrderedPartsMetadata to shufflePartsMetadata

The erasure distribution is now the second argument instead of
being the primary input argument, for brevity.

Also, replace type-cast `int64(0)` initializations with zero-value
declarations of the form `var variable int64` everywhere.
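For illustration, here is a minimal, self-contained sketch of the new calling convention (slice first, distribution second) and the zero-value declaration style. The `diskID` type and the sample values are stand-ins invented for this example, not the real `StorageAPI` interface:

```go
package main

import "fmt"

// diskID is a stand-in for the StorageAPI interface, used only for this sketch.
type diskID string

// shuffleDisks mirrors the renamed helper's shape: the slice to shuffle comes
// first and the erasure distribution second.
func shuffleDisks(disks []diskID, distribution []int) []diskID {
	if distribution == nil {
		return disks
	}
	shuffled := make([]diskID, len(disks))
	for index := range disks {
		// distribution holds 1-based target positions for each disk.
		shuffled[distribution[index]-1] = disks[index]
	}
	return shuffled
}

func main() {
	disks := []diskID{"disk0", "disk1", "disk2", "disk3"}
	distribution := []int{4, 2, 1, 3}

	// Zero-value declaration instead of a type-cast int64(0).
	var bytesWritten int64
	bytesWritten += 42

	fmt.Println(shuffleDisks(disks, distribution)) // [disk2 disk1 disk3 disk0]
	fmt.Println(bytesWritten)                      // 42
}
```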
Committed by Harshavardhana to master (via GitHub)
parent 25b5a0534f
commit bcc5b6e1ef
Files changed:
  1. cmd/erasure-readfile.go (4 changes)
  2. cmd/erasure-readfile_test.go (32 changes)
  3. cmd/erasure-utils.go (4 changes)
  4. cmd/fs-v1-background-append.go (2 changes)
  5. cmd/fs-v1-multipart.go (4 changes)
  6. cmd/fs-v1.go (2 changes)
  7. cmd/lock-instrument_test.go (24 changes)
  8. cmd/object-api-putobject_test.go (2 changes)
  9. cmd/object-handlers.go (4 changes)
  10. cmd/server_utils_test.go (2 changes)
  11. cmd/xl-v1-healing.go (6 changes)
  12. cmd/xl-v1-multipart.go (9 changes)
  13. cmd/xl-v1-object.go (19 changes)
  14. cmd/xl-v1-utils.go (32 changes)
  15. cmd/xl-v1-utils_test.go (48 changes)

cmd/erasure-readfile.go

@@ -192,7 +192,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
 }()
 // Total bytes written to writer
-bytesWritten := int64(0)
+var bytesWritten int64
 startBlock := offset / blockSize
 endBlock := (offset + length) / blockSize
@@ -263,7 +263,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
 }
 // Offset in enBlocks from where data should be read from.
-enBlocksOffset := int64(0)
+var enBlocksOffset int64
 // Total data to be read from enBlocks.
 enBlocksLength := curBlockSize

cmd/erasure-readfile_test.go

@@ -121,35 +121,6 @@ func testGetReadDisks(t *testing.T, xl *xlObjects) {
 }
 }
-// Test getOrderedDisks which returns ordered slice of disks from their
-// actual distribution.
-func testGetOrderedDisks(t *testing.T, xl *xlObjects) {
-disks := xl.storageDisks
-distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
-orderedDisks := getOrderedDisks(distribution, disks)
-// From the "distribution" above you can notice that:
-// 1st data block is in the 9th disk (i.e distribution index 8)
-// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
-if orderedDisks[0] != disks[8] ||
-orderedDisks[1] != disks[7] ||
-orderedDisks[2] != disks[9] ||
-orderedDisks[3] != disks[6] ||
-orderedDisks[4] != disks[10] ||
-orderedDisks[5] != disks[5] ||
-orderedDisks[6] != disks[11] ||
-orderedDisks[7] != disks[4] ||
-orderedDisks[8] != disks[12] ||
-orderedDisks[9] != disks[3] ||
-orderedDisks[10] != disks[13] ||
-orderedDisks[11] != disks[2] ||
-orderedDisks[12] != disks[14] ||
-orderedDisks[13] != disks[1] ||
-orderedDisks[14] != disks[15] ||
-orderedDisks[15] != disks[0] {
-t.Errorf("getOrderedDisks returned incorrect order.")
-}
-}
 // Test for isSuccessDataBlocks and isSuccessDecodeBlocks.
 func TestIsSuccessBlocks(t *testing.T) {
 dataBlocks := 8
@@ -217,7 +188,7 @@ func TestIsSuccessBlocks(t *testing.T) {
 }
 }
-// Wrapper function for testGetReadDisks, testGetOrderedDisks.
+// Wrapper function for testGetReadDisks, testShuffleDisks.
 func TestErasureReadUtils(t *testing.T) {
 nDisks := 16
 disks, err := getRandomDisks(nDisks)
@@ -236,7 +207,6 @@ func TestErasureReadUtils(t *testing.T) {
 defer removeRoots(disks)
 xl := objLayer.(*xlObjects)
 testGetReadDisks(t, xl)
-testGetOrderedDisks(t, xl)
 }
 // Simulates a faulty disk for ReadFile()

cmd/erasure-utils.go

@@ -116,7 +116,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
 write := length
 // Counter to increment total written.
-totalWritten := int64(0)
+var totalWritten int64
 // Write all data blocks to dst.
 for _, block := range enBlocks[:dataBlocks] {
@@ -180,7 +180,7 @@ func copyBuffer(writer io.Writer, disk StorageAPI, volume string, path string, b
 }
 // Starting offset for Reading the file.
-startOffset := int64(0)
+var startOffset int64
 // Read until io.EOF.
 for {

cmd/fs-v1-background-append.go

@@ -210,7 +210,7 @@ func (fs fsObjects) appendParts(bucket, object, uploadID string, info bgAppendPa
 func (fs fsObjects) appendPart(bucket, object, uploadID string, part objectPartInfo, buf []byte) error {
 partPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadID, part.Name)
-offset := int64(0)
+var offset int64
 // Read each file part to start writing to the temporary concatenated object.
 file, size, err := fsOpenFile(partPath, offset)
 if err != nil {

cmd/fs-v1-multipart.go

@@ -463,7 +463,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
 pipeReader, pipeWriter := io.Pipe()
 go func() {
-startOffset := int64(0) // Read the whole file.
+var startOffset int64 // Read the whole file.
 if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
 pipeWriter.CloseWithError(gerr)
@@ -864,7 +864,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
 multipartPartFile := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix)
 var reader io.ReadCloser
-offset := int64(0)
+var offset int64
 reader, _, err = fsOpenFile(multipartPartFile, offset)
 if err != nil {
 fs.rwPool.Close(fsMetaPathMultipart)

cmd/fs-v1.go

@@ -395,7 +395,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 pipeReader, pipeWriter := io.Pipe()
 go func() {
-startOffset := int64(0) // Read the whole file.
+var startOffset int64 // Read the whole file.
 if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
 pipeWriter.CloseWithError(gerr)

cmd/lock-instrument_test.go

@@ -653,14 +653,14 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
 } else {
 t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
 }
-if globalNSMutex.counters.granted != int64(0) {
-t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.counters.granted)
+if globalNSMutex.counters.granted != 0 {
+t.Errorf("Expected the count of total running locks to be %v, but got %v", 0, globalNSMutex.counters.granted)
 }
-if globalNSMutex.counters.blocked != int64(0) {
-t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.counters.blocked)
+if globalNSMutex.counters.blocked != 0 {
+t.Errorf("Expected the count of total blocked locks to be %v, but got %v", 0, globalNSMutex.counters.blocked)
 }
-if globalNSMutex.counters.total != int64(0) {
-t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.counters.total)
+if globalNSMutex.counters.total != 0 {
+t.Errorf("Expected the count of all locks to be %v, but got %v", 0, globalNSMutex.counters.total)
 }
 }
@@ -723,13 +723,13 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
 t.Fatalf("Entry for <volume> %s, <path> %s should have been deleted. ", param.volume, param.path)
 }
 // The lock count values should be 0.
-if globalNSMutex.counters.granted != int64(0) {
-t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.counters.granted)
+if globalNSMutex.counters.granted != 0 {
+t.Errorf("Expected the count of total running locks to be %v, but got %v", 0, globalNSMutex.counters.granted)
 }
-if globalNSMutex.counters.blocked != int64(0) {
-t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.counters.blocked)
+if globalNSMutex.counters.blocked != 0 {
+t.Errorf("Expected the count of total blocked locks to be %v, but got %v", 0, globalNSMutex.counters.blocked)
 }
-if globalNSMutex.counters.total != int64(0) {
-t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.counters.total)
+if globalNSMutex.counters.total != 0 {
+t.Errorf("Expected the count of all locks to be %v, but got %v", 0, globalNSMutex.counters.total)
 }
 }

cmd/object-api-putobject_test.go

@@ -146,7 +146,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
 // data with size different from the actual number of bytes available in the reader
 {bucket, object, data, nil, "", int64(len(data) - 1), getMD5Hash(data[:len(data)-1]), nil},
 {bucket, object, nilBytes, nil, "", int64(len(nilBytes) + 1), getMD5Hash(nilBytes), IncompleteBody{}},
-{bucket, object, fiveMBBytes, nil, "", int64(0), getMD5Hash(fiveMBBytes), nil},
+{bucket, object, fiveMBBytes, nil, "", 0, getMD5Hash(fiveMBBytes), nil},
 // Test case 30
 // valid data with X-Amz-Meta- meta

cmd/object-handlers.go

@@ -133,7 +133,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 }
 // Get the object.
-startOffset := int64(0)
+var startOffset int64
 length := objInfo.Size
 if hrange != nil {
 startOffset = hrange.offsetBegin
@@ -620,7 +620,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 }
 // Get the object.
-startOffset := int64(0)
+var startOffset int64
 length := objInfo.Size
 if hrange != nil {
 startOffset = hrange.offsetBegin

cmd/server_utils_test.go

@@ -91,7 +91,7 @@ func calculateStreamContentLength(dataLen, chunkSize int64) int64 {
 }
 chunksCount := int64(dataLen / chunkSize)
 remainingBytes := int64(dataLen % chunkSize)
-streamLen := int64(0)
+var streamLen int64
 streamLen += chunksCount * calculateSignedChunkLength(chunkSize)
 if remainingBytes > 0 {
 streamLen += calculateSignedChunkLength(remainingBytes)

cmd/xl-v1-healing.go

@@ -371,9 +371,9 @@ func healObject(storageDisks []StorageAPI, bucket string, object string, quorum
 }
 // Reorder so that we have data disks first and parity disks next.
-latestDisks = getOrderedDisks(latestMeta.Erasure.Distribution, latestDisks)
-outDatedDisks = getOrderedDisks(latestMeta.Erasure.Distribution, outDatedDisks)
-partsMetadata = getOrderedPartsMetadata(latestMeta.Erasure.Distribution, partsMetadata)
+latestDisks = shuffleDisks(latestDisks, latestMeta.Erasure.Distribution)
+outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
+partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
 // We write at temporary location and then rename to fianal location.
 tmpID := mustGetUUID()

cmd/xl-v1-multipart.go

@@ -544,7 +544,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
 pipeReader, pipeWriter := io.Pipe()
 go func() {
-startOffset := int64(0) // Read the whole file.
+var startOffset int64 // Read the whole file.
 if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
 pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
@@ -607,8 +607,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 return PartInfo{}, err
 }
-onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
-_ = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata)
+onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)
 // Need a unique name for the part being written in minioMetaBucket to
 // accommodate concurrent PutObjectPart requests
@@ -919,10 +918,10 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 }
 // Order online disks in accordance with distribution order.
-onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
+onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)
 // Order parts metadata in accordance with distribution order.
-partsMetadata = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata)
+partsMetadata = shufflePartsMetadata(partsMetadata, xlMeta.Erasure.Distribution)
 // Save current xl meta for validation.
 var currentXLMeta = xlMeta

cmd/xl-v1-object.go

@@ -58,7 +58,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 }
 // Reorder online disks based on erasure distribution order.
-onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
+onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)
 // Length of the file to read.
 length := xlMeta.Stat.Size
@@ -106,7 +106,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 pipeReader, pipeWriter := io.Pipe()
 go func() {
-startOffset := int64(0) // Read the whole file.
+var startOffset int64 // Read the whole file.
 if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
 pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
@@ -163,10 +163,10 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
 }
 // Reorder online disks based on erasure distribution order.
-onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
+onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)
 // Reorder parts metadata based on erasure distribution order.
-metaArr = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, metaArr)
+metaArr = shufflePartsMetadata(metaArr, xlMeta.Erasure.Distribution)
 // For negative length read everything.
 if length < 0 {
@@ -242,7 +242,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
 }
 }
-totalBytesRead := int64(0)
+var totalBytesRead int64
 chunkSize := getChunkSize(xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks)
 pool := bpool.NewBytePool(chunkSize, len(onlineDisks))
@@ -525,7 +525,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 }
 // Order disks according to erasure distribution
-onlineDisks := getOrderedDisks(partsMetadata[0].Erasure.Distribution, xl.storageDisks)
+onlineDisks := shuffleDisks(xl.storageDisks, partsMetadata[0].Erasure.Distribution)
 // Delete temporary object in the event of failure.
 // If PutObject succeeded there would be no temporary
@@ -533,7 +533,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 defer xl.deleteObject(minioMetaTmpBucket, tempObj)
 // Total size of the written object
-sizeWritten := int64(0)
+var sizeWritten int64
 // Read data and split into parts - similar to multipart mechanism
 for partIdx := 1; ; partIdx++ {
@@ -550,9 +550,10 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 return ObjectInfo{}, toObjectErr(err, bucket, object)
 }
-// Prepare file for eventual optimization in the disk
+// Hint the filesystem to pre-allocate one continuous large block.
+// This is only an optimization.
 if curPartSize > 0 {
-// Calculate the real size of the part in the disk and prepare it for eventual optimization
+// Calculate the real size of the part in the disk.
 actualSize := xl.sizeOnDisk(curPartSize, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks)
 for _, disk := range onlineDisks {
 if disk != nil {

cmd/xl-v1-utils.go

@@ -306,26 +306,34 @@ func readAllXLMetadata(disks []StorageAPI, bucket, object string) ([]xlMetaV1, [
 return metadataArray, errs
 }
-// Return ordered partsMetadata depending on distribution.
-func getOrderedPartsMetadata(distribution []int, partsMetadata []xlMetaV1) (orderedPartsMetadata []xlMetaV1) {
-orderedPartsMetadata = make([]xlMetaV1, len(partsMetadata))
+// Return shuffled partsMetadata depending on distribution.
+func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) {
+if distribution == nil {
+return partsMetadata
+}
+shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata))
+// Shuffle slice xl metadata for expected distribution.
 for index := range partsMetadata {
 blockIndex := distribution[index]
-orderedPartsMetadata[blockIndex-1] = partsMetadata[index]
+shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
 }
-return orderedPartsMetadata
+return shuffledPartsMetadata
 }
-// getOrderedDisks - get ordered disks from erasure distribution.
-// returns ordered slice of disks from their actual distribution.
-func getOrderedDisks(distribution []int, disks []StorageAPI) (orderedDisks []StorageAPI) {
-orderedDisks = make([]StorageAPI, len(disks))
-// From disks gets ordered disks.
+// shuffleDisks - shuffle input disks slice depending on the
+// erasure distribution. return shuffled slice of disks with
+// their expected distribution.
+func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) {
+if distribution == nil {
+return disks
+}
+shuffledDisks = make([]StorageAPI, len(disks))
+// Shuffle disks for expected distribution.
 for index := range disks {
 blockIndex := distribution[index]
-orderedDisks[blockIndex-1] = disks[index]
+shuffledDisks[blockIndex-1] = disks[index]
 }
-return orderedDisks
+return shuffledDisks
 }
 // Errors specifically generated by getPartSizeFromIdx function.

cmd/xl-v1-utils_test.go

@@ -383,3 +383,51 @@ func TestGetPartSizeFromIdx(t *testing.T) {
 }
 }
 }
+func TestShuffleDisks(t *testing.T) {
+nDisks := 16
+disks, err := getRandomDisks(nDisks)
+if err != nil {
+t.Fatal(err)
+}
+endpoints, err := parseStorageEndpoints(disks)
+if err != nil {
+t.Fatal(err)
+}
+objLayer, _, err := initObjectLayer(endpoints)
+if err != nil {
+removeRoots(disks)
+t.Fatal(err)
+}
+defer removeRoots(disks)
+xl := objLayer.(*xlObjects)
+testShuffleDisks(t, xl)
+}
+// Test shuffleDisks which returns shuffled slice of disks for their actual distribution.
+func testShuffleDisks(t *testing.T, xl *xlObjects) {
+disks := xl.storageDisks
+distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
+shuffledDisks := shuffleDisks(disks, distribution)
+// From the "distribution" above you can notice that:
+// 1st data block is in the 9th disk (i.e distribution index 8)
+// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
+if shuffledDisks[0] != disks[8] ||
+shuffledDisks[1] != disks[7] ||
+shuffledDisks[2] != disks[9] ||
+shuffledDisks[3] != disks[6] ||
+shuffledDisks[4] != disks[10] ||
+shuffledDisks[5] != disks[5] ||
+shuffledDisks[6] != disks[11] ||
+shuffledDisks[7] != disks[4] ||
+shuffledDisks[8] != disks[12] ||
+shuffledDisks[9] != disks[3] ||
+shuffledDisks[10] != disks[13] ||
+shuffledDisks[11] != disks[2] ||
+shuffledDisks[12] != disks[14] ||
+shuffledDisks[13] != disks[1] ||
+shuffledDisks[14] != disks[15] ||
+shuffledDisks[15] != disks[0] {
+t.Errorf("shuffleDisks returned incorrect order.")
+}
+}
