@@ -43,6 +43,9 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
 	// Get block info for given offset, length and block size.
 	startBlock, bytesToSkip, endBlock := getBlockInfo(offset, length, eInfo.BlockSize)
+	// Data chunk size on each block.
+	chunkSize := eInfo.BlockSize / int64(eInfo.DataBlocks)
 	for block := startBlock; block <= endBlock; block++ {
 		// Allocate encoded blocks up to storage disks.
 		enBlocks := make([][]byte, len(disks))
@@ -52,7 +55,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
 		var noReconstruct bool // Set for no reconstruction.
 		// Keep how many bytes are read for this block.
-		// In most cases, last block in the file is shorter than eInfo.BlockSize.
+		// In most cases, last block in the file is shorter than chunkSize
 		lastReadSize := int64(0)
 		// Read from all the disks.
@@ -66,14 +69,14 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
 			}
 			// Initialize chunk slice and fill the data from each parts.
-			enBlocks[blockIndex] = make([]byte, eInfo.BlockSize)
+			enBlocks[blockIndex] = make([]byte, chunkSize)
 			// Read the necessary blocks.
-			n, err := disk.ReadFile(volume, path, block*eInfo.BlockSize, enBlocks[blockIndex])
+			n, err := disk.ReadFile(volume, path, block*chunkSize, enBlocks[blockIndex])
 			if err != nil {
 				enBlocks[blockIndex] = nil
-			} else if n < eInfo.BlockSize {
-				// As the data we got is smaller than eInfo.BlockSize, keep only required chunk slice
+			} else if n < chunkSize {
+				// As the data we got is smaller than chunk size, keep only required chunk slice
 				enBlocks[blockIndex] = append([]byte{}, enBlocks[blockIndex][:n]...)
 			}
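
For reference, the arithmetic this change relies on: each disk holds only its own shard of an erasure block, so per-disk read sizes and offsets must be expressed in chunkSize (eInfo.BlockSize divided by eInfo.DataBlocks), not in the full eInfo.BlockSize. Below is a minimal standalone sketch of that relationship; the block size and data-block count are hypothetical values chosen only for illustration, whereas in the patch they come from eInfo inside erasureReadFile.

package main

import "fmt"

func main() {
	// Hypothetical erasure parameters, for illustration only.
	blockSize := int64(10 * 1024 * 1024) // one full erasure block (10 MiB)
	dataBlocks := int64(8)               // number of data shards

	// Per-disk chunk size, mirroring the patch's
	// chunkSize := eInfo.BlockSize / int64(eInfo.DataBlocks)
	chunkSize := blockSize / dataBlocks

	// The offset handed to each disk read must be in the shard's own
	// coordinate space, i.e. block*chunkSize rather than block*blockSize.
	for block := int64(0); block < 3; block++ {
		fmt.Printf("erasure block %d -> per-disk offset %d, read %d bytes\n",
			block, block*chunkSize, chunkSize)
	}
}

With the old code, reading eInfo.BlockSize bytes at block*eInfo.BlockSize would address far past the shard that each disk actually stores, so every erasure block after the first would be read from the wrong position.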