fix: allocate buffer to required size rather than readSizeV1 (#2095)

Refer #2077
master
Bala FA authored 8 years ago, committed by Harshavardhana
parent a35341448f
commit 44ae7a037b
  1. erasure-utils.go (12 changes)
  2. fs-v1-multipart.go (17 changes)
  3. fs-v1.go (17 changes)
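All three files apply the same sizing rule: keep the 128KiB (readSizeV1) staging buffer only when the amount of data to process is unknown or at least that large, and otherwise shrink the buffer to the known length. A minimal standalone sketch of that rule; stagingBufSize is a hypothetical helper, and readSizeV1 is assumed to be 128KiB as the original comments state:

package main

import "fmt"

// Assumed value: the original comments describe readSizeV1 as a 128KiB staging buffer.
const readSizeV1 = 128 * 1024

// stagingBufSize is a hypothetical helper (not part of the commit) capturing the
// sizing rule repeated in each hunk below: keep readSizeV1 unless a smaller
// positive length is already known.
func stagingBufSize(length int64) int64 {
	bufSize := int64(readSizeV1)
	if length > 0 && bufSize > length {
		bufSize = length
	}
	return bufSize
}

func main() {
	fmt.Println(stagingBufSize(5))       // 5      - tiny read gets a tiny buffer
	fmt.Println(stagingBufSize(-1))      // 131072 - unknown size keeps the full buffer
	fmt.Println(stagingBufSize(1 << 20)) // 131072 - large read is still capped at 128KiB
}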

@@ -152,13 +152,17 @@ func getEncodedBlockLen(inputLen int64, dataBlocks int) (curEncBlockSize int64)
 // err == nil, not err == EOF. Additionally offset can be provided to start
 // the read at. copyN returns io.EOF if there aren't enough data to be read.
 func copyN(writer io.Writer, disk StorageAPI, volume string, path string, offset int64, length int64) (err error) {
-	// Use 128KiB staging buffer to read up to length.
-	buf := make([]byte, readSizeV1)
+	// Use staging buffer to read up to length.
+	bufSize := int64(readSizeV1)
+	if length > 0 && bufSize > length {
+		bufSize = length
+	}
+	buf := make([]byte, int(bufSize))
 	// Read into writer until length.
 	for length > 0 {
-		curLength := int64(readSizeV1)
-		if length < readSizeV1 {
+		curLength := bufSize
+		if length < bufSize {
 			curLength = length
 		}
 		nr, er := disk.ReadFile(volume, path, offset, buf[:curLength])
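With this change, copyN never allocates more than it can actually read: the staging buffer is sized to min(readSizeV1, length) and each iteration reads at most curLength bytes at the current offset. A rough standalone analogue of the loop above, using io.ReaderAt in place of the internal StorageAPI (copyRange and its signature are illustrative, not the commit's code):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

const readSizeV1 = 128 * 1024 // assumed 128KiB staging buffer size

// copyRange mirrors the shape of copyN after the change: size the staging buffer
// to min(readSizeV1, length), then read and write at most that much per pass.
func copyRange(w io.Writer, r io.ReaderAt, offset, length int64) error {
	bufSize := int64(readSizeV1)
	if length > 0 && bufSize > length {
		bufSize = length
	}
	buf := make([]byte, int(bufSize))
	for length > 0 {
		curLength := bufSize
		if length < bufSize {
			curLength = length
		}
		n, err := r.ReadAt(buf[:curLength], offset)
		if n > 0 {
			if _, werr := w.Write(buf[:n]); werr != nil {
				return werr
			}
			offset += int64(n)
			length -= int64(n)
		}
		if err != nil {
			return err // io.EOF when there is not enough data, as copyN documents
		}
	}
	return nil
}

func main() {
	var out bytes.Buffer
	if err := copyRange(&out, strings.NewReader("hello world"), 6, 5); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println(out.String()) // "world", copied with a 5-byte buffer instead of 128KiB
}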

@@ -304,10 +304,15 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 		limitDataReader = data
 	}
-	// Allocate 128KiB buffer for staging buffer.
-	var buf = make([]byte, readSizeV1)
+	// Allocate buffer for staging buffer.
+	bufSize := int64(readSizeV1)
+	if size > 0 && bufSize > size {
+		bufSize = size
+	}
+	var buf = make([]byte, int(bufSize))
 
-	// Read till io.EOF.
+	// Read up to required size
+	totalLeft := size
 	for {
 		n, err := io.ReadFull(limitDataReader, buf)
 		if err == io.EOF {
@@ -321,6 +326,10 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 		if err = fs.storage.AppendFile(minioMetaBucket, tmpPartPath, buf[:n]); err != nil {
 			return "", toObjectErr(err, bucket, object)
 		}
+		if totalLeft -= int64(n); size >= 0 && totalLeft <= 0 {
+			break
+		}
 	}
 	// Validate if payload is valid.
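PutObjectPart now tracks totalLeft so the loop stops once size bytes have been appended, while a negative size still means "read until io.EOF". A rough standalone sketch of that loop shape; readUpToSize and appendChunk are illustrative stand-ins (appendChunk plays the role of fs.storage.AppendFile):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

const readSizeV1 = 128 * 1024 // assumed 128KiB staging buffer size

// readUpToSize sketches the PutObjectPart loop: fill a right-sized staging buffer,
// hand each chunk to appendChunk, and stop after size bytes (or at io.EOF when
// size is negative, i.e. unknown).
func readUpToSize(data io.Reader, size int64, appendChunk func([]byte) error) error {
	bufSize := int64(readSizeV1)
	if size > 0 && bufSize > size {
		bufSize = size
	}
	buf := make([]byte, int(bufSize))

	totalLeft := size
	for {
		n, err := io.ReadFull(data, buf)
		if err == io.EOF {
			break
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			return err
		}
		if aerr := appendChunk(buf[:n]); aerr != nil {
			return aerr
		}
		if totalLeft -= int64(n); size >= 0 && totalLeft <= 0 {
			break
		}
	}
	return nil
}

func main() {
	var part bytes.Buffer
	err := readUpToSize(strings.NewReader("hello"), 5, func(p []byte) error {
		_, werr := part.Write(p)
		return werr
	})
	fmt.Println(part.String(), err) // "hello <nil>", read with a 5-byte buffer
}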
@@ -509,7 +518,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
 	tempObj := path.Join(tmpMetaPrefix, uploadID, "part.1")
-	// Allocate 128KiB of staging buffer.
+	// Allocate staging buffer.
 	var buf = make([]byte, readSizeV1)
 	// Loop through all parts, validate them and then commit to disk.

@@ -218,11 +218,16 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
 		return ObjectNameInvalid{Bucket: bucket, Object: object}
 	}
 	var totalLeft = length
-	buf := make([]byte, readSizeV1) // Allocate a 128KiB staging buffer.
+	bufSize := int64(readSizeV1)
+	if length > 0 && bufSize > length {
+		bufSize = length
+	}
+	// Allocate a staging buffer.
+	buf := make([]byte, int(bufSize))
 	for totalLeft > 0 {
 		// Figure out the right size for the buffer.
-		curLeft := int64(readSizeV1)
-		if totalLeft < readSizeV1 {
+		curLeft := bufSize
+		if totalLeft < bufSize {
 			curLeft = totalLeft
 		}
 		// Reads the file at offset.
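GetObject gets the same treatment, so a small ranged GET no longer pins a full 128KiB staging buffer for its lifetime. An illustrative back-of-the-envelope comparison (the request count and range size are made up; only readSizeV1 = 128KiB comes from the original comments):

package main

import "fmt"

const readSizeV1 = 128 * 1024 // assumed 128KiB

func main() {
	const concurrentRangeGets = 1000 // hypothetical concurrent ranged GETs
	const rangeLength = 1024         // each asking for a 1KiB range

	before := int64(concurrentRangeGets) * readSizeV1
	after := int64(concurrentRangeGets) * rangeLength
	fmt.Printf("staging memory before: %d MiB\n", before>>20) // 125 MiB
	fmt.Printf("staging memory after:  %d KiB\n", after>>10)  // 1000 KiB
}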
@@ -333,7 +338,11 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 		}
 	} else {
 		// Allocate a buffer to Read() the object upload stream.
-		buf := make([]byte, readSizeV1)
+		bufSize := int64(readSizeV1)
+		if size > 0 && bufSize > size {
+			bufSize = size
+		}
+		buf := make([]byte, int(bufSize))
 		// Read the buffer till io.EOF and append the read data to the temporary file.
 		for {
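PutObject's streaming branch keeps reading until io.EOF, so when size is unknown (<= 0) the full readSizeV1 buffer is still used; only uploads with a known smaller size get a smaller buffer. A rough standalone sketch of that branch; streamToTemp and appendToTemp are illustrative stand-ins for the real storage calls:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

const readSizeV1 = 128 * 1024 // assumed 128KiB

// streamToTemp sketches PutObject's streaming branch: cap the staging buffer at a
// known positive size, otherwise keep 128KiB, and read until io.EOF either way.
func streamToTemp(data io.Reader, size int64, appendToTemp func([]byte) error) error {
	bufSize := int64(readSizeV1)
	if size > 0 && bufSize > size {
		bufSize = size
	}
	buf := make([]byte, int(bufSize))
	// Read the buffer till io.EOF and append the read data to the temporary file.
	for {
		n, err := io.ReadFull(data, buf)
		if err == io.EOF {
			break
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			return err
		}
		if aerr := appendToTemp(buf[:n]); aerr != nil {
			return aerr
		}
	}
	return nil
}

func main() {
	var tmp bytes.Buffer
	// Unknown size (-1): the loop simply runs until the stream is drained.
	if err := streamToTemp(strings.NewReader("streamed object"), -1, func(p []byte) error {
		_, werr := tmp.Write(p)
		return werr
	}); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println(tmp.String()) // "streamed object"
}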
