Merge pull request #649 from harshavardhana/pr_out_minor_optimization_avoiding_one_unnecessary_copy

Minor optimization avoiding one unnecessary copy
master
Harshavardhana 10 years ago
commit da593b498c
Changed files:
  pkg/storage/drivers/memory/memory.go (6 changed lines)
  pkg/storage/drivers/memory/memory_multipart.go (10 changed lines)

pkg/storage/drivers/memory/memory.go

@@ -277,18 +277,18 @@ func (memory *memoryDriver) createObject(bucket, key, contentType, expectedMD5Su
     if err != io.EOF {
         return "", iodine.New(err, nil)
     }
-    go debug.FreeOSMemory()
     md5SumBytes := hash.Sum(nil)
     totalLength := len(readBytes)
     memory.lock.Lock()
     ok := memory.objects.Set(objectKey, readBytes)
+    // setting up for de-allocation
+    readBytes = nil
+    go debug.FreeOSMemory()
     memory.lock.Unlock()
     if !ok {
         return "", iodine.New(drivers.InternalError{}, nil)
     }
-    // setting up for de-allocation
-    readBytes = nil
     md5Sum := hex.EncodeToString(md5SumBytes)
     // Verify if the written object is equal to what is expected, only if it is requested as such
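The hunk above reorders the cleanup so that the local buffer reference is dropped and the FreeOSMemory hint is issued right after the store takes the data, while the lock is still held, instead of after the error check. A minimal sketch of that pattern, using hypothetical names (inMemoryStore and putObject are illustrations, not the driver's API):

package main

import (
    "fmt"
    "runtime/debug"
    "sync"
)

// inMemoryStore is a stand-in for the driver's object map; illustration only.
type inMemoryStore struct {
    lock    sync.Mutex
    objects map[string][]byte
}

// putObject hands the buffer to the store, then clears the local reference
// and hints the runtime to release memory, mirroring the hunk above.
func (s *inMemoryStore) putObject(key string, readBytes []byte) {
    s.lock.Lock()
    s.objects[key] = readBytes
    // setting up for de-allocation: this function no longer needs its own
    // reference to the buffer once the store holds it
    readBytes = nil
    // FreeOSMemory forces a garbage collection and asks the runtime to
    // return as much freed memory to the OS as possible; running it on a
    // goroutine keeps it off the request path
    go debug.FreeOSMemory()
    s.lock.Unlock()
}

func main() {
    s := &inMemoryStore{objects: make(map[string][]byte)}
    s.putObject("bucket/key", []byte("payload"))
    fmt.Println(len(s.objects["bucket/key"]), "bytes stored")
}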

pkg/storage/drivers/memory/memory_multipart.go

@@ -244,9 +244,8 @@ func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string
         memory.lock.Unlock()
         return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
     }
-    obj := object
-    size += int64(len(obj))
-    calcMD5Bytes := md5.Sum(obj)
+    size += int64(len(object))
+    calcMD5Bytes := md5.Sum(object)
     // complete multi part request header md5sum per part is hex encoded
     recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
     if err != nil {

@@ -255,10 +254,12 @@ func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string
     if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
         return "", iodine.New(drivers.BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil)
     }
-    _, err = io.Copy(&fullObject, bytes.NewBuffer(obj))
+    _, err = io.Copy(&fullObject, bytes.NewBuffer(object))
     if err != nil {
         return "", iodine.New(err, nil)
     }
+    object = nil
+    go debug.FreeOSMemory()
 }
 memory.lock.Unlock()

@@ -271,6 +272,7 @@ func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string
     // which would in-turn cleanup properly in accordance with S3 Spec
     return "", iodine.New(err, nil)
 }
+fullObject.Reset()
 memory.cleanupMultiparts(bucket, key, uploadID)
 memory.cleanupMultipartSession(bucket, key, uploadID)
 return etag, nil
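Taken together, the memory_multipart.go hunks drop the extra copy of the part variable, release each part reference as soon as it has been copied into the staging buffer, and reset that buffer once the assembled object has been handed off. A minimal sketch of the same pattern, assuming a hypothetical assembleParts helper rather than the driver's CompleteMultipartUpload:

package main

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "runtime/debug"
)

// assembleParts concatenates uploaded parts into dst via a staging buffer,
// releasing each part right after it has been copied. Hypothetical helper,
// not the driver's API.
func assembleParts(dst io.Writer, parts [][]byte) (int64, error) {
    var fullObject bytes.Buffer
    var size int64
    for i := range parts {
        size += int64(len(parts[i]))
        if _, err := io.Copy(&fullObject, bytes.NewBuffer(parts[i])); err != nil {
            return 0, err
        }
        // drop the reference to the part that was just copied and hint the
        // runtime to return freed memory to the OS, as the hunk above does
        parts[i] = nil
        go debug.FreeOSMemory()
    }
    if _, err := dst.Write(fullObject.Bytes()); err != nil {
        return 0, err
    }
    // the assembled object has been handed off; clear the staging buffer,
    // mirroring the fullObject.Reset() added in the last hunk
    fullObject.Reset()
    return size, nil
}

func main() {
    parts := [][]byte{[]byte("hello "), []byte("world")}
    n, err := assembleParts(os.Stdout, parts)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    fmt.Printf("\nassembled %d bytes\n", n)
}

Note that bytes.Buffer.Reset empties the buffer but retains its underlying storage for future writes; the memory itself is only reclaimed once the buffer becomes unreachable.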
