Support SSE-C multipart source objects in CopyObject (#5603)

The current code didn't implement the logic to decrypt
multiple encrypted parts; this PR fixes that by adding
support for copying encrypted multipart source objects.
Branch: master
Harshavardhana committed 7 years ago (via GitHub)
parent e4f6877c8b
commit 52eea7b9c1
Changed files:
1. cmd/encryption-v1.go (69 changes)
2. cmd/fs-v1.go (4 changes)
3. cmd/object-handlers.go (104 changes)
4. cmd/xl-v1-object.go (19 changes)
5. pkg/ioutil/ioutil.go (12 changes)

@@ -28,6 +28,7 @@ import (
"io"
"net/http"
"github.com/minio/minio/pkg/ioutil"
sha256 "github.com/minio/sha256-simd"
"github.com/minio/sio"
)
@@ -426,6 +427,7 @@ func newDecryptWriterWithObjectKey(client io.Writer, objectEncryptionKey []byte,
delete(metadata, ServerSideEncryptionIV)
delete(metadata, ServerSideEncryptionSealAlgorithm)
delete(metadata, ServerSideEncryptionSealedKey)
delete(metadata, ReservedMetadataPrefix+"Encrypted-Multipart")
return writer, nil
}
@@ -463,6 +465,7 @@ type DecryptBlocksWriter struct {
partEncRelOffset int64
copySource bool
// Customer Key
customerKeyHeader string
}
@@ -473,8 +476,15 @@ func (w *DecryptBlocksWriter) buildDecrypter(partID int) error {
m[k] = v
}
// Initialize the first decrypter, new decrypters will be initialized in Write() operation as needed.
w.req.Header.Set(SSECustomerKey, w.customerKeyHeader)
key, err := ParseSSECustomerRequest(w.req)
var key []byte
var err error
if w.copySource {
w.req.Header.Set(SSECopyCustomerKey, w.customerKeyHeader)
key, err = ParseSSECopyCustomerRequest(w.req)
} else {
w.req.Header.Set(SSECustomerKey, w.customerKeyHeader)
key, err = ParseSSECustomerRequest(w.req)
}
if err != nil {
return err
}
@@ -491,14 +501,24 @@ func (w *DecryptBlocksWriter) buildDecrypter(partID int) error {
mac.Write(partIDbin[:])
partEncryptionKey := mac.Sum(nil)
delete(m, SSECustomerKey) // make sure we do not save the key by accident
// make sure we do not save the key by accident
if w.copySource {
delete(m, SSECopyCustomerKey)
} else {
delete(m, SSECustomerKey)
}
decrypter, err := newDecryptWriterWithObjectKey(w.writer, partEncryptionKey, w.startSeqNum, m)
// make sure to provide a NopCloser such that a Close
// on sio's decryptWriter does not propagate to the underlying
// writer and close the stream prematurely.
decrypter, err := newDecryptWriterWithObjectKey(ioutil.NopCloser(w.writer), partEncryptionKey, w.startSeqNum, m)
if err != nil {
return err
}
if w.decrypter != nil {
// Pro-actively close the previous decrypter so that any pending
// buffers are flushed before we allocate a new one.
err = w.decrypter.Close()
if err != nil {
return err
@@ -561,9 +581,17 @@ func (w *DecryptBlocksWriter) Close() error {
return nil
}
// DecryptAllBlocksCopyRequest - setup a struct which can decrypt many concatenated encrypted data
// blocks. Parts information helps to know the boundaries of each encrypted data block. This function
// decrypts all parts starting from part-1.
func DecryptAllBlocksCopyRequest(client io.Writer, r *http.Request, objInfo ObjectInfo) (io.WriteCloser, int64, error) {
w, _, size, err := DecryptBlocksRequest(client, r, 0, objInfo.Size, objInfo, true)
return w, size, err
}
// DecryptBlocksRequest - setup a struct which can decrypt many concatenated encrypted data
// blocks. Parts information helps to know the boundaries of each encrypted data block.
func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length int64, objInfo ObjectInfo) (io.WriteCloser, int64, int64, error) {
func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length int64, objInfo ObjectInfo, copySource bool) (io.WriteCloser, int64, int64, error) {
seqNumber, encStartOffset, encLength := getEncryptedStartOffset(startOffset, length)
// Encryption length cannot be bigger than the file size, if it is
@@ -573,11 +601,16 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length
}
if len(objInfo.Parts) == 0 || !objInfo.IsEncryptedMultipart() {
writer, err := DecryptRequestWithSequenceNumber(client, r, seqNumber, objInfo.UserDefined)
var writer io.WriteCloser
var err error
if copySource {
writer, err = DecryptCopyRequest(client, r, objInfo.UserDefined)
} else {
writer, err = DecryptRequestWithSequenceNumber(client, r, seqNumber, objInfo.UserDefined)
}
if err != nil {
return nil, 0, 0, err
}
return writer, encStartOffset, encLength, nil
}
@@ -614,10 +647,28 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length
partIndex: partStartIndex,
req: r,
customerKeyHeader: r.Header.Get(SSECustomerKey),
metadata: objInfo.UserDefined,
copySource: copySource,
}
w.metadata = map[string]string{}
// Copy encryption metadata for internal use.
for k, v := range objInfo.UserDefined {
w.metadata[k] = v
}
w.buildDecrypter(partStartIndex + 1)
// Purge all the encryption headers.
delete(objInfo.UserDefined, ServerSideEncryptionIV)
delete(objInfo.UserDefined, ServerSideEncryptionSealAlgorithm)
delete(objInfo.UserDefined, ServerSideEncryptionSealedKey)
delete(objInfo.UserDefined, ReservedMetadataPrefix+"Encrypted-Multipart")
if w.copySource {
w.customerKeyHeader = r.Header.Get(SSECopyCustomerKey)
}
if err := w.buildDecrypter(partStartIndex + 1); err != nil {
return nil, 0, 0, err
}
return w, encStartOffset, encLength, nil
}
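
A note on the buildDecrypter hunks above: every part of an SSE-C multipart object is encrypted under its own key, derived by HMAC-ing the part number with the object encryption key. Below is a minimal, self-contained sketch of that derivation — derivePartKey is a made-up name, the little-endian part-ID encoding is an assumption (that line is elided from the hunk), and crypto/sha256 stands in for the sha256-simd import used above:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// derivePartKey (hypothetical helper) mirrors the per-part key
// derivation visible in buildDecrypter: HMAC-SHA256 over the
// encoded part ID, keyed with the object encryption key.
func derivePartKey(objectEncryptionKey []byte, partID int) []byte {
	var partIDbin [4]byte
	// Assumption: little-endian part-ID encoding (this line is not
	// shown in the hunk above).
	binary.LittleEndian.PutUint32(partIDbin[:], uint32(partID))

	mac := hmac.New(sha256.New, objectEncryptionKey)
	mac.Write(partIDbin[:])
	return mac.Sum(nil) // 32-byte key for this part's decrypter
}

func main() {
	objectKey := make([]byte, 32) // placeholder object encryption key
	for partID := 1; partID <= 3; partID++ {
		fmt.Printf("part %d key: %x\n", partID, derivePartKey(objectKey, partID))
	}
}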

@@ -398,6 +398,10 @@ func (fs *FSObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject strin
// Save objects' metadata in `fs.json`.
fsMeta := newFSMetaV1()
if _, err = fsMeta.ReadFrom(wlk); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
fsMeta.Meta = srcInfo.UserDefined
if _, err = fsMeta.WriteTo(wlk); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)

@@ -152,7 +152,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// additionally also skipping mod(offset)64KiB boundaries.
writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length)
writer, startOffset, length, err = DecryptBlocksRequest(writer, r, startOffset, length, objInfo)
writer, startOffset, length, err = DecryptBlocksRequest(writer, r, startOffset, length, objInfo, false)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@@ -382,19 +382,23 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
var writer io.WriteCloser = pipeWriter
var reader io.Reader = pipeReader
srcInfo.Reader, err = hash.NewReader(reader, srcInfo.Size, "", "")
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Save the original size for later use when we want to copy
// an encrypted file into an unencrypted one.
size := srcInfo.Size
var encMetadata = make(map[string]string)
if objectAPI.IsEncryptionSupported() {
var oldKey, newKey []byte
sseCopyC := IsSSECopyCustomerRequest(r.Header)
sseC := IsSSECustomerRequest(r.Header)
if sseCopyC {
oldKey, err = ParseSSECopyCustomerRequest(r)
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
if sseC {
newKey, err = ParseSSECustomerRequest(r)
if err != nil {
@@ -406,7 +410,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// AWS S3 implementation requires us to only rotate keys
// when both keys are provided and destination is same,
// otherwise we proceed to encrypt/decrypt.
if len(oldKey) > 0 && len(newKey) > 0 && cpSrcDstSame {
if sseCopyC && sseC && cpSrcDstSame {
// Get the old key which needs to be rotated.
oldKey, err = ParseSSECopyCustomerRequest(r)
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
for k, v := range srcInfo.UserDefined {
encMetadata[k] = v
}
@@ -418,10 +429,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
} else {
if sseCopyC {
// Source is encrypted make sure to save the encrypted size.
if srcInfo.IsEncrypted() {
srcInfo.Size = srcInfo.EncryptedSize()
}
writer, err = newDecryptWriter(pipeWriter, oldKey, 0, srcInfo.UserDefined)
writer = ioutil.LimitedWriter(writer, 0, srcInfo.Size)
writer, srcInfo.Size, err = DecryptAllBlocksCopyRequest(writer, r, srcInfo)
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -431,11 +440,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// we are creating a new object at this point, even
// if source and destination are same objects.
srcInfo.metadataOnly = false
if sseC {
size = srcInfo.Size
}
}
if sseC {
reader, err = newEncryptReader(pipeReader, newKey, encMetadata)
if err != nil {
pipeReader.CloseWithError(err)
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@@ -443,6 +455,15 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// we are creating a new object at this point, even
// if source and destination are same objects.
srcInfo.metadataOnly = false
if !sseCopyC {
size = srcInfo.EncryptedSize()
}
}
srcInfo.Reader, err = hash.NewReader(reader, size, "", "") // do not try to verify encrypted content
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
}
@@ -450,7 +471,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
srcInfo.UserDefined, err = getCpObjMetadataFromHeader(r.Header, srcInfo.UserDefined)
if err != nil {
pipeReader.CloseWithError(err)
pipeWriter.CloseWithError(err)
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
@@ -469,21 +490,13 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// destination are same objects. Apply this restriction also when
// metadataOnly is true indicating that we are not overwriting the object.
if !isMetadataReplace(r.Header) && srcInfo.metadataOnly {
pipeReader.CloseWithError(fmt.Errorf("invalid copy dest"))
pipeWriter.CloseWithError(fmt.Errorf("invalid copy dest"))
// If x-amz-metadata-directive is not set to REPLACE then we need
// to error out if source and destination are same.
writeErrorResponse(w, ErrInvalidCopyDest, r.URL)
return
}
hashReader, err := hash.NewReader(reader, srcInfo.Size, "", "") // do not try to verify encrypted content
if err != nil {
pipeReader.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
srcInfo.Reader = hashReader
// Copy source object to destination, if source and destination
// object is same then only metadata is updated.
objInfo, err := objectAPI.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo)
@@ -876,6 +889,12 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
var writer io.WriteCloser = pipeWriter
var reader io.Reader = pipeReader
srcInfo.Reader, err = hash.NewReader(reader, length, "", "")
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
if objectAPI.IsEncryptionSupported() {
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(dstBucket, dstObject, uploadID, 0, 1)
@@ -884,6 +903,18 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
sseCopyC := IsSSECopyCustomerRequest(r.Header)
if sseCopyC {
// Response writer should be limited early on for decryption up to the required length,
// additionally also skipping mod(offset)64KiB boundaries.
writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length)
writer, startOffset, length, err = DecryptBlocksRequest(pipeWriter, r, startOffset, length, srcInfo, true)
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
if li.IsEncrypted() {
if !IsSSECustomerRequest(r.Header) {
writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
@@ -906,19 +937,20 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return
}
reader, err = sio.EncryptReader(reader, sio.Config{Key: objectEncryptionKey})
reader, err = sio.EncryptReader(pipeReader, sio.Config{Key: objectEncryptionKey})
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
if IsSSECopyCustomerRequest(r.Header) {
// Response writer should be limited early on for decryption up to the required length,
// additionally also skipping mod(offset)64KiB boundaries.
writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length)
writer, startOffset, length, err = DecryptBlocksRequest(pipeWriter, r, startOffset, length, srcInfo)
size := length
if !sseCopyC {
info := ObjectInfo{Size: length}
size = info.EncryptedSize()
}
srcInfo.Reader, err = hash.NewReader(reader, size, "", "")
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -926,14 +958,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
}
}
}
hashReader, err := hash.NewReader(reader, length, "", "") // do not try to verify encrypted content
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
srcInfo.Reader = hashReader
srcInfo.Writer = writer
// Copy source object to destination, if source and destination
// object is same then only metadata is updated.
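
Both copy handlers above size their hash.Reader by what will actually flow through the pipe: the decrypted size when the source is SSE-C encrypted, the encrypted size when only the destination is. ObjectInfo.EncryptedSize ultimately reflects the DARE format's fixed overhead added to every 64KiB package of plaintext. A small illustration of that accounting using sio's public size helper (assuming the github.com/minio/sio package vendored by this repository):

package main

import (
	"fmt"

	"github.com/minio/sio"
)

func main() {
	// When the destination is SSE-C encrypted but the source is not,
	// the handler must size the hash.Reader with the ciphertext
	// length, not the plaintext length.
	for _, plain := range []uint64{0, 1, 64 * 1024, 5 * 1024 * 1024} {
		enc, err := sio.EncryptedSize(plain)
		if err != nil {
			panic(err)
		}
		fmt.Printf("plaintext %8d bytes -> ciphertext %8d bytes\n", plain, enc)
	}
}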

@@ -83,7 +83,9 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
// Read metadata associated with the object from all disks.
metaArr, errs := readAllXLMetadata(xl.getDisks(), srcBucket, srcObject)
storageDisks := xl.getDisks()
metaArr, errs := readAllXLMetadata(storageDisks, srcBucket, srcObject)
// get Quorum for this object
readQuorum, writeQuorum, err := objectQuorumFromMeta(xl, metaArr, errs)
@@ -96,7 +98,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
}
// List all online disks.
onlineDisks, modTime := listOnlineDisks(xl.getDisks(), metaArr, errs)
_, modTime := listOnlineDisks(storageDisks, metaArr, errs)
// Pick latest valid metadata.
xlMeta, err := pickValidXLMeta(metaArr, modTime)
@@ -104,25 +106,22 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Reorder online disks based on erasure distribution order.
onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)
// Length of the file to read.
length := xlMeta.Stat.Size
// Check if this request is only metadata update.
if cpSrcDstSame {
xlMeta.Meta = srcInfo.UserDefined
partsMetadata := make([]xlMetaV1, len(xl.getDisks()))
// Update `xl.json` content on each disks.
for index := range partsMetadata {
partsMetadata[index] = xlMeta
for index := range metaArr {
metaArr[index].Meta = srcInfo.UserDefined
}
var onlineDisks []StorageAPI
tempObj := mustGetUUID()
// Write unique `xl.json` for each disk.
if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil {
if onlineDisks, err = writeUniqueXLMetadata(storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Rename atomically `xl.json` from tmp location to destination for each disk.
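
In the xl-v1-object.go hunks, the metadata-only copy path now reuses a single storageDisks snapshot, stamps the new user metadata into every metadata replica, and persists `xl.json` only if a write quorum is met. The following is a toy sketch of that quorum-write idea alone — writeAllWithQuorum and its signature are invented for illustration and are not MinIO's API:

package main

import (
	"errors"
	"fmt"
)

// writeAllWithQuorum (hypothetical helper) attempts every write and
// succeeds only if at least `quorum` of them succeed, in the spirit
// of writeUniqueXLMetadata above.
func writeAllWithQuorum(writers []func() error, quorum int) error {
	success := 0
	for _, write := range writers {
		if err := write(); err == nil {
			success++
		}
	}
	if success < quorum {
		return errors.New("write quorum not met")
	}
	return nil
}

func main() {
	disks := make([]func() error, 4)
	for i := range disks {
		i := i
		disks[i] = func() error {
			fmt.Printf("xl.json written to disk %d\n", i)
			return nil
		}
	}
	// Write quorum is typically N/2 + 1.
	if err := writeAllWithQuorum(disks, len(disks)/2+1); err != nil {
		panic(err)
	}
}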

@@ -114,3 +114,15 @@ func (w *LimitWriter) Close() error {
func LimitedWriter(w io.Writer, skipBytes int64, limit int64) *LimitWriter {
return &LimitWriter{w, skipBytes, limit}
}
type nopCloser struct {
io.Writer
}
func (nopCloser) Close() error { return nil }
// NopCloser returns a WriteCloser with a no-op Close method wrapping
// the provided Writer w.
func NopCloser(w io.Writer) io.WriteCloser {
return nopCloser{w}
}
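
Finally, the pkg/ioutil NopCloser exists so that closing one part's decrypter (to flush its buffers) cannot tear down the shared downstream writer that later parts still need. A self-contained sketch of the pattern, re-declaring the wrapper locally:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// nopCloser mirrors the pkg/ioutil addition above: Close is a no-op,
// so closing a wrapper never closes the shared underlying writer.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

// NopCloser returns a WriteCloser with a no-op Close method wrapping w.
func NopCloser(w io.Writer) io.WriteCloser { return nopCloser{w} }

func main() {
	var out bytes.Buffer // stands in for the shared downstream writer

	part1 := NopCloser(&out)
	part1.Write([]byte("part-1 plaintext "))
	part1.Close() // safe: does not close out

	part2 := NopCloser(&out)
	part2.Write([]byte("part-2 plaintext"))
	part2.Close()

	fmt.Println(out.String()) // both parts reached the same writer
}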
