XL: Relax write quorum further to N/2 + 1. (#2018)
This changes behavior in some parts of the code as well; those are addressed here. Fixes #2016
parent d484157d67
commit ae936a0147
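The quorum logic itself is not in the file below, but the rule named in the commit title can be sketched briefly. A minimal illustration only, with a hypothetical helper name (`writeQuorum` is not part of this commit):

func writeQuorum(diskCount int) int {
	// Relaxed write quorum from the commit title: a write needs only a
	// simple majority of the N disks, i.e. N/2 + 1 (e.g. 9 of 16).
	return diskCount/2 + 1
}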
@@ -0,0 +1,166 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"encoding/json"
	"path"
	"sort"
	"sync"
	"time"
)

// An uploadInfo represents a single upload entry, following the s3 compatible spec.
type uploadInfo struct {
	UploadID  string    `json:"uploadId"`  // UploadID for the active multipart upload.
	Deleted   bool      `json:"deleted"`   // Currently unused, reserved for future use.
	Initiated time.Time `json:"initiated"` // Indicates when the uploadID was initiated.
}

// An uploadsV1 represents the `uploads.json` metadata header.
type uploadsV1 struct {
	Version string       `json:"version"`   // Version of the current `uploads.json`.
	Format  string       `json:"format"`    // Format of the current `uploads.json`.
	Uploads []uploadInfo `json:"uploadIds"` // Captures all the upload ids for a given object.
}

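// For illustration only (not part of this commit): given the struct tags
// above, a serialized `uploads.json` would look roughly like this, with
// hypothetical values:
//
//	{
//	  "version": "1.0.0",
//	  "format": "xl",
//	  "uploadIds": [
//	    {"uploadId": "example-upload-id", "deleted": false, "initiated": "2016-06-28T10:00:00Z"}
//	  ]
//	}
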
// byInitiatedTime is a collection satisfying sort.Interface.
type byInitiatedTime []uploadInfo

func (t byInitiatedTime) Len() int      { return len(t) }
func (t byInitiatedTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byInitiatedTime) Less(i, j int) bool {
	return t[i].Initiated.Before(t[j].Initiated)
}

// AddUploadID - adds a new upload id in order of its initiated time.
func (u *uploadsV1) AddUploadID(uploadID string, initiated time.Time) {
	u.Uploads = append(u.Uploads, uploadInfo{
		UploadID:  uploadID,
		Initiated: initiated,
	})
	sort.Sort(byInitiatedTime(u.Uploads))
}

// Index - returns the index of the matching upload id, or -1 if not found.
func (u uploadsV1) Index(uploadID string) int {
	for i, u := range u.Uploads {
		if u.UploadID == uploadID {
			return i
		}
	}
	return -1
}

// readUploadsJSON - get all the saved uploads JSON.
func readUploadsJSON(bucket, object string, disk StorageAPI) (uploadIDs uploadsV1, err error) {
	uploadJSONPath := path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)
	// Reads entire `uploads.json`.
	buf, err := disk.ReadAll(minioMetaBucket, uploadJSONPath)
	if err != nil {
		return uploadsV1{}, err
	}

	// Decode `uploads.json`.
	if err = json.Unmarshal(buf, &uploadIDs); err != nil {
		return uploadsV1{}, err
	}

	// Success.
	return uploadIDs, nil
}

// newUploadsV1 - initialize new uploads v1.
func newUploadsV1(format string) uploadsV1 {
	uploadIDs := uploadsV1{}
	uploadIDs.Version = "1.0.0" // Should follow semantic versioning.
	uploadIDs.Format = format
	return uploadIDs
}

// cleanupUploadedParts - wrapper which removes all the uploaded parts.
func cleanupUploadedParts(bucket, object, uploadID string, storageDisks ...StorageAPI) error {
	var errs = make([]error, len(storageDisks))
	var wg = &sync.WaitGroup{}

	// Construct uploadIDPath.
	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)

	// Cleanup uploadID for all disks.
	for index, disk := range storageDisks {
		if disk == nil {
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Cleanup each uploadID in a goroutine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			err := cleanupDir(disk, minioMetaBucket, uploadIDPath)
			if err != nil {
				errs[index] = err
				return
			}
			errs[index] = nil
		}(index, disk)
	}

	// Wait for all the cleanups to finish.
	wg.Wait()

	// Return the first error encountered, if any.
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

// listMultipartUploadIDs - list all the upload ids from a marker up to 'count'.
func listMultipartUploadIDs(bucketName, objectName, uploadIDMarker string, count int, disk StorageAPI) ([]uploadMetadata, bool, error) {
	var uploads []uploadMetadata
	// Read `uploads.json`.
	uploadsJSON, err := readUploadsJSON(bucketName, objectName, disk)
	if err != nil {
		return nil, false, err
	}
	index := 0
	if uploadIDMarker != "" {
		for ; index < len(uploadsJSON.Uploads); index++ {
			if uploadsJSON.Uploads[index].UploadID == uploadIDMarker {
				// Skip the uploadID as it would already be listed in the previous listing.
				index++
				break
			}
		}
	}
	for index < len(uploadsJSON.Uploads) {
		uploads = append(uploads, uploadMetadata{
			Object:    objectName,
			UploadID:  uploadsJSON.Uploads[index].UploadID,
			Initiated: uploadsJSON.Uploads[index].Initiated,
		})
		count--
		index++
		if count == 0 {
			break
		}
	}
	// 'end' is true when the listing is exhausted.
	end := (index == len(uploadsJSON.Uploads))
	return uploads, end, nil
}
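For context, a minimal usage sketch of the bookkeeping above. It assumes it is compiled together with this file, that "fmt" is also imported, and uses hypothetical upload ids; it is illustrative, not part of the commit:

func exampleUploadsV1() {
	uploadIDs := newUploadsV1("xl")

	// AddUploadID keeps entries sorted by initiated time, not insert order.
	uploadIDs.AddUploadID("upload-b", time.Now())
	uploadIDs.AddUploadID("upload-a", time.Now().Add(-time.Hour))

	// "upload-a" sorts first because it was initiated an hour earlier.
	fmt.Println(uploadIDs.Index("upload-a")) // 0
	fmt.Println(uploadIDs.Index("missing"))  // -1
}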