Rename getUUID() into mustGetUUID() (#3320)

In case of UUID generation failure, mustGetUUID() will panic rather than
retrying infinitely in a for loop.
master
Authored by Bala FA 8 years ago, committed by Harshavardhana
parent 71b357e4f2
commit 1d4ac4b084
  1. cmd/format-config-v1.go (6 changed lines)
  2. cmd/format-config-v1_test.go (24 changed lines)
  3. cmd/fs-v1-metadata.go (2 changed lines)
  4. cmd/fs-v1-multipart-common.go (2 changed lines)
  5. cmd/fs-v1-multipart.go (4 changed lines)
  6. cmd/fs-v1.go (2 changed lines)
  7. cmd/object-utils.go (18 changed lines)
  8. cmd/xl-v1-healing.go (2 changed lines)
  9. cmd/xl-v1-multipart-common.go (2 changed lines)
  10. cmd/xl-v1-multipart.go (11 changed lines)
  11. cmd/xl-v1-object.go (4 changed lines)
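The core of the change is the rewritten helper in cmd/object-utils.go (see its hunk below): the new function follows Go's must-style convention, returning the UUID directly and panicking if generation fails instead of retrying forever in a loop. The following is a minimal, self-contained sketch of that pattern; it builds a version-4 UUID from crypto/rand rather than the project's internal uuid package, so the helper name and implementation details here are illustrative assumptions, not code from this commit.

package main

import (
	"crypto/rand"
	"fmt"
)

// mustGetUUIDSketch returns a random version-4 UUID string and panics if the
// system's random source fails, mirroring the must-style behavior that the
// renamed mustGetUUID() helper adopts in this commit.
func mustGetUUIDSketch() string {
	var u [16]byte
	if _, err := rand.Read(u[:]); err != nil {
		// A failing random source is unrecoverable; fail loudly instead of retrying.
		panic(fmt.Sprintf("Random UUID generation failed. Error: %s", err))
	}
	u[6] = (u[6] & 0x0f) | 0x40 // set version 4 bits
	u[8] = (u[8] & 0x3f) | 0x80 // set RFC 4122 variant bits
	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:16])
}

func main() {
	fmt.Println(mustGetUUIDSketch())
}

Panicking is a reasonable choice here: callers cannot meaningfully recover from a broken system random source, and the old behavior of looping until success could spin indefinitely, which is exactly what the commit message calls out.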

@@ -506,7 +506,7 @@ func healFormatXLFreshDisks(storageDisks []StorageAPI) error {
// From ordered disks fill the UUID position.
for index, disk := range orderedDisks {
if disk == nil {
- newJBOD[index] = getUUID()
+ newJBOD[index] = mustGetUUID()
}
}
@@ -696,7 +696,7 @@ func healFormatXLCorruptedDisks(storageDisks []StorageAPI) error {
// From ordered disks fill the UUID position.
for index, disk := range orderedDisks {
if disk == nil {
- newJBOD[index] = getUUID()
+ newJBOD[index] = mustGetUUID()
}
}
@@ -889,7 +889,7 @@ func initFormatXL(storageDisks []StorageAPI) (err error) {
Format: "xl",
XL: &xlFormat{
Version: "1",
- Disk: getUUID(),
+ Disk: mustGetUUID(),
},
}
jbod[index] = formats[index].XL.Disk

@@ -26,7 +26,7 @@ func genFormatXLValid() []*formatConfigV1 {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -47,7 +47,7 @@ func genFormatXLInvalidVersion() []*formatConfigV1 {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -71,7 +71,7 @@ func genFormatXLInvalidFormat() []*formatConfigV1 {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -95,7 +95,7 @@ func genFormatXLInvalidXLVersion() []*formatConfigV1 {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -126,7 +126,7 @@ func genFormatXLInvalidJBODCount() []*formatConfigV1 {
jbod := make([]string, 7)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -147,7 +147,7 @@ func genFormatXLInvalidJBOD() []*formatConfigV1 {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -161,7 +161,7 @@ func genFormatXLInvalidJBOD() []*formatConfigV1 {
}
}
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
// Corrupt JBOD entries on disk 6 and disk 8.
formatConfigs[5].XL.JBOD = jbod
@@ -174,7 +174,7 @@ func genFormatXLInvalidDisks() []*formatConfigV1 {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -188,8 +188,8 @@ func genFormatXLInvalidDisks() []*formatConfigV1 {
}
}
// Make disk 5 and disk 8 have inconsistent disk uuid's.
- formatConfigs[4].XL.Disk = getUUID()
- formatConfigs[7].XL.Disk = getUUID()
+ formatConfigs[4].XL.Disk = mustGetUUID()
+ formatConfigs[7].XL.Disk = mustGetUUID()
return formatConfigs
}
@@ -198,7 +198,7 @@ func genFormatXLInvalidDisksOrder() []*formatConfigV1 {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
}
for index := range jbod {
formatConfigs[index] = &formatConfigV1{
@@ -559,7 +559,7 @@ func TestSavedUUIDOrder(t *testing.T) {
jbod := make([]string, 8)
formatConfigs := make([]*formatConfigV1, 8)
for index := range jbod {
- jbod[index] = getUUID()
+ jbod[index] = mustGetUUID()
uuidTestCases[index].uuid = jbod[index]
uuidTestCases[index].shouldPass = true
}

@@ -92,7 +92,7 @@ func readFSMetadata(disk StorageAPI, bucket, filePath string) (fsMeta fsMetaV1,
// Write fsMeta to fs.json or fs-append.json.
func writeFSMetadata(disk StorageAPI, bucket, filePath string, fsMeta fsMetaV1) error {
- tmpPath := getUUID()
+ tmpPath := mustGetUUID()
metadataBytes, err := json.Marshal(fsMeta)
if err != nil {
return traceError(err)

@@ -58,7 +58,7 @@ func (fs fsObjects) isUploadIDExists(bucket, object, uploadID string) bool {
// updateUploadJSON - add or remove upload ID info in all `uploads.json`.
func (fs fsObjects) updateUploadJSON(bucket, object, uploadID string, initiated time.Time, isRemove bool) error {
uploadsPath := path.Join(bucket, object, uploadsJSONFile)
- tmpUploadsPath := getUUID()
+ tmpUploadsPath := mustGetUUID()
uploadsJSON, err := readUploadsJSON(bucket, object, fs.storage)
if errorCause(err) == errFileNotFound {

@@ -236,7 +236,7 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
objectMPartPathLock.Lock()
defer objectMPartPathLock.Unlock()
- uploadID = getUUID()
+ uploadID = mustGetUUID()
initiated := time.Now().UTC()
// Add upload ID to uploads.json
if err = fs.addUploadID(bucket, object, uploadID, initiated); err != nil {
@@ -319,7 +319,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
}
partSuffix := fmt.Sprintf("object%d", partID)
- tmpPartPath := uploadID + "." + getUUID() + "." + partSuffix
+ tmpPartPath := uploadID + "." + mustGetUUID() + "." + partSuffix
// Initialize md5 writer.
md5Writer := md5.New()

@@ -354,7 +354,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
metadata = make(map[string]string)
}
- uniqueID := getUUID()
+ uniqueID := mustGetUUID()
// Uploaded object will first be written to the temporary location which will eventually
// be renamed to the actual location. It is first written to the temporary location

@@ -127,18 +127,14 @@ func pathJoin(elem ...string) string {
	return path.Join(elem...) + trailingSlash
}
- // getUUID() - get a unique uuid.
- func getUUID() (uuidStr string) {
- 	for {
- 		uuid, err := uuid.New()
- 		if err != nil {
- 			errorIf(err, "Unable to initialize uuid")
- 			continue
- 		}
- 		uuidStr = uuid.String()
- 		break
+ // mustGetUUID - get a random UUID.
+ func mustGetUUID() string {
+ 	uuid, err := uuid.New()
+ 	if err != nil {
+ 		panic(fmt.Sprintf("Random UUID generation failed. Error: %s", err))
	}
- 	return uuidStr
+ 	return uuid.String()
}
// Create an s3 compatible MD5sum for complete multipart transaction.

@@ -276,7 +276,7 @@ func healObject(storageDisks []StorageAPI, bucket string, object string, quorum
partsMetadata = getOrderedPartsMetadata(latestMeta.Erasure.Distribution, partsMetadata)
// We write at temporary location and then rename to fianal location.
- tmpID := getUUID()
+ tmpID := mustGetUUID()
// Checksum of the part files. checkSumInfos[index] will contain checksums
// of all the part files in the outDatedDisks[index]

@@ -25,7 +25,7 @@ import (
// updateUploadJSON - add or remove upload ID info in all `uploads.json`.
func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated time.Time, isRemove bool) error {
uploadsPath := path.Join(bucket, object, uploadsJSONFile)
- tmpUploadsPath := getUUID()
+ tmpUploadsPath := mustGetUUID()
// slice to store errors from disks
errs := make([]error, len(xl.storageDisks))

@@ -286,7 +286,7 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
objectMPartPathLock.Lock()
defer objectMPartPathLock.Unlock()
- uploadID := getUUID()
+ uploadID := mustGetUUID()
uploadIDPath := path.Join(bucket, object, uploadID)
tempUploadIDPath := uploadID
// Write updated `xl.json` to all disks.
@@ -393,9 +393,8 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// accommodate concurrent PutObjectPart requests
partSuffix := fmt.Sprintf("part.%d", partID)
- tmpSuffix := getUUID()
- tmpPart := tmpSuffix
- tmpPartPath := path.Join(tmpSuffix, partSuffix)
+ tmpPart := mustGetUUID()
+ tmpPartPath := path.Join(tmpPart, partSuffix)
// Initialize md5 writer.
md5Writer := md5.New()
@@ -521,7 +520,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
}
// Write all the checksum metadata.
- newUUID := getUUID()
+ newUUID := mustGetUUID()
tempXLMetaPath := newUUID
// Writes a unique `xl.json` each disk carrying new checksum related information.
@@ -798,7 +797,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
}()
// Rename if an object already exists to temporary location.
- uniqueID := getUUID()
+ uniqueID := mustGetUUID()
if xl.isObject(bucket, object) {
// NOTE: Do not use online disks slice here.
// The reason is that existing object should be purged

@@ -384,7 +384,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
metadata = make(map[string]string)
}
- uniqueID := getUUID()
+ uniqueID := mustGetUUID()
tempErasureObj := path.Join(uniqueID, "part.1")
tempObj := uniqueID
@@ -518,7 +518,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
}
// Rename if an object already exists to temporary location.
- newUniqueID := getUUID()
+ newUniqueID := mustGetUUID()
if xl.isObject(bucket, object) {
// Delete the temporary copy of the object that existed before this PutObject request.
defer xl.deleteObject(minioMetaTmpBucket, newUniqueID)
