Merge pull request #752 from harshavardhana/pr_out_handle_removal_of_disks_getobject_now_reads_if_disks_are_missing_underneath_add_initial_stub_healing_code

Handle removal of disks - getObject() now reads if disks are missing underneath, add initial stub healing code
master
Harshavardhana 10 years ago
commit 2e5e85d8ad
  1. pkg/donut/bucket.go (14 changed lines)
  2. pkg/donut/donut-v1.go (51 changed lines)
  3. pkg/donut/heal.go (26 changed lines)
  4. pkg/donut/management.go (7 changed lines)
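
The thread running through all four files is the change of reader container from []io.ReadCloser to map[int]io.ReadCloser. With a slice, a missing disk either shifts every later reader to the wrong index or forces a hard failure; with a map keyed by disk order, a dead disk simply leaves a hole at its index and the read path carries on. The dropped `readers = make([]io.ReadCloser, len(disks))` lines also fix a second problem: re-allocating the slice inside the node loop discarded every previous node's readers. A minimal sketch of the new pattern, not the donut code itself (openChunk and the paths are hypothetical stand-ins for disk.OpenFile and the real chunk layout):

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // openChunk is a hypothetical helper; the real code calls disk.OpenFile.
    func openChunk(path string) (io.ReadCloser, error) {
    	return os.Open(path)
    }

    // chunkReaders gathers per-disk readers keyed by disk order. A failed
    // open leaves a gap at that index rather than shifting later readers,
    // which is exactly what erasure decoding needs.
    func chunkReaders(paths []string) map[int]io.ReadCloser {
    	readers := make(map[int]io.ReadCloser)
    	for order, path := range paths {
    		r, err := openChunk(path)
    		if err != nil {
    			continue // disk missing underneath: leave a hole at this index
    		}
    		readers[order] = r
    	}
    	return readers
    }

    func main() {
    	rs := chunkReaders([]string{"/disk0/chunk", "/disk1/chunk"})
    	fmt.Println("readers available:", len(rs))
    }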

@@ -86,14 +86,13 @@ func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bu
 func (b bucket) getBucketName() string {
 	return b.name
 }
-func (b bucket) getBucketMetadataReaders() ([]io.ReadCloser, error) {
-	var readers []io.ReadCloser
+func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, error) {
+	readers := make(map[int]io.ReadCloser)
 	for _, node := range b.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return nil, iodine.New(err, nil)
 		}
-		readers = make([]io.ReadCloser, len(disks))
 		for order, disk := range disks {
 			bucketMetaDataReader, err := disk.OpenFile(filepath.Join(b.donutName, bucketMetadataConfig))
 			if err != nil {
@@ -500,7 +499,7 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
 }
 // decodeEncodedData -
-func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, error) {
+func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, error) {
 	var curBlockSize int64
 	if blockSize < totalLeft {
 		curBlockSize = blockSize
@@ -511,7 +510,7 @@ func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadC
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
-	encodedBytes := make([][]byte, len(readers))
+	encodedBytes := make([][]byte, encoder.k+encoder.m)
 	for i, reader := range readers {
 		var bytesBuffer bytes.Buffer
 		_, err := io.CopyN(&bytesBuffer, reader, int64(curChunkSize))
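
This one-line change is the heart of the degraded read path: len(readers) now counts only the disks that answered, so sizing the shard slice by it would shrink the erasure geometry. Sizing by encoder.k+encoder.m keeps one slot per shard; slots with no surviving reader stay nil, which a Reed-Solomon decoder treats as erasures and reconstructs, as long as at least k of the k+m slots are filled. A standalone sketch of the gather step, with k, m and the chunk size assumed:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"strings"
    )

    // gatherShards sizes the shard slice to the full k+m geometry, not to
    // len(readers): an index with no surviving reader stays nil, marking
    // it as an erasure for the decoder.
    func gatherShards(readers map[int]io.Reader, k, m int, chunkSize int64) [][]byte {
    	shards := make([][]byte, k+m)
    	for i, r := range readers {
    		var buf bytes.Buffer
    		if _, err := io.CopyN(&buf, r, chunkSize); err != nil && err != io.EOF {
    			continue // a failed read counts as a missing shard
    		}
    		shards[i] = buf.Bytes()
    	}
    	return shards
    }

    func main() {
    	// Disk 1 is "missing": its index never appears in the map.
    	readers := map[int]io.Reader{0: strings.NewReader("abcd"), 2: strings.NewReader("efgh")}
    	shards := gatherShards(readers, 2, 2, 4)
    	fmt.Printf("shard 1 missing: %v\n", shards[1] == nil)
    }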
@@ -528,15 +527,14 @@ func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadC
 }
 // getObjectReaders -
-func (b bucket) getObjectReaders(objectName, objectMeta string) ([]io.ReadCloser, error) {
-	var readers []io.ReadCloser
+func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, error) {
+	readers := make(map[int]io.ReadCloser)
 	nodeSlice := 0
 	for _, node := range b.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return nil, iodine.New(err, nil)
 		}
-		readers = make([]io.ReadCloser, len(disks))
 		for order, disk := range disks {
 			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
 			objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
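
For context on the map keys: each chunk lives under a slice directory named bucket$nodeIndex$diskOrder, so keying the readers by the same disk order keeps the reader index and the on-disk layout in agreement. A small illustration of the path scheme (the donut, bucket, object and metadata file names here are made up):

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    func main() {
    	// Layout per the diff above: bucket$nodeIndex$diskOrder.
    	bucketSlice := fmt.Sprintf("%s$%d$%d", "photos", 0, 3)
    	objectPath := filepath.Join("mydonut", bucketSlice, "object1", "objectMetadata.json")
    	fmt.Println(objectPath) // mydonut/photos$0$3/object1/objectMetadata.json
    }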

@@ -25,6 +25,7 @@ import (
 	"strconv"
 	"strings"
+	"github.com/minio/minio/pkg/donut/disk"
 	"github.com/minio/minio/pkg/iodine"
 )
@@ -229,14 +230,13 @@ func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, error) {
 }
 // getBucketMetadataReaders -
-func (donut API) getBucketMetadataReaders() ([]io.ReadCloser, error) {
-	var readers []io.ReadCloser
+func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, error) {
+	readers := make(map[int]io.ReadCloser)
 	for _, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return nil, iodine.New(err, nil)
 		}
-		readers = make([]io.ReadCloser, len(disks))
 		for order, d := range disks {
 			bucketMetaDataReader, err := d.OpenFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
 			if err != nil {
@@ -339,30 +339,37 @@ func (donut API) makeDonutBucket(bucketName, acl string) error {
 // listDonutBuckets -
 func (donut API) listDonutBuckets() error {
+	var disks map[int]disk.Disk
+	var err error
 	for _, node := range donut.nodes {
-		disks, err := node.ListDisks()
+		disks, err = node.ListDisks()
 		if err != nil {
 			return iodine.New(err, nil)
 		}
-		for _, disk := range disks {
-			dirs, err := disk.ListDir(donut.config.DonutName)
-			if err != nil {
-				return iodine.New(err, nil)
-			}
-			for _, dir := range dirs {
-				splitDir := strings.Split(dir.Name(), "$")
-				if len(splitDir) < 3 {
-					return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
-				}
-				bucketName := splitDir[0]
-				// we dont need this once we cache from makeDonutBucket()
-				bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
-				if err != nil {
-					return iodine.New(err, nil)
-				}
-				donut.buckets[bucketName] = bucket
-			}
-		}
+	}
+	var dirs []os.FileInfo
+	for _, disk := range disks {
+		dirs, err = disk.ListDir(donut.config.DonutName)
+		if err == nil {
+			break
+		}
+	}
+	// if all disks are missing then return error
+	if err != nil {
+		return iodine.New(err, nil)
+	}
+	for _, dir := range dirs {
+		splitDir := strings.Split(dir.Name(), "$")
+		if len(splitDir) < 3 {
+			return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
+		}
+		bucketName := splitDir[0]
+		// we dont need this once we cache from makeDonutBucket()
+		bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
+		if err != nil {
+			return iodine.New(err, nil)
+		}
+		donut.buckets[bucketName] = bucket
 	}
 	return nil
 }
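
listDonutBuckets no longer insists that every disk respond: it takes the bucket listing from the first disk that answers and errors out only when all of them are gone. The same first-success pattern in isolation (listDir here is a stand-in for disk.ListDir; paths and contents are invented):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // listFromAnyDisk tries each disk in turn, keeps the first good
    // listing, and only gives up when every disk has failed.
    func listFromAnyDisk(disks []string) ([]string, error) {
    	var dirs []string
    	err := errors.New("no disks")
    	for _, d := range disks {
    		dirs, err = listDir(d)
    		if err == nil {
    			break // one healthy disk is enough for a listing
    		}
    	}
    	if err != nil {
    		return nil, err // all disks are missing underneath
    	}
    	return dirs, nil
    }

    // listDir is a hypothetical stand-in for disk.ListDir.
    func listDir(disk string) ([]string, error) {
    	if disk == "dead" {
    		return nil, errors.New("disk missing")
    	}
    	return []string{disk + "/bucket$0$0"}, nil
    }

    func main() {
    	dirs, err := listFromAnyDisk([]string{"dead", "live"})
    	fmt.Println(dirs, err)
    }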

@@ -0,0 +1,26 @@
+package donut
+
+import (
+	"fmt"
+
+	"github.com/minio/minio/pkg/iodine"
+)
+
+// Heal heal an existing donut
+func (donut API) Heal() error {
+	missingDisks := make(map[int]struct{})
+	for _, node := range donut.nodes {
+		disks, err := node.ListDisks()
+		if err != nil {
+			return iodine.New(err, nil)
+		}
+		for i, disk := range disks {
+			dirs, err := disk.ListDir(donut.config.DonutName)
+			if err != nil {
+				missingDisks[i] = struct{}{}
+			}
+			fmt.Println(dirs)
+		}
+	}
+	return nil
+}
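
The stub's only real work is the missingDisks map: an inventory of disk indices whose listing failed, which is the starting point a future heal pass needs before it can re-encode chunks onto replacement disks (the fmt.Println is debug output). A sketch of that bookkeeping and the hypothetical follow-up step, which this commit does not implement:

    package main

    import "fmt"

    func main() {
    	// Same bookkeeping as the stub: the set of disk indices whose
    	// ListDir failed. This commit only collects them; a full heal
    	// would re-encode each object's chunks onto these disks.
    	missingDisks := map[int]struct{}{3: {}, 7: {}}
    	for i := range missingDisks {
    		fmt.Println("disk needs healing:", i) // placeholder for a real re-encode step
    	}
    }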

@@ -24,11 +24,6 @@ import (
 	"github.com/minio/minio/pkg/iodine"
 )
 
-// Heal - heal a donut and fix bad data blocks
-func (donut API) Heal() error {
-	return iodine.New(NotImplemented{Function: "Heal"}, nil)
-}
-
 // Info - return info about donut configuration
 func (donut API) Info() (nodeDiskMap map[string][]string, err error) {
 	nodeDiskMap = make(map[string][]string)
@@ -59,7 +54,7 @@ func (donut API) AttachNode(hostname string, disks []string) error {
 	for i, d := range disks {
 		newDisk, err := disk.New(d)
 		if err != nil {
-			return iodine.New(err, nil)
+			continue
 		}
 		if err := newDisk.MakeDir(donut.config.DonutName); err != nil {
 			return iodine.New(err, nil)
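
Finally, AttachNode stops treating one bad disk as fatal: a disk.New failure is skipped, so a node attaches with whatever disks are actually reachable, matching the degraded-read behavior above. The skip pattern in isolation (newDisk is a hypothetical stand-in for disk.New; the paths are invented):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // newDisk stands in for disk.New from the diff.
    func newDisk(path string) (string, error) {
    	if path == "/mnt/bad" {
    		return "", errors.New("disk unreachable")
    	}
    	return path, nil
    }

    func main() {
    	// Per the AttachNode change: skip disks that fail to initialize
    	// instead of aborting the whole node attach.
    	var attached []string
    	for _, p := range []string{"/mnt/good0", "/mnt/bad", "/mnt/good1"} {
    		d, err := newDisk(p)
    		if err != nil {
    			continue // tolerate a missing disk at attach time
    		}
    		attached = append(attached, d)
    	}
    	fmt.Println("attached disks:", attached)
    }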
