Merge pull request #450 from harshavardhana/pr_out_update_to_new_changes_at_minio_io_objectdriver_and_minio_io_donut

master
Harshavardhana 10 years ago
commit 81e2dafe36
  1. 4
      Godeps/Godeps.json
  2. 120
      Godeps/_workspace/src/github.com/minio-io/donut/bucket.go
  3. 76
      Godeps/_workspace/src/github.com/minio-io/donut/bucket_internal.go
  4. 127
      Godeps/_workspace/src/github.com/minio-io/donut/donut.go
  5. 223
      Godeps/_workspace/src/github.com/minio-io/donut/donut_test.go
  6. 31
      Godeps/_workspace/src/github.com/minio-io/donut/interfaces.go
  7. 67
      Godeps/_workspace/src/github.com/minio-io/donut/management.go
  8. 20
      Godeps/_workspace/src/github.com/minio-io/donut/object.go
  9. 162
      Godeps/_workspace/src/github.com/minio-io/donut/objectstorage.go
  10. 129
      Godeps/_workspace/src/github.com/minio-io/donut/objectstorage_internal.go
  11. 45
      Godeps/_workspace/src/github.com/minio-io/objectdriver/Godeps/Godeps.json
  12. 5
      Godeps/_workspace/src/github.com/minio-io/objectdriver/Godeps/Readme
  13. 66
      Godeps/_workspace/src/github.com/minio-io/objectdriver/Makefile
  14. 29
      Godeps/_workspace/src/github.com/minio-io/objectdriver/api_testsuite.go
  15. 201
      Godeps/_workspace/src/github.com/minio-io/objectdriver/buildscripts/checkdeps.sh
  16. 247
      Godeps/_workspace/src/github.com/minio-io/objectdriver/buildscripts/verifier.go
  17. 151
      Godeps/_workspace/src/github.com/minio-io/objectdriver/donut/donut.go
  18. 68
      Godeps/_workspace/src/github.com/minio-io/objectdriver/donut/donut_filter.go
  19. 2
      Godeps/_workspace/src/github.com/minio-io/objectdriver/mocks/Driver.go

4
Godeps/Godeps.json generated vendored

@ -28,7 +28,7 @@
},
{
"ImportPath": "github.com/minio-io/donut",
"Rev": "1cb5d3239ed989c4dd153af9931bcfb8ec4f0b87"
"Rev": "1df31a9834eb6acef9ff0fb7ee3e597776faf966"
},
{
"ImportPath": "github.com/minio-io/erasure",
@ -40,7 +40,7 @@
},
{
"ImportPath": "github.com/minio-io/objectdriver",
"Rev": "e5f505ad4731ac247f2afecce9b323eda7fbfcc3"
"Rev": "d5a77b2d8a48a0d8906db1757bbac861199ef6a1"
},
{
"ImportPath": "github.com/stretchr/objx",

@ -17,7 +17,6 @@
package donut
import (
"bytes"
"errors"
"fmt"
"io"
@ -29,9 +28,6 @@ import (
"crypto/md5"
"encoding/hex"
"encoding/json"
"github.com/minio-io/minio/pkg/utils/split"
)
type bucket struct {
@ -54,10 +50,6 @@ func NewBucket(bucketName, donutName string, nodes map[string]Node) (Bucket, err
return b, nil
}
func (b bucket) ListNodes() (map[string]Node, error) {
return b.nodes, nil
}
func (b bucket) ListObjects() (map[string]Object, error) {
nodeSlice := 0
for _, node := range b.nodes {
@ -105,75 +97,27 @@ func (b bucket) GetObject(objectName string) (reader io.ReadCloser, size int64,
if !ok {
return nil, 0, os.ErrNotExist
}
donutObjectMetadata, err := object.GetDonutObjectMetadata()
objectMetata, err := object.GetObjectMetadata()
if err != nil {
return nil, 0, err
}
if objectName == "" || writer == nil || len(donutObjectMetadata) == 0 {
if objectName == "" || writer == nil || len(objectMetata) == 0 {
return nil, 0, errors.New("invalid argument")
}
size, err = strconv.ParseInt(donutObjectMetadata["size"], 10, 64)
size, err = strconv.ParseInt(objectMetata["size"], 10, 64)
if err != nil {
return nil, 0, err
}
go b.getObject(b.normalizeObjectName(objectName), writer, donutObjectMetadata)
go b.readEncodedData(b.normalizeObjectName(objectName), writer, objectMetata)
return reader, size, nil
}
func (b bucket) WriteObjectMetadata(objectName string, objectMetadata map[string]string) error {
if len(objectMetadata) == 0 {
return errors.New("invalid argument")
}
objectMetadataWriters, err := b.getDiskWriters(objectName, objectMetadataConfig)
if err != nil {
return err
}
for _, objectMetadataWriter := range objectMetadataWriters {
defer objectMetadataWriter.Close()
}
for _, objectMetadataWriter := range objectMetadataWriters {
jenc := json.NewEncoder(objectMetadataWriter)
if err := jenc.Encode(objectMetadata); err != nil {
return err
}
}
return nil
}
func (b bucket) WriteDonutObjectMetadata(objectName string, donutObjectMetadata map[string]string) error {
if len(donutObjectMetadata) == 0 {
return errors.New("invalid argument")
}
donutObjectMetadataWriters, err := b.getDiskWriters(objectName, donutObjectMetadataConfig)
if err != nil {
return err
}
for _, donutObjectMetadataWriter := range donutObjectMetadataWriters {
defer donutObjectMetadataWriter.Close()
}
for _, donutObjectMetadataWriter := range donutObjectMetadataWriters {
jenc := json.NewEncoder(donutObjectMetadataWriter)
if err := jenc.Encode(donutObjectMetadata); err != nil {
return err
}
}
return nil
}
// This is a temporary normalization of the object path; we need to find a better way
func (b bucket) normalizeObjectName(objectName string) string {
// replace every '/' with '-'
return strings.Replace(objectName, "/", "-", -1)
}
func (b bucket) PutObject(objectName, contentType string, objectData io.Reader) error {
if objectName == "" {
func (b bucket) PutObject(objectName string, objectData io.Reader, metadata map[string]string) error {
if objectName == "" || objectData == nil {
return errors.New("invalid argument")
}
if objectData == nil {
return errors.New("invalid argument")
}
if contentType == "" || strings.TrimSpace(contentType) == "" {
contentType, ok := metadata["contentType"]
if !ok || strings.TrimSpace(contentType) == "" {
contentType = "application/octet-stream"
}
writers, err := b.getDiskWriters(b.normalizeObjectName(objectName), "data")
@ -184,7 +128,7 @@ func (b bucket) PutObject(objectName, contentType string, objectData io.Reader)
defer writer.Close()
}
summer := md5.New()
donutObjectMetadata := make(map[string]string)
objectMetata := make(map[string]string)
switch len(writers) == 1 {
case true:
mw := io.MultiWriter(writers[0], summer)
@ -192,48 +136,30 @@ func (b bucket) PutObject(objectName, contentType string, objectData io.Reader)
if err != nil {
return err
}
donutObjectMetadata["size"] = strconv.FormatInt(totalLength, 10)
objectMetata["size"] = strconv.FormatInt(totalLength, 10)
case false:
k, m, err := b.getDataAndParity(len(writers))
if err != nil {
return err
}
chunks := split.Stream(objectData, 10*1024*1024)
encoder, err := NewEncoder(k, m, "Cauchy")
chunkCount, totalLength, err := b.writeEncodedData(k, m, writers, objectData, summer)
if err != nil {
return err
}
chunkCount := 0
totalLength := 0
for chunk := range chunks {
if chunk.Err == nil {
totalLength = totalLength + len(chunk.Data)
encodedBlocks, _ := encoder.Encode(chunk.Data)
summer.Write(chunk.Data)
for blockIndex, block := range encodedBlocks {
io.Copy(writers[blockIndex], bytes.NewBuffer(block))
}
}
chunkCount = chunkCount + 1
}
donutObjectMetadata["blockSize"] = strconv.Itoa(10 * 1024 * 1024)
donutObjectMetadata["chunkCount"] = strconv.Itoa(chunkCount)
donutObjectMetadata["erasureK"] = strconv.FormatUint(uint64(k), 10)
donutObjectMetadata["erasureM"] = strconv.FormatUint(uint64(m), 10)
donutObjectMetadata["erasureTechnique"] = "Cauchy"
donutObjectMetadata["size"] = strconv.Itoa(totalLength)
objectMetata["blockSize"] = strconv.Itoa(10 * 1024 * 1024)
objectMetata["chunkCount"] = strconv.Itoa(chunkCount)
objectMetata["erasureK"] = strconv.FormatUint(uint64(k), 10)
objectMetata["erasureM"] = strconv.FormatUint(uint64(m), 10)
objectMetata["erasureTechnique"] = "Cauchy"
objectMetata["size"] = strconv.Itoa(totalLength)
}
dataMd5sum := summer.Sum(nil)
donutObjectMetadata["created"] = time.Now().Format(time.RFC3339Nano)
donutObjectMetadata["md5"] = hex.EncodeToString(dataMd5sum)
if err := b.WriteDonutObjectMetadata(b.normalizeObjectName(objectName), donutObjectMetadata); err != nil {
return err
}
objectMetadata := make(map[string]string)
objectMetadata["bucket"] = b.name
objectMetadata["object"] = objectName
objectMetadata["contentType"] = strings.TrimSpace(contentType)
if err := b.WriteObjectMetadata(b.normalizeObjectName(objectName), objectMetadata); err != nil {
objectMetata["created"] = time.Now().Format(time.RFC3339Nano)
objectMetata["md5"] = hex.EncodeToString(dataMd5sum)
objectMetata["bucket"] = b.name
objectMetata["object"] = objectName
objectMetata["contentType"] = strings.TrimSpace(contentType)
if err := b.writeDonutObjectMetadata(b.normalizeObjectName(objectName), objectMetata); err != nil {
return err
}
return nil

@ -20,13 +20,44 @@ import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"hash"
"io"
"path"
"strconv"
"strings"
"github.com/minio-io/minio/pkg/utils/split"
)
// writeDonutObjectMetadata persists objectMetadata as JSON into the
// objectMetadataConfig file on every disk that holds a slice of objectName,
// so the metadata survives loss of individual disks.
func (b bucket) writeDonutObjectMetadata(objectName string, objectMetadata map[string]string) error {
	if len(objectMetadata) == 0 {
		return errors.New("invalid argument")
	}
	objectMetadataWriters, err := b.getDiskWriters(objectName, objectMetadataConfig)
	if err != nil {
		return err
	}
	for _, objectMetadataWriter := range objectMetadataWriters {
		// close every writer when the function returns, after all the
		// encodes below have completed; Close errors are ignored here
		defer objectMetadataWriter.Close()
	}
	for _, objectMetadataWriter := range objectMetadataWriters {
		// write one full JSON copy of the metadata per disk
		jenc := json.NewEncoder(objectMetadataWriter)
		if err := jenc.Encode(objectMetadata); err != nil {
			return err
		}
	}
	return nil
}
// normalizeObjectName flattens an object path into a single filesystem-safe
// name by substituting '-' for every '/'. This is a temporary scheme until a
// better mapping is found.
func (b bucket) normalizeObjectName(objectName string) string {
	return strings.Replace(objectName, "/", "-", -1)
}
func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error) {
if totalWriters <= 1 {
return 0, 0, errors.New("invalid argument")
@ -42,8 +73,33 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error)
return k, m, nil
}
func (b bucket) getObject(objectName string, writer *io.PipeWriter, donutObjectMetadata map[string]string) {
expectedMd5sum, err := hex.DecodeString(donutObjectMetadata["md5"])
// writeEncodedData splits objectData into 10MB chunks, erasure-encodes each
// chunk with a Cauchy (k, m) coder, and writes encoded block i of every chunk
// to writers[i]. Each chunk's plaintext is also fed to summer so the caller
// can compute a digest of the original data. Returns the number of chunks
// consumed from the stream and the total plaintext length in bytes.
func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, summer hash.Hash) (int, int, error) {
	chunks := split.Stream(objectData, 10*1024*1024)
	encoder, err := NewEncoder(k, m, "Cauchy")
	if err != nil {
		return 0, 0, err
	}
	chunkCount := 0
	totalLength := 0
	for chunk := range chunks {
		if chunk.Err == nil {
			// NOTE(review): a chunk carrying Err != nil is silently
			// skipped yet still counted below — confirm intentional
			totalLength = totalLength + len(chunk.Data)
			// NOTE(review): the Encode error is discarded — confirm
			// Encode cannot fail for valid (k, m)
			encodedBlocks, _ := encoder.Encode(chunk.Data)
			summer.Write(chunk.Data)
			for blockIndex, block := range encodedBlocks {
				_, err := io.Copy(writers[blockIndex], bytes.NewBuffer(block))
				if err != nil {
					return 0, 0, err
				}
			}
		}
		chunkCount = chunkCount + 1
	}
	return chunkCount, totalLength, nil
}
func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, objectMetadata map[string]string) {
expectedMd5sum, err := hex.DecodeString(objectMetadata["md5"])
if err != nil {
writer.CloseWithError(err)
return
@ -57,12 +113,12 @@ func (b bucket) getObject(objectName string, writer *io.PipeWriter, donutObjectM
mwriter := io.MultiWriter(writer, hasher)
switch len(readers) == 1 {
case false:
totalChunks, totalLeft, blockSize, k, m, err := b.metadata2Values(donutObjectMetadata)
totalChunks, totalLeft, blockSize, k, m, err := b.metadata2Values(objectMetadata)
if err != nil {
writer.CloseWithError(err)
return
}
technique, ok := donutObjectMetadata["erasureTechnique"]
technique, ok := objectMetadata["erasureTechnique"]
if !ok {
writer.CloseWithError(errors.New("missing erasure Technique"))
return
@ -128,12 +184,12 @@ func (b bucket) decodeData(totalLeft, blockSize int64, readers []io.ReadCloser,
return decodedData, nil
}
func (b bucket) metadata2Values(donutObjectMetadata map[string]string) (totalChunks int, totalLeft, blockSize int64, k, m uint64, err error) {
totalChunks, err = strconv.Atoi(donutObjectMetadata["chunkCount"])
totalLeft, err = strconv.ParseInt(donutObjectMetadata["size"], 10, 64)
blockSize, err = strconv.ParseInt(donutObjectMetadata["blockSize"], 10, 64)
k, err = strconv.ParseUint(donutObjectMetadata["erasureK"], 10, 8)
m, err = strconv.ParseUint(donutObjectMetadata["erasureM"], 10, 8)
// metadata2Values decodes the numeric erasure parameters stored in an
// object's metadata map: total chunk count, total size, block size, and the
// erasure coding parameters k and m.
//
// Bug fix: the previous version assigned every strconv result to the same
// err without checking in between, so a parse failure on any field other
// than "erasureM" was silently overwritten and garbage zero values were
// returned with a nil error. Each field is now checked as it is parsed.
func (b bucket) metadata2Values(objectMetadata map[string]string) (totalChunks int, totalLeft, blockSize int64, k, m uint64, err error) {
	if totalChunks, err = strconv.Atoi(objectMetadata["chunkCount"]); err != nil {
		return
	}
	if totalLeft, err = strconv.ParseInt(objectMetadata["size"], 10, 64); err != nil {
		return
	}
	if blockSize, err = strconv.ParseInt(objectMetadata["blockSize"], 10, 64); err != nil {
		return
	}
	if k, err = strconv.ParseUint(objectMetadata["erasureK"], 10, 8); err != nil {
		return
	}
	m, err = strconv.ParseUint(objectMetadata["erasureM"], 10, 8)
	return
}

@ -16,13 +16,7 @@
package donut
import (
"encoding/json"
"errors"
"fmt"
"path"
"strings"
)
import "errors"
type donut struct {
name string
@ -79,122 +73,3 @@ func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error)
}
return d, nil
}
func (d donut) MakeBucket(bucketName string) error {
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
return errors.New("invalid argument")
}
if _, ok := d.buckets[bucketName]; ok {
return errors.New("bucket exists")
}
bucket, err := NewBucket(bucketName, d.name, d.nodes)
if err != nil {
return err
}
nodeNumber := 0
d.buckets[bucketName] = bucket
for _, node := range d.nodes {
disks, err := node.ListDisks()
if err != nil {
return err
}
for _, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, disk.GetOrder())
err := disk.MakeDir(path.Join(d.name, bucketSlice))
if err != nil {
return err
}
}
nodeNumber = nodeNumber + 1
}
return nil
}
func (d donut) ListBuckets() (map[string]Bucket, error) {
for _, node := range d.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err
}
for _, disk := range disks {
dirs, err := disk.ListDir(d.name)
if err != nil {
return nil, err
}
for _, dir := range dirs {
splitDir := strings.Split(dir.Name(), "$")
if len(splitDir) < 3 {
return nil, errors.New("corrupted backend")
}
bucketName := splitDir[0]
// we dont need this NewBucket once we cache these
bucket, err := NewBucket(bucketName, d.name, d.nodes)
if err != nil {
return nil, err
}
d.buckets[bucketName] = bucket
}
}
}
return d.buckets, nil
}
func (d donut) Heal() error {
return errors.New("Not Implemented")
}
func (d donut) Info() (nodeDiskMap map[string][]string, err error) {
nodeDiskMap = make(map[string][]string)
for nodeName, node := range d.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err
}
diskList := make([]string, len(disks))
for diskName, disk := range disks {
diskList[disk.GetOrder()] = diskName
}
nodeDiskMap[nodeName] = diskList
}
return nodeDiskMap, nil
}
func (d donut) AttachNode(node Node) error {
if node == nil {
return errors.New("invalid argument")
}
d.nodes[node.GetNodeName()] = node
return nil
}
func (d donut) DetachNode(node Node) error {
delete(d.nodes, node.GetNodeName())
return nil
}
func (d donut) SaveConfig() error {
nodeDiskMap := make(map[string][]string)
for hostname, node := range d.nodes {
disks, err := node.ListDisks()
if err != nil {
return err
}
for _, disk := range disks {
donutConfigPath := path.Join(d.name, donutConfig)
donutConfigWriter, err := disk.MakeFile(donutConfigPath)
defer donutConfigWriter.Close()
if err != nil {
return err
}
nodeDiskMap[hostname][disk.GetOrder()] = disk.GetPath()
jenc := json.NewEncoder(donutConfigWriter)
if err := jenc.Encode(nodeDiskMap); err != nil {
return err
}
}
}
return nil
}
func (d donut) LoadConfig() error {
return errors.New("Not Implemented")
}

@ -0,0 +1,223 @@
package donut
import (
"bytes"
"io"
"io/ioutil"
"os"
"path"
"strconv"
"testing"
"time"
. "github.com/minio-io/check"
)
// Test hooks gocheck's suite runner into the standard "go test" framework.
func Test(t *testing.T) { TestingT(t) }
// MySuite groups the donut integration tests for gocheck.
type MySuite struct{}

// Register the suite with gocheck.
var _ = Suite(&MySuite{})
// create a dummy TestNodeDiskMap
func createTestNodeDiskMap(p string) map[string][]string {
nodes := make(map[string][]string)
nodes["localhost"] = make([]string, 16)
for i := 0; i < len(nodes["localhost"]); i++ {
diskPath := path.Join(p, strconv.Itoa(i))
if _, err := os.Stat(diskPath); err != nil {
if os.IsNotExist(err) {
os.MkdirAll(diskPath, 0700)
}
}
nodes["localhost"][i] = diskPath
}
return nodes
}
// TestEmptyBucket verifies that a freshly created donut reports no buckets.
func (s *MySuite) TestEmptyBucket(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	// check buckets are empty
	buckets, err := donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, IsNil)
}
// TestBucketWithoutNameFails verifies that empty and all-whitespace bucket
// names are rejected by MakeBucket.
func (s *MySuite) TestBucketWithoutNameFails(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	// fail to create new bucket without a name
	err = donut.MakeBucket("")
	c.Assert(err, Not(IsNil))
	err = donut.MakeBucket(" ")
	c.Assert(err, Not(IsNil))
}
// TestMakeBucketAndList verifies that a created bucket shows up in
// ListBuckets.
func (s *MySuite) TestMakeBucketAndList(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	// create bucket
	err = donut.MakeBucket("foo")
	c.Assert(err, IsNil)
	// check bucket exists
	buckets, err := donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, DeepEquals, []string{"foo"})
}
// TestMakeBucketWithSameNameFails verifies that creating a bucket twice with
// the same name returns an error.
func (s *MySuite) TestMakeBucketWithSameNameFails(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	err = donut.MakeBucket("foo")
	c.Assert(err, IsNil)
	err = donut.MakeBucket("foo")
	c.Assert(err, Not(IsNil))
}
// TestCreateMultipleBucketsAndList verifies that ListBuckets returns all
// created buckets in sorted order.
func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	// add a second bucket
	err = donut.MakeBucket("foo")
	c.Assert(err, IsNil)
	err = donut.MakeBucket("bar")
	c.Assert(err, IsNil)
	buckets, err := donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, DeepEquals, []string{"bar", "foo"})
	err = donut.MakeBucket("foobar")
	c.Assert(err, IsNil)
	buckets, err = donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, DeepEquals, []string{"bar", "foo", "foobar"})
}
// TestNewObjectFailsWithoutBucket verifies that PutObject into a bucket that
// was never created returns an error.
func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	err = donut.PutObject("foo", "obj", nil, nil)
	c.Assert(err, Not(IsNil))
}
// TestNewObjectFailsWithEmptyName verifies that empty and all-whitespace
// object names are rejected by PutObject.
func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	err = donut.PutObject("foo", "", nil, nil)
	c.Assert(err, Not(IsNil))
	err = donut.PutObject("foo", " ", nil, nil)
	c.Assert(err, Not(IsNil))
}
// TestNewObjectCanBeWritten round-trips one object: write it, read it back,
// and check the stored metadata (md5, size, created timestamp).
func (s *MySuite) TestNewObjectCanBeWritten(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	err = donut.MakeBucket("foo")
	c.Assert(err, IsNil)
	metadata := make(map[string]string)
	metadata["contentType"] = "application/octet-stream"
	data := "Hello World"
	reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
	err = donut.PutObject("foo", "obj", reader, metadata)
	c.Assert(err, IsNil)
	reader, size, err := donut.GetObject("foo", "obj")
	c.Assert(err, IsNil)
	c.Assert(size, Equals, int64(len(data)))
	var actualData bytes.Buffer
	_, err = io.Copy(&actualData, reader)
	c.Assert(err, IsNil)
	c.Assert(actualData.Bytes(), DeepEquals, []byte(data))
	actualMetadata, err := donut.GetObjectMetadata("foo", "obj")
	c.Assert(err, IsNil)
	// known md5 of "Hello World"
	c.Assert("b10a8db164e0754105b7a99be72e3fe5", Equals, actualMetadata["md5"])
	c.Assert("11", Equals, actualMetadata["size"])
	_, err = time.Parse(time.RFC3339Nano, actualMetadata["created"])
	c.Assert(err, IsNil)
}
// TestMultipleNewObjects writes two objects, reads both back, and exercises
// ListObjects with prefix filtering and maxKeys truncation.
func (s *MySuite) TestMultipleNewObjects(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut("test", createTestNodeDiskMap(root))
	c.Assert(err, IsNil)
	c.Assert(donut.MakeBucket("foo"), IsNil)
	one := ioutil.NopCloser(bytes.NewReader([]byte("one")))
	err = donut.PutObject("foo", "obj1", one, nil)
	c.Assert(err, IsNil)
	two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
	err = donut.PutObject("foo", "obj2", two, nil)
	c.Assert(err, IsNil)
	obj1, size, err := donut.GetObject("foo", "obj1")
	c.Assert(err, IsNil)
	c.Assert(size, Equals, int64(len([]byte("one"))))
	var readerBuffer1 bytes.Buffer
	_, err = io.CopyN(&readerBuffer1, obj1, size)
	c.Assert(err, IsNil)
	c.Assert(readerBuffer1.Bytes(), DeepEquals, []byte("one"))
	obj2, size, err := donut.GetObject("foo", "obj2")
	c.Assert(err, IsNil)
	c.Assert(size, Equals, int64(len([]byte("two"))))
	var readerBuffer2 bytes.Buffer
	_, err = io.CopyN(&readerBuffer2, obj2, size)
	c.Assert(err, IsNil)
	c.Assert(readerBuffer2.Bytes(), DeepEquals, []byte("two"))
	// test list objects
	listObjects, _, isTruncated, err := donut.ListObjects("foo", "o", "", "", 1)
	c.Assert(err, IsNil)
	// maxKeys of 1 with two matches must report truncation
	c.Assert(isTruncated, Equals, true)
	c.Assert(listObjects, DeepEquals, []string{"obj1"})
	listObjects, _, isTruncated, err = donut.ListObjects("foo", "o", "", "", 10)
	c.Assert(err, IsNil)
	c.Assert(isTruncated, Equals, false)
	c.Assert(listObjects, DeepEquals, []string{"obj1", "obj2"})
}

@ -25,14 +25,25 @@ import (
// Donut interface
type Donut interface {
Storage
ObjectStorage
Management
}
// Storage object storage interface
type Storage interface {
// ObjectStorage interface
type ObjectStorage interface {
// Storage service Operations
GetBucketMetadata(bucket string) (map[string]string, error)
SetBucketMetadata(bucket string, metadata map[string]string) error
ListBuckets() ([]string, error)
MakeBucket(bucket string) error
ListBuckets() (map[string]Bucket, error)
// Bucket Operations
ListObjects(bucket, prefix, marker, delim string, maxKeys int) (result []string, prefixes []string, isTruncated bool, err error)
// Object Operations
GetObject(bucket, object string) (io.ReadCloser, int64, error)
GetObjectMetadata(bucket, object string) (map[string]string, error)
PutObject(bucket, object string, reader io.ReadCloser, metadata map[string]string) error
}
// Management is a donut management system interface
@ -57,20 +68,15 @@ type Encoder interface {
// Bucket interface
type Bucket interface {
ListNodes() (map[string]Node, error)
ListObjects() (map[string]Object, error)
GetObject(object string) (io.ReadCloser, int64, error)
PutObject(object, contentType string, contents io.Reader) error
WriteDonutObjectMetadata(object string, donutMetadata map[string]string) error
WriteObjectMetadata(object string, objectMetadata map[string]string) error
PutObject(object string, contents io.Reader, metadata map[string]string) error
}
// Object interface
type Object interface {
GetObjectMetadata() (map[string]string, error)
GetDonutObjectMetadata() (map[string]string, error)
}
// Node interface
@ -100,7 +106,6 @@ type Disk interface {
}
const (
donutObjectMetadataConfig = "donutObjectMetadata.json"
objectMetadataConfig = "objectMetadata.json"
donutConfig = "donutMetadata.json"
objectMetadataConfig = "objectMetadata.json"
donutConfig = "donutMetadata.json"
)

@ -0,0 +1,67 @@
package donut
import (
"encoding/json"
"errors"
"path"
)
// Heal repairs damaged or missing object slices across disks.
// Not yet implemented.
func (d donut) Heal() error {
	return errors.New("Not Implemented")
}
// Info reports the disk layout of the donut as a map from node name to the
// node's disk names, ordered by each disk's order index.
func (d donut) Info() (nodeDiskMap map[string][]string, err error) {
	nodeDiskMap = make(map[string][]string)
	for nodeName, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, err
		}
		diskList := make([]string, len(disks))
		for diskName, disk := range disks {
			// assumes disk orders form a dense 0..len(disks)-1 range;
			// an out-of-range GetOrder() would panic — TODO confirm
			diskList[disk.GetOrder()] = diskName
		}
		nodeDiskMap[nodeName] = diskList
	}
	return nodeDiskMap, nil
}
// AttachNode registers node in the donut's node map, keyed by its node name.
// A node with the same name replaces the previous entry.
func (d donut) AttachNode(node Node) error {
	if node == nil {
		return errors.New("invalid argument")
	}
	d.nodes[node.GetNodeName()] = node
	return nil
}
// DetachNode removes node from the donut's node map. Removing a node that
// was never attached is a no-op.
func (d donut) DetachNode(node Node) error {
	delete(d.nodes, node.GetNodeName())
	return nil
}
// SaveConfig writes the donut's node/disk layout as JSON into the
// donutConfig file on every disk of every node.
//
// Bug fixes over the previous version:
//   - donutConfigWriter.Close() was deferred BEFORE the MakeFile error
//     check, so a failed MakeFile dereferenced a nil writer;
//   - nodeDiskMap[hostname] was never allocated, so indexing it by
//     disk.GetOrder() panicked on a nil slice. It is now sized to the
//     node's disk count before use.
func (d donut) SaveConfig() error {
	nodeDiskMap := make(map[string][]string)
	for hostname, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return err
		}
		// allocate the per-node disk list before indexing by disk order
		nodeDiskMap[hostname] = make([]string, len(disks))
		for _, disk := range disks {
			donutConfigPath := path.Join(d.name, donutConfig)
			donutConfigWriter, err := disk.MakeFile(donutConfigPath)
			if err != nil {
				return err
			}
			// deferred until SaveConfig returns; the disk count bounds
			// the number of open writers
			defer donutConfigWriter.Close()
			nodeDiskMap[hostname][disk.GetOrder()] = disk.GetPath()
			jenc := json.NewEncoder(donutConfigWriter)
			if err := jenc.Encode(nodeDiskMap); err != nil {
				return err
			}
		}
	}
	return nil
}
// LoadConfig restores the donut layout from a previously saved config.
// Not yet implemented.
func (d donut) LoadConfig() error {
	return errors.New("Not Implemented")
}

@ -25,10 +25,9 @@ import (
)
type object struct {
name string
objectPath string
objectMetadata map[string]string
donutObjectMetadata map[string]string
name string
objectPath string
objectMetadata map[string]string
}
// NewObject - instantiate a new object
@ -54,16 +53,3 @@ func (o object) GetObjectMetadata() (map[string]string, error) {
o.objectMetadata = objectMetadata
return objectMetadata, nil
}
func (o object) GetDonutObjectMetadata() (map[string]string, error) {
donutObjectMetadata := make(map[string]string)
donutObjectMetadataBytes, err := ioutil.ReadFile(path.Join(o.objectPath, donutObjectMetadataConfig))
if err != nil {
return nil, err
}
if err := json.Unmarshal(donutObjectMetadataBytes, &donutObjectMetadata); err != nil {
return nil, err
}
o.donutObjectMetadata = donutObjectMetadata
return donutObjectMetadata, nil
}

@ -0,0 +1,162 @@
package donut
import (
"errors"
"io"
"sort"
"strconv"
"strings"
"github.com/minio-io/iodine"
)
// MakeBucket creates a new bucket after rejecting empty or all-whitespace
// names. The actual on-disk work happens in makeBucket.
func (d donut) MakeBucket(bucket string) error {
	if strings.TrimSpace(bucket) == "" {
		return errors.New("invalid argument")
	}
	return d.makeBucket(bucket)
}
// GetBucketMetadata returns the metadata map for bucket.
// Not yet implemented.
func (d donut) GetBucketMetadata(bucket string) (map[string]string, error) {
	return nil, errors.New("Not implemented")
}
// SetBucketMetadata replaces the metadata map for bucket.
// Not yet implemented.
func (d donut) SetBucketMetadata(bucket string, metadata map[string]string) error {
	return errors.New("Not implemented")
}
// ListBuckets refreshes the cached bucket map from disk and returns the
// bucket names in sorted order. A donut with no buckets yields a nil slice.
func (d donut) ListBuckets() (results []string, err error) {
	if err = d.getAllBuckets(); err != nil {
		return nil, iodine.New(err, nil)
	}
	// appending to the nil zero value keeps the "no buckets" result nil
	for name := range d.buckets {
		results = append(results, name)
	}
	sort.Strings(results)
	return results, nil
}
// ListObjects returns up to maxkeys object names in bucket that start with
// prefix, plus the common prefixes produced by delimiter grouping (S3-style
// listing). The boolean result reports whether the listing was truncated at
// maxkeys. The marker parameter is accepted but not yet honored.
func (d donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error) {
	// TODO: Marker is not yet handled please handle it
	errParams := map[string]string{
		"bucket":    bucket,
		"prefix":    prefix,
		"marker":    marker,
		"delimiter": delimiter,
		"maxkeys":   strconv.Itoa(maxkeys),
	}
	err := d.getAllBuckets()
	if err != nil {
		return nil, nil, false, iodine.New(err, errParams)
	}
	if _, ok := d.buckets[bucket]; !ok {
		return nil, nil, false, iodine.New(errors.New("bucket does not exist"), errParams)
	}
	objectList, err := d.buckets[bucket].ListObjects()
	if err != nil {
		return nil, nil, false, iodine.New(err, errParams)
	}
	var donutObjects []string
	for objectName := range objectList {
		donutObjects = append(donutObjects, objectName)
	}
	if maxkeys <= 0 {
		// default page size, matching S3's ListObjects default
		maxkeys = 1000
	}
	if strings.TrimSpace(prefix) != "" {
		// keep only names under prefix, then strip it so the delimiter
		// grouping below operates on the remainder of each name
		donutObjects = filterPrefix(donutObjects, prefix)
		donutObjects = removePrefix(donutObjects, prefix)
	}
	var actualObjects []string
	var commonPrefixes []string
	var isTruncated bool
	if strings.TrimSpace(delimiter) != "" {
		// names without the delimiter are real keys; the rest collapse
		// into their first component as deduplicated common prefixes
		actualObjects = filterDelimited(donutObjects, delimiter)
		commonPrefixes = filterNotDelimited(donutObjects, delimiter)
		commonPrefixes = extractDir(commonPrefixes, delimiter)
		commonPrefixes = uniqueObjects(commonPrefixes)
	} else {
		actualObjects = donutObjects
	}
	var results []string
	for _, objectName := range actualObjects {
		if len(results) >= maxkeys {
			isTruncated = true
			break
		}
		// re-attach the stripped prefix to produce full object names
		results = append(results, prefix+objectName)
	}
	sort.Strings(results)
	return results, commonPrefixes, isTruncated, nil
}
// PutObject streams reader into bucket/object, attaching the supplied
// metadata. Empty or all-whitespace bucket/object names are rejected.
func (d donut) PutObject(bucket, object string, reader io.ReadCloser, metadata map[string]string) error {
	errParams := map[string]string{
		"bucket": bucket,
		"object": object,
	}
	switch {
	case strings.TrimSpace(bucket) == "":
		return iodine.New(errors.New("invalid argument"), errParams)
	case strings.TrimSpace(object) == "":
		return iodine.New(errors.New("invalid argument"), errParams)
	}
	if err := d.getAllBuckets(); err != nil {
		return iodine.New(err, errParams)
	}
	bkt, ok := d.buckets[bucket]
	if !ok {
		return iodine.New(errors.New("bucket does not exist"), nil)
	}
	if err := bkt.PutObject(object, reader, metadata); err != nil {
		return iodine.New(err, errParams)
	}
	return nil
}
// GetObject returns a streaming reader for bucket/object together with the
// object's size in bytes. Empty or all-whitespace names are rejected.
func (d donut) GetObject(bucket, object string) (reader io.ReadCloser, size int64, err error) {
	errParams := map[string]string{
		"bucket": bucket,
		"object": object,
	}
	switch {
	case strings.TrimSpace(bucket) == "":
		return nil, 0, iodine.New(errors.New("invalid argument"), errParams)
	case strings.TrimSpace(object) == "":
		return nil, 0, iodine.New(errors.New("invalid argument"), errParams)
	}
	if err = d.getAllBuckets(); err != nil {
		return nil, 0, iodine.New(err, nil)
	}
	bkt, ok := d.buckets[bucket]
	if !ok {
		return nil, 0, iodine.New(errors.New("bucket does not exist"), errParams)
	}
	return bkt.GetObject(object)
}
// GetObjectMetadata returns the stored metadata map for bucket/object
// (keys visible in this file include md5, size, created, contentType).
func (d donut) GetObjectMetadata(bucket, object string) (map[string]string, error) {
	errParams := map[string]string{
		"bucket": bucket,
		"object": object,
	}
	err := d.getAllBuckets()
	if err != nil {
		return nil, iodine.New(err, errParams)
	}
	if _, ok := d.buckets[bucket]; !ok {
		return nil, iodine.New(errors.New("bucket does not exist"), errParams)
	}
	objectList, err := d.buckets[bucket].ListObjects()
	if err != nil {
		return nil, iodine.New(err, errParams)
	}
	objectStruct, ok := objectList[object]
	if !ok {
		return nil, iodine.New(errors.New("object does not exist"), errParams)
	}
	return objectStruct.GetObjectMetadata()
}

@ -0,0 +1,129 @@
package donut
import (
"errors"
"fmt"
"path"
"sort"
"strings"
)
// filterPrefix returns the subset of names that begin with prefix, in their
// original order. With no matches the result is nil.
func filterPrefix(objects []string, prefix string) []string {
	var matched []string
	for i := range objects {
		if strings.HasPrefix(objects[i], prefix) {
			matched = append(matched, objects[i])
		}
	}
	return matched
}
// removePrefix strips prefix from every name that carries it; names without
// the prefix pass through unchanged. A nil input yields a nil result.
func removePrefix(objects []string, prefix string) []string {
	var trimmed []string
	for i := range objects {
		trimmed = append(trimmed, strings.TrimPrefix(objects[i], prefix))
	}
	return trimmed
}
// filterDelimited returns only the names that do NOT contain delim — i.e.
// the "leaf" keys of an S3-style delimited listing.
func filterDelimited(objects []string, delim string) []string {
	var leaves []string
	for i := range objects {
		if strings.Contains(objects[i], delim) {
			continue
		}
		leaves = append(leaves, objects[i])
	}
	return leaves
}
// filterNotDelimited returns only the names that DO contain delim — the
// candidates for common-prefix grouping in a delimited listing.
func filterNotDelimited(objects []string, delim string) []string {
	var nested []string
	for i := range objects {
		if !strings.Contains(objects[i], delim) {
			continue
		}
		nested = append(nested, objects[i])
	}
	return nested
}
// extractDir maps each delimited name to its leading "directory" component
// terminated by the delimiter itself, matching S3 common-prefix semantics
// (e.g. "photos$2015" with delimiter "$" becomes "photos$").
//
// Bug fix: the previous version always appended "/" regardless of delim,
// which produced wrong common prefixes for any non-"/" delimiter. Behavior
// for the usual "/" delimiter is unchanged.
func extractDir(objects []string, delim string) []string {
	var results []string
	for _, object := range objects {
		dir := strings.SplitN(object, delim, 2)[0]
		results = append(results, dir+delim)
	}
	return results
}
// uniqueObjects deduplicates the given names and returns them sorted.
func uniqueObjects(objects []string) []string {
	seen := make(map[string]struct{}, len(objects))
	for _, object := range objects {
		seen[object] = struct{}{}
	}
	var results []string
	for object := range seen {
		results = append(results, object)
	}
	sort.Strings(results)
	return results
}
// makeBucket registers bucketName in the in-memory bucket map and creates a
// "<bucket>$<node-number>$<disk-order>" slice directory on every disk of
// every node. Fails if the bucket already exists.
func (d donut) makeBucket(bucketName string) error {
	if err := d.getAllBuckets(); err != nil {
		return err
	}
	if _, ok := d.buckets[bucketName]; ok {
		return errors.New("bucket exists")
	}
	newBucket, err := NewBucket(bucketName, d.name, d.nodes)
	if err != nil {
		return err
	}
	d.buckets[bucketName] = newBucket
	nodeIndex := 0
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return err
		}
		for _, disk := range disks {
			sliceDir := fmt.Sprintf("%s$%d$%d", bucketName, nodeIndex, disk.GetOrder())
			if err := disk.MakeDir(path.Join(d.name, sliceDir)); err != nil {
				return err
			}
		}
		nodeIndex++
	}
	return nil
}
// getAllBuckets scans every disk of every node for bucket slice directories
// (named "<bucket>$<node>$<order>") and (re)populates d.buckets with a
// Bucket handle for each discovered name.
func (d donut) getAllBuckets() error {
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return err
		}
		for _, disk := range disks {
			dirs, err := disk.ListDir(d.name)
			if err != nil {
				return err
			}
			for _, dir := range dirs {
				splitDir := strings.Split(dir.Name(), "$")
				if len(splitDir) < 3 {
					// any directory not matching the slice naming
					// scheme means the backend has been tampered with
					return errors.New("corrupted backend")
				}
				bucketName := splitDir[0]
				// we dont need this NewBucket once we cache these
				bucket, err := NewBucket(bucketName, d.name, d.nodes)
				if err != nil {
					return err
				}
				d.buckets[bucketName] = bucket
			}
		}
	}
	return nil
}

@ -0,0 +1,45 @@
{
"ImportPath": "github.com/minio-io/objectdriver",
"GoVersion": "go1.4",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/minio-io/check",
"Rev": "bc4e66da8cd7ff58a4b9b84301f906352b8f2c94"
},
{
"ImportPath": "github.com/minio-io/donut",
"Rev": "1df31a9834eb6acef9ff0fb7ee3e597776faf966"
},
{
"ImportPath": "github.com/minio-io/erasure",
"Rev": "8a72b14991a6835b4d30403e7cb201f373b7cb3a"
},
{
"ImportPath": "github.com/minio-io/iodine",
"Rev": "55cc4d4256c68fbd6f0775f1a25e37e6a2f6457e"
},
{
"ImportPath": "github.com/minio-io/minio/pkg/utils/log",
"Rev": "81e4a3332cd3e1b44eb21fdbb7171ba715625ab7"
},
{
"ImportPath": "github.com/minio-io/minio/pkg/utils/split",
"Rev": "81e4a3332cd3e1b44eb21fdbb7171ba715625ab7"
},
{
"ImportPath": "github.com/stretchr/objx",
"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
},
{
"ImportPath": "github.com/stretchr/testify/assert",
"Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325"
},
{
"ImportPath": "github.com/stretchr/testify/mock",
"Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325"
}
]
}

@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

@ -0,0 +1,66 @@
# Root of this project inside the GOPATH workspace.
MINIOPATH=$(GOPATH)/src/github.com/minio-io/objectdriver

all: getdeps install

# Verify system-level build dependencies (compiler, yasm, git, ...).
checkdeps:
	@echo "Checking deps:"
	@(env bash $(PWD)/buildscripts/checkdeps.sh)

# Ensure the checkout lives at the expected GOPATH location.
checkgopath:
	@echo "Checking if project is at ${MINIOPATH}"
	@if [ ! -d ${MINIOPATH} ]; then echo "Project not found in $GOPATH, please follow instructions provided at https://github.com/Minio-io/minio/blob/master/CONTRIBUTING.md#setup-your-minio-github-repository" && exit 1; fi

# Install the Go tooling the verifier targets below depend on.
getdeps: checkdeps checkgopath
	@go get github.com/minio-io/godep && echo "Installed godep:"
	@go get github.com/golang/lint/golint && echo "Installed golint:"
	@go get golang.org/x/tools/cmd/vet && echo "Installed vet:"
	@go get github.com/fzipp/gocyclo && echo "Installed gocyclo:"

verifiers: getdeps vet fmt lint cyclo

vet:
	@echo "Running $@:"
	@go vet ./...

# Fail when any non-vendored file is not gofmt -s clean.
fmt:
	@echo "Running $@:"
	@test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \
		echo "+ please format Go code with 'gofmt -s'"

lint:
	@echo "Running $@:"
	@test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"

# Flag functions with cyclomatic complexity over 15.
cyclo:
	@echo "Running $@:"
	@test -z "$$(gocyclo -over 15 . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"

build-all: verifiers
	@echo "Building Libraries:"
	@godep go generate github.com/minio-io/erasure
	@godep go generate ./...
	@godep go build -a ./... # have no stale packages

test-all: build-all
	@echo "Running Test Suites:"
	@godep go test -race ./...

test: test-all

minio: build-all test-all

install: minio

save: restore
	@godep save ./...

restore:
	@godep restore

env:
	@godep go env

docs-deploy:
	@mkdocs gh-deploy --clean

clean:
	@echo "Cleaning up all the generated files:"
	@rm -fv cover.out

@ -18,6 +18,8 @@ package drivers
import (
"bytes"
"crypto/md5"
"encoding/base64"
"math/rand"
"strconv"
@ -61,9 +63,14 @@ func testMultipleObjectCreation(c *check.C, create func() Driver) {
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
}
hasher := md5.New()
hasher.Write([]byte(randomString))
md5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
key := "obj" + strconv.Itoa(i)
objects[key] = []byte(randomString)
err := drivers.CreateObject("bucket", key, "", "", bytes.NewBufferString(randomString))
err := drivers.CreateObject("bucket", key, "", md5Sum, bytes.NewBufferString(randomString))
c.Assert(err, check.IsNil)
}
@ -192,9 +199,18 @@ func testPaging(c *check.C, create func() Driver) {
func testObjectOverwriteFails(c *check.C, create func() Driver) {
drivers := create()
drivers.CreateBucket("bucket")
err := drivers.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one"))
hasher1 := md5.New()
hasher1.Write([]byte("one"))
md5Sum1 := base64.StdEncoding.EncodeToString(hasher1.Sum(nil))
err := drivers.CreateObject("bucket", "object", "", md5Sum1, bytes.NewBufferString("one"))
c.Assert(err, check.IsNil)
err = drivers.CreateObject("bucket", "object", "", "", bytes.NewBufferString("three"))
hasher2 := md5.New()
hasher2.Write([]byte("three"))
md5Sum2 := base64.StdEncoding.EncodeToString(hasher2.Sum(nil))
err = drivers.CreateObject("bucket", "object", "", md5Sum2, bytes.NewBufferString("three"))
c.Assert(err, check.Not(check.IsNil))
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "object")
@ -221,8 +237,13 @@ func testPutObjectInSubdir(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("bucket")
c.Assert(err, check.IsNil)
err = drivers.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world"))
hasher := md5.New()
hasher.Write([]byte("hello world"))
md5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
err = drivers.CreateObject("bucket", "dir1/dir2/object", "", md5Sum, bytes.NewBufferString("hello world"))
c.Assert(err, check.IsNil)
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))

@ -0,0 +1,201 @@
#!/usr/bin/env bash
#
# Minimalist Object Storage, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# _init records the minimum required version for every build
# dependency and resets the list of missing tools; run once before
# main.
_init() {
    ## Minimum required versions for build dependencies
    GCC_VERSION="4.0"
    CLANG_VERSION="3.5"
    YASM_VERSION="1.2.0"
    GIT_VERSION="1.0"
    GO_VERSION="1.4"
    OSX_VERSION="10.8"
    # `uname -sm` yields "<OS> <arch>"; parsed later with
    # ${UNAME%% *} (OS) and ${UNAME##* } (arch).
    UNAME=$(uname -sm)

    ## Check all dependencies are present
    MISSING=""
}
###
#
# Takes two arguments
# arg1: version number in `x.x.x` format
# arg2: version number in `x.x.x` format
#
# example: check_version "$version1" "$version2"
#
# returns:
# 0 - Installed version is equal to required
# 1 - Installed version is greater than required
# 2 - Installed version is lesser than required
# 3 - If args have length zero
#
####
check_version () {
    ## validate args
    [[ -z "$1" ]] && return 3
    [[ -z "$2" ]] && return 3

    # Identical strings compare equal without any per-field work.
    if [[ $1 == $2 ]]; then
        return 0
    fi

    # Split both version strings on '.' into arrays.
    local IFS=.
    local i ver1=($1) ver2=($2)
    # fill empty fields in ver1 with zeros
    for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
        ver1[i]=0
    done
    for ((i=0; i<${#ver1[@]}; i++)); do
        if [[ -z ${ver2[i]} ]]; then
            # fill empty fields in ver2 with zeros
            ver2[i]=0
        fi
        # Compare field-by-field as base-10 numbers; the 10# prefix
        # keeps leading zeros from being read as octal.
        if ((10#${ver1[i]} > 10#${ver2[i]})); then
            return 1
        fi
        if ((10#${ver1[i]} < 10#${ver2[i]})); then
            ## Installed version is lesser than required - Bad condition
            return 2
        fi
    done
    return 0
}
# check_golang_env verifies that both GOROOT and GOPATH are set,
# printing installation instructions and aborting otherwise.
#
# Fix: the previous version ran `echo ${GOROOT:?} 2>&1 >/dev/null`.
# In a non-interactive shell the ${VAR:?} expansion aborts the whole
# script with a terse shell error before the friendly message can
# print, and the redirections were ordered so stderr still reached the
# terminal. A plain emptiness test gives the intended behavior.
check_golang_env() {
    if [ -z "${GOROOT}" ]; then
        echo "ERROR"
        echo "GOROOT environment variable missing, please refer to Go installation document"
        echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13"
        exit 1
    fi

    if [ -z "${GOPATH}" ]; then
        echo "ERROR"
        echo "GOPATH environment variable missing, please refer to Go installation document"
        echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13"
        exit 1
    fi
}
# is_supported_os validates the host operating system: Linux is always
# accepted, Darwin must be at least ${OSX_VERSION}, anything else
# aborts.
#
# Fix: the fallback pattern was the quoted literal "*", which only
# matches an OS actually named `*`; unsupported systems therefore fell
# through silently. The unquoted wildcard makes the guard fire.
# NOTE(review): `die` is not defined anywhere in this script — confirm
# it is provided by the caller, or replace with echo + exit.
is_supported_os() {
    case ${UNAME%% *} in
        "Linux")
            os="linux"
            ;;
        "Darwin")
            osx_host_version=$(env sw_vers -productVersion)
            check_version "${osx_host_version}" "${OSX_VERSION}"
            [[ $? -ge 2 ]] && die "Minimum OSX version supported is ${OSX_VERSION}"
            ;;
        *)
            echo "Exiting.. unsupported operating system found"
            exit 1
            ;;
    esac
}
# is_supported_arch ensures the machine architecture is x86_64; on any
# other architecture it prints a diagnostic and aborts.
is_supported_arch() {
    case ${UNAME##* } in
        "x86_64")
            # Supported; fall through.
            ;;
        *)
            echo "Invalid arch: ${UNAME} not supported, please use x86_64/amd64"
            exit 1
            ;;
    esac
}
# check_deps compares each installed build tool against the minimum
# version recorded by _init, appending every tool that is too old or
# absent to the MISSING list (check_version returns >= 2 for "lesser
# than required" or unparsable/empty versions).
#
# Fix: the compiler case's default pattern was the quoted literal "*",
# which never matches as a wildcard; use the unquoted form.
check_deps() {
    check_version "$(env go version 2>/dev/null | sed 's/^.* go\([0-9.]*\).*$/\1/')" "${GO_VERSION}"
    if [ $? -ge 2 ]; then
        MISSING="${MISSING} golang(1.4)"
    fi

    check_version "$(env git --version 2>/dev/null | sed -e 's/^.* \([0-9.\].*\).*$/\1/' -e 's/^\([0-9.\]*\).*/\1/g')" "${GIT_VERSION}"
    if [ $? -ge 2 ]; then
        MISSING="${MISSING} git"
    fi

    # Compiler requirement differs per platform: gcc on Linux, the
    # clang shim (still invoked as gcc) on OSX.
    case ${UNAME%% *} in
        "Linux")
            check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${GCC_VERSION}"
            if [ $? -ge 2 ]; then
                MISSING="${MISSING} build-essential"
            fi
            ;;
        "Darwin")
            check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${CLANG_VERSION}"
            if [ $? -ge 2 ]; then
                MISSING="${MISSING} xcode-cli"
            fi
            ;;
        *)
            ;;
    esac

    check_version "$(env yasm --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${YASM_VERSION}"
    if [ $? -ge 2 ]; then
        MISSING="${MISSING} yasm(1.2.0)"
    fi

    env mkdocs help >/dev/null 2>&1
    if [ $? -ne 0 ]; then
        MISSING="${MISSING} mkdocs"
    fi
}
# main drives the dependency check: validate the architecture and OS,
# verify the Go environment variables, then report any missing build
# tools and abort if the list is non-empty.
main() {
    echo -n "Check for supported arch.. "
    is_supported_arch
    echo -n "Check for supported os.. "
    is_supported_os
    echo -n "Checking if proper environment variables are set.. "
    check_golang_env
    echo "Done"
    echo "Using GOPATH=${GOPATH} and GOROOT=${GOROOT}"
    echo -n "Checking dependencies for Minio.. "
    check_deps

    ## If dependencies are missing, warn the user and abort
    if [ "x${MISSING}" != "x" ]; then
        echo "ERROR"
        echo
        echo "The following build tools are missing:"
        echo
        echo "** ${MISSING} **"
        echo
        echo "Please install them "
        echo "${MISSING}"
        echo
        echo "Follow https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md for further instructions"
        exit 1
    fi
    echo "Done"
}

_init && main "$@"

@ -0,0 +1,247 @@
package main
import (
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"sort"
"strings"
)
var exitCode int
var dirs []string
// appendUniq appends i to slice unless it is already present,
// preserving the original insertion order.
func appendUniq(slice []string, i string) []string {
	for _, existing := range slice {
		if existing == i {
			return slice
		}
	}
	return append(slice, i)
}
// errorf reports a verification problem on standard error, prefixed
// with the program name and terminated by a newline, and marks the
// process exit code as failing.
func errorf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	fmt.Fprintf(os.Stderr, "verifier: %s\n", msg)
	exitCode = 2
}
// goPackage bundles a parsed package with the bookkeeping the
// verifier needs: declared names, exported functions missing doc
// comments, and every identifier observed in use.
type goPackage struct {
	p  *ast.Package
	fs *token.FileSet
	// decl maps a declared identifier to its declaration node.
	decl map[string]ast.Node
	// missingcomments maps exported function names that lack a doc
	// comment to their declaration node.
	missingcomments map[string]ast.Node
	// used marks identifiers that appear anywhere in the package.
	used map[string]bool
}

// usedWalker is a view of goPackage whose Visit marks identifiers as
// used; the maps are shared with the originating goPackage.
type usedWalker goPackage

// Walks through the AST marking used identifiers.
func (p *usedWalker) Visit(node ast.Node) ast.Visitor {
	// just be stupid and mark all *ast.Ident
	switch n := node.(type) {
	case *ast.Ident:
		p.used[n.Name] = true
	}
	return p
}
// report records a single finding: a name and the position of its
// declaration.
type report struct {
	pos  token.Pos
	name string
}

// reports implements sort.Interface, ordering findings by source
// position.
type reports []report

// Len returns the number of findings.
func (l reports) Len() int { return len(l) }

// Less orders findings by their position in the source.
func (l reports) Less(i, j int) bool { return l[i].pos < l[j].pos }

// Swap exchanges two findings.
func (l reports) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
// Visits files for used nodes.
// Visit dispatches a usedWalker over just the interesting parts of a
// declaration — value initializers and types, function bodies,
// function signatures, and type expressions — rather than walking
// whole files, so declaration names themselves are not self-marked.
func (p *goPackage) Visit(node ast.Node) ast.Visitor {
	u := usedWalker(*p) // hopefully p fields are references.
	switch n := node.(type) {
	// don't walk whole file, but only:
	case *ast.ValueSpec:
		// - variable initializers
		for _, value := range n.Values {
			ast.Walk(&u, value)
		}
		// variable types.
		if n.Type != nil {
			ast.Walk(&u, n.Type)
		}
	case *ast.BlockStmt:
		// - function bodies
		for _, stmt := range n.List {
			ast.Walk(&u, stmt)
		}
	case *ast.FuncDecl:
		// - function signatures
		ast.Walk(&u, n.Type)
	case *ast.TypeSpec:
		// - type declarations
		ast.Walk(&u, n.Type)
	}
	return p
}
// getAllMinioPkgs is a filepath.Walk callback that collects every
// directory in the tree into the package-level dirs slice, skipping
// vendored Godeps trees entirely.
func getAllMinioPkgs(path string, fl os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if fl.IsDir() {
		// Skip godeps. Returning filepath.SkipDir prunes the whole
		// subtree in one step instead of string-testing every nested
		// path; the collected set of directories is unchanged.
		if strings.Contains(path, "Godeps") {
			return filepath.SkipDir
		}
		dirs = appendUniq(dirs, path)
	}
	return nil
}
// doDir parses every non-test .go file in the named directory and
// runs the verification passes over each package found there.
func doDir(name string) {
	// Accept regular .go files, excluding _test.go files.
	notests := func(info os.FileInfo) bool {
		return !info.IsDir() &&
			strings.HasSuffix(info.Name(), ".go") &&
			!strings.HasSuffix(info.Name(), "_test.go")
	}
	fs := token.NewFileSet()
	pkgs, err := parser.ParseDir(fs, name, notests, parser.ParseComments|parser.Mode(0))
	if err != nil {
		errorf("%s", err)
		return
	}
	for _, pkg := range pkgs {
		doPackage(fs, pkg)
	}
}
// doDecl registers the names introduced by one top-level declaration
// in p.decl, and records exported functions lacking a doc comment in
// p.missingcomments. It reports true when the declaration is exempt
// from checking (func main, or any unexported function).
func doDecl(p *goPackage, decl interface{}, cmap ast.CommentMap) bool {
	switch n := decl.(type) {
	case *ast.GenDecl:
		// var, const, types
		for _, spec := range n.Specs {
			switch s := spec.(type) {
			case *ast.ValueSpec:
				// constants and variables.
				for _, name := range s.Names {
					p.decl[name.Name] = n
				}
			case *ast.TypeSpec:
				// type definitions.
				p.decl[s.Name.Name] = n
			}
		}
	case *ast.FuncDecl:
		// if function is 'main', never check
		if n.Name.Name == "main" {
			return true
		}
		// Do not be strict on non-exported functions
		if !ast.IsExported(n.Name.Name) {
			return true
		}
		// Do not be strict for field list functions
		// if n.Recv != nil {
		//	continue
		//}
		// Be strict for global functions: absence from the comment
		// map means the function has no associated doc comment.
		_, ok := cmap[n]
		if ok == false {
			p.missingcomments[n.Name.Name] = n
		}
		// function declarations
		// TODO(remy): do methods
		if n.Recv == nil {
			p.decl[n.Name.Name] = n
		}
	}
	return false
}
// doPackage runs the verification passes over one parsed package:
// collect declarations, mark used identifiers, then report unused
// names and exported functions missing doc comments via errorf.
func doPackage(fs *token.FileSet, pkg *ast.Package) {
	p := &goPackage{
		p:               pkg,
		fs:              fs,
		decl:            make(map[string]ast.Node),
		missingcomments: make(map[string]ast.Node),
		used:            make(map[string]bool),
	}
	for _, file := range pkg.Files {
		cmap := ast.NewCommentMap(fs, file, file.Comments)
		for _, decl := range file.Decls {
			if doDecl(p, decl, cmap) {
				continue
			}
		}
	}
	// init() is always used
	p.used["init"] = true
	if pkg.Name != "main" {
		// exported names are marked used for non-main packages.
		for name := range p.decl {
			if ast.IsExported(name) {
				p.used[name] = true
			}
		}
	} else {
		// in main programs, main() is called.
		p.used["main"] = true
	}
	for _, file := range pkg.Files {
		// walk file looking for used nodes.
		ast.Walk(p, file)
	}
	// reports: collect declared-but-unused names, sorted by position
	// so output order is deterministic.
	reports := reports(nil)
	for name, node := range p.decl {
		if !p.used[name] {
			reports = append(reports, report{node.Pos(), name})
		}
	}
	sort.Sort(reports)
	for _, report := range reports {
		errorf("%s: %s is unused", fs.Position(report.pos), report.name)
	}
	for name, node := range p.missingcomments {
		errorf("%s: comment is missing for 'func %s'", fs.Position(node.Pos()), name)
	}
}
// main verifies each directory named on the command line (walking its
// subtree for packages), or the current directory when no arguments
// are given, then exits non-zero if any problem was reported.
func main() {
	flag.Parse()
	if flag.NArg() == 0 {
		doDir(".")
	} else {
		for _, name := range flag.Args() {
			// Is it a directory?
			if fi, err := os.Stat(name); err == nil && fi.IsDir() {
				err := filepath.Walk(name, getAllMinioPkgs)
				if err != nil {
					errorf(err.Error())
				}
				for _, dir := range dirs {
					doDir(dir)
				}
			} else {
				errorf("not a directory: %s", name)
			}
		}
	}
	os.Exit(exitCode)
}

@ -17,6 +17,9 @@
package donut
import (
"bytes"
"encoding/base64"
"encoding/hex"
"errors"
"io"
"os"
@ -102,7 +105,7 @@ func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error)
if err != nil {
return nil, err
}
for name := range buckets {
for _, name := range buckets {
result := drivers.BucketMetadata{
Name: name,
// TODO Add real created date
@ -149,14 +152,7 @@ func (d donutDriver) GetObject(target io.Writer, bucketName, objectName string)
if objectName == "" || strings.TrimSpace(objectName) == "" {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return 0, iodine.New(err, nil)
}
if _, ok := buckets[bucketName]; !ok {
return 0, drivers.BucketNotFound{Bucket: bucketName}
}
reader, size, err := buckets[bucketName].GetObject(objectName)
reader, size, err := d.donut.GetObject(bucketName, objectName)
if err != nil {
return 0, drivers.ObjectNotFound{
Bucket: bucketName,
@ -176,7 +172,6 @@ func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string
"start": strconv.FormatInt(start, 10),
"length": strconv.FormatInt(length, 10),
}
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
@ -186,14 +181,7 @@ func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string
if start < 0 {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return 0, iodine.New(err, nil)
}
if _, ok := buckets[bucketName]; !ok {
return 0, drivers.BucketNotFound{Bucket: bucketName}
}
reader, size, err := buckets[bucketName].GetObject(objectName)
reader, size, err := d.donut.GetObject(bucketName, objectName)
defer reader.Close()
if err != nil {
return 0, drivers.ObjectNotFound{
@ -209,7 +197,10 @@ func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string
return 0, iodine.New(err, errParams)
}
n, err := io.CopyN(w, reader, length)
return n, iodine.New(err, errParams)
if err != nil {
return 0, iodine.New(err, errParams)
}
return n, nil
}
// GetObjectMetadata retrieves an object's metadata
@ -219,59 +210,31 @@ func (d donutDriver) GetObjectMetadata(bucketName, objectName, prefixName string
"objectName": objectName,
"prefixName": prefixName,
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
if _, ok := buckets[bucketName]; !ok {
return drivers.ObjectMetadata{}, drivers.BucketNotFound{Bucket: bucketName}
}
objectList, err := buckets[bucketName].ListObjects()
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
object, ok := objectList[objectName]
if !ok {
// return ObjectNotFound quickly on an error, API needs this to handle invalid requests
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
}
donutObjectMetadata, err := object.GetDonutObjectMetadata()
if err != nil {
// return ObjectNotFound quickly on an error, API needs this to handle invalid requests
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
}
objectMetadata, err := object.GetObjectMetadata()
metadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
if err != nil {
// return ObjectNotFound quickly on an error, API needs this to handle invalid requests
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
}
created, err := time.Parse(time.RFC3339Nano, donutObjectMetadata["created"])
created, err := time.Parse(time.RFC3339Nano, metadata["created"])
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, nil)
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
size, err := strconv.ParseInt(donutObjectMetadata["size"], 10, 64)
size, err := strconv.ParseInt(metadata["size"], 10, 64)
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, nil)
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
driversObjectMetadata := drivers.ObjectMetadata{
objectMetadata := drivers.ObjectMetadata{
Bucket: bucketName,
Key: objectName,
ContentType: objectMetadata["contentType"],
ContentType: metadata["contentType"],
Created: created,
Md5: donutObjectMetadata["md5"],
Md5: metadata["md5"],
Size: size,
}
return driversObjectMetadata, nil
return objectMetadata, nil
}
type byObjectKey []drivers.ObjectMetadata
@ -285,42 +248,20 @@ func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketReso
errParams := map[string]string{
"bucketName": bucketName,
}
buckets, err := d.donut.ListBuckets()
actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName,
resources.Prefix,
resources.Marker,
resources.Delimiter,
resources.Maxkeys)
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
if _, ok := buckets[bucketName]; !ok {
return nil, drivers.BucketResourcesMetadata{}, drivers.BucketNotFound{Bucket: bucketName}
}
objectList, err := buckets[bucketName].ListObjects()
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
var objects []string
for key := range objectList {
objects = append(objects, key)
}
sort.Strings(objects)
if resources.Maxkeys <= 0 || resources.Maxkeys > 1000 {
resources.Maxkeys = 1000
}
// Populate filtering mode
resources.Mode = drivers.GetMode(resources)
// filter objects based on resources.Prefix and resources.Delimiter
actualObjects, commonPrefixes := d.filter(objects, resources)
resources.CommonPrefixes = commonPrefixes
resources.IsTruncated = isTruncated
var results []drivers.ObjectMetadata
for _, objectName := range actualObjects {
if len(results) >= resources.Maxkeys {
resources.IsTruncated = true
break
}
if _, ok := objectList[objectName]; !ok {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(errors.New("object corrupted"), errParams)
}
objectMetadata, err := objectList[objectName].GetDonutObjectMetadata()
objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
@ -356,21 +297,39 @@ func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedM
if objectName == "" || strings.TrimSpace(objectName) == "" {
return iodine.New(errors.New("invalid argument"), errParams)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return iodine.New(err, errParams)
}
if _, ok := buckets[bucketName]; !ok {
return drivers.BucketNotFound{Bucket: bucketName}
}
if contentType == "" {
if strings.TrimSpace(contentType) == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
err = buckets[bucketName].PutObject(objectName, contentType, reader)
metadata := make(map[string]string)
metadata["contentType"] = contentType
err := d.donut.PutObject(bucketName, objectName, ioutil.NopCloser(reader), metadata)
if err != nil {
return iodine.New(err, errParams)
}
// handle expectedMd5sum
if strings.TrimSpace(expectedMd5sum) != "" {
objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
if err != nil {
return iodine.New(err, errParams)
}
expectedMd5sumBytes, err := base64.StdEncoding.DecodeString(expectedMd5sum)
if err != nil {
return iodine.New(err, errParams)
}
if _, ok := objectMetadata["md5"]; !ok {
return iodine.New(errors.New("corrupted metadata"), nil)
}
actualMd5sumBytes, err := hex.DecodeString(objectMetadata["md5"])
if err != nil {
return iodine.New(err, errParams)
}
if !bytes.Equal(expectedMd5sumBytes, actualMd5sumBytes) {
return drivers.BadDigest{
Md5: expectedMd5sum,
Bucket: bucketName,
Key: objectName,
}
}
}
return nil
}

@ -1,68 +0,0 @@
package donut
import (
"bufio"
"bytes"
"strings"
"github.com/minio-io/objectdriver"
)
// delimiter returns the leading portion of object up to and including
// the first occurrence of the delimiter's first byte; when that byte
// never occurs, the whole object string is returned (the read error
// from hitting EOF is deliberately discarded).
func delimiter(object, delim string) string {
	reader := bufio.NewReader(bytes.NewBufferString(object))
	sep, _ := strings.NewReader(delim).ReadByte()
	delimited, _ := reader.ReadString(sep)
	return delimited
}
// appendUniq appends i to slice only when it is not already present,
// keeping insertion order stable.
func appendUniq(slice []string, i string) []string {
	for _, existing := range slice {
		if existing == i {
			return slice
		}
	}
	return append(slice, i)
}
// filter partitions object names into (actual objects, common
// prefixes) according to the Prefix/Delimiter combination carried in
// resources, mirroring S3 ListObjects semantics.
func (d donutDriver) filter(objects []string, resources drivers.BucketResourcesMetadata) ([]string, []string) {
	var actualObjects []string
	var commonPrefixes []string
	for _, name := range objects {
		switch true {
		// Both delimiter and Prefix is present
		case resources.IsDelimiterPrefixSet():
			if strings.HasPrefix(name, resources.Prefix) {
				trimmedName := strings.TrimPrefix(name, resources.Prefix)
				delimitedName := delimiter(trimmedName, resources.Delimiter)
				if delimitedName != "" {
					// A bare delimiter means the prefix itself is the
					// common prefix; otherwise the delimited head is.
					if delimitedName == resources.Delimiter {
						commonPrefixes = appendUniq(commonPrefixes, resources.Prefix+delimitedName)
					} else {
						commonPrefixes = appendUniq(commonPrefixes, delimitedName)
					}
					// No remainder past the delimiter: it is a leaf
					// object at this level.
					if trimmedName == delimitedName {
						actualObjects = appendUniq(actualObjects, name)
					}
				}
			}
		// Delimiter present and Prefix is absent
		case resources.IsDelimiterSet():
			delimitedName := delimiter(name, resources.Delimiter)
			switch true {
			case delimitedName == name:
				actualObjects = appendUniq(actualObjects, name)
			case delimitedName != "":
				commonPrefixes = appendUniq(commonPrefixes, delimitedName)
			}
		case resources.IsPrefixSet():
			if strings.HasPrefix(name, resources.Prefix) {
				actualObjects = appendUniq(actualObjects, name)
			}
		case resources.IsDefault():
			// No filtering requested: everything is an actual object.
			return objects, nil
		}
	}
	return actualObjects, commonPrefixes
}

@ -4,9 +4,9 @@ import (
"bytes"
"io"
"github.com/fkautz/testify/mock"
"github.com/minio-io/iodine"
"github.com/minio-io/objectdriver"
"github.com/stretchr/testify/mock"
)
// Driver is a mock

Loading…
Cancel
Save