Donut cleanup, another set

- Make sure to close all readers once they are no longer needed
- Fix errors in api_testsuite: c.Assert(err, IsNil) should be called right after each function call, before asserting on the returned values (a short sketch of both patterns follows the changed-files list below)
master
Harshavardhana, 10 years ago
parent eec66f195a
commit fb9adb5524
Changed files:
  1. pkg/storage/donut/bucket.go (16 changed lines)
  2. pkg/storage/donut/disk/disk.go (2 changed lines)
  3. pkg/storage/donut/donut.go (9 changed lines)
  4. pkg/storage/drivers/api_testsuite.go (16 changed lines)
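For illustration, a minimal sketch of the two cleanup patterns this commit applies. The copyAll helper below is hypothetical (it is not part of Donut); it only mirrors the defer-based cleanup added to readEncodedData and to disk.ListDir/ListFiles, where every handle that was opened successfully gets a deferred Close before any data is consumed.

package example

import "io"

// copyAll drains every reader into w. All readers are scheduled for
// Close up front with defer, so they are released when copyAll returns,
// even if one of the copies fails part way through.
func copyAll(w io.Writer, readers []io.ReadCloser) error {
	for _, reader := range readers {
		defer reader.Close()
	}
	for _, reader := range readers {
		if _, err := io.Copy(w, reader); err != nil {
			return err
		}
	}
	return nil
}

The test-suite half of the commit follows the same idea for assertions: in the hunks below, c.Assert(err, check.IsNil) moves to the line directly after the call that produced err (drivers.ListObjects, drivers.GetObject, drivers.ListBuckets), ahead of any assertion on the values that the error guards.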

@@ -117,10 +117,6 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) ([]st
return nil, nil, false, iodine.New(err, nil)
}
for _, file := range files {
if len(objects) >= maxkeys {
isTruncated = true
goto truncated
}
objectName, err := b.getObjectName(file.Name(), disk.GetPath(), bucketPath)
if err != nil {
return nil, nil, false, iodine.New(err, nil)
@@ -134,8 +130,6 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) ([]st
}
nodeSlice = nodeSlice + 1
}
truncated:
{
if strings.TrimSpace(prefix) != "" {
objects = removePrefix(objects, prefix)
@@ -150,10 +144,15 @@ truncated:
} else {
filteredObjects = objects
}
var results []string
var commonPrefixes []string
sort.Strings(filteredObjects)
for _, objectName := range filteredObjects {
if len(results) >= maxkeys {
isTruncated = true
break
}
results = appendUniq(results, prefix+objectName)
}
for _, commonPrefix := range prefixes {
@@ -418,6 +417,9 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
writer.CloseWithError(iodine.New(err, nil))
return
}
for _, reader := range readers {
defer reader.Close()
}
hasher := md5.New()
mwriter := io.MultiWriter(writer, hasher)
switch len(readers) == 1 {

@@ -93,6 +93,7 @@ func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) {
if err != nil {
return nil, iodine.New(err, nil)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, iodine.New(err, nil)
@@ -113,6 +114,7 @@ func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) {
if err != nil {
return nil, iodine.New(err, nil)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, iodine.New(err, nil)

@@ -192,7 +192,7 @@ func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadC
if _, ok := dt.buckets[bucket]; !ok {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
objectList, _, _, err := dt.buckets[bucket].ListObjects(object, "", "", 1)
objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
if err != nil {
return "", iodine.New(err, nil)
}
@@ -245,7 +245,12 @@ func (dt donut) GetObjectMetadata(bucket, object string) (map[string]string, err
if _, ok := dt.buckets[bucket]; !ok {
return nil, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
}
objectList, _, _, err := dt.buckets[bucket].ListObjects(object, "", "", 1)
//
// There is a potential issue here: if the object comes after the truncated list
// below, GetObjectMetadata would fail with ObjectNotFound{}.
//
// Will fix it when we bring persistent JSON into Donut - TODO
objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
if err != nil {
return nil, iodine.New(err, errParams)
}
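The TODO comment in the hunk above describes a real edge case. Here is a hypothetical, self-contained illustration of it (plain Go; none of these names exist in Donut): with the listing capped at 1000 keys, an object whose name sorts after the truncation point never appears in the returned list, so a membership check against that list alone would wrongly report ObjectNotFound{}.

package main

import "fmt"

func main() {
	// Pretend the bucket holds 1500 objects with lexically sorted names.
	var allObjects []string
	for i := 0; i < 1500; i++ {
		allObjects = append(allObjects, fmt.Sprintf("obj%04d", i))
	}

	// What a call shaped like ListObjects("", "", "", 1000) would hand back:
	// only the first window of names.
	objectList := allObjects[:1000]

	// This object exists in the bucket but sorts past the truncated window.
	target := "obj1400"
	found := false
	for _, name := range objectList {
		if name == target {
			found = true
			break
		}
	}
	fmt.Println(found) // false: a lookup based on objectList alone would report ObjectNotFound{}
}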

@@ -187,9 +187,9 @@ func testPaging(c *check.C, create func() Driver) {
resources.Maxkeys = 5
resources.Prefix = ""
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, i+1)
c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(err, check.IsNil)
}
// check that pages work after paging occurs
for i := 6; i <= 10; i++ {
@@ -198,9 +198,9 @@ func testPaging(c *check.C, create func() Driver) {
resources.Maxkeys = 5
resources.Prefix = ""
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 5)
c.Assert(resources.IsTruncated, check.Equals, true)
c.Assert(err, check.IsNil)
}
// check that paging with a prefix at the end returns fewer objects
{
@@ -209,6 +209,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Prefix = "new"
resources.Maxkeys = 5
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 2)
}
@@ -217,6 +218,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Prefix = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, check.Equals, "obj0")
@@ -248,6 +250,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Prefix = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, check.Equals, "obj0")
@@ -265,6 +268,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Delimiter = ""
resources.Maxkeys = 3
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix2")
c.Assert(objects[1].Key, check.Equals, "obj0")
c.Assert(objects[2].Key, check.Equals, "obj1")
@@ -276,6 +280,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Marker = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "obj0")
c.Assert(objects[1].Key, check.Equals, "obj1")
c.Assert(objects[2].Key, check.Equals, "obj10")
@@ -288,6 +293,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Marker = ""
resources.Maxkeys = 5
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
}
@@ -313,8 +319,8 @@ func testObjectOverwriteFails(c *check.C, create func() Driver) {
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "object")
c.Assert(length, check.Equals, int64(len("one")))
c.Assert(err, check.IsNil)
c.Assert(length, check.Equals, int64(len("one")))
c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
}
@@ -358,9 +364,9 @@ func testPutObjectInSubdir(c *check.C, create func() Driver) {
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
c.Assert(err, check.IsNil)
c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
c.Assert(err, check.IsNil)
}
func testListBuckets(c *check.C, create func() Driver) {
@@ -405,8 +411,8 @@ func testListBucketsOrder(c *check.C, create func() Driver) {
drivers.CreateBucket("bucket2", "")
buckets, err := drivers.ListBuckets()
c.Assert(len(buckets), check.Equals, 2)
c.Assert(err, check.IsNil)
c.Assert(len(buckets), check.Equals, 2)
c.Assert(buckets[0].Name, check.Equals, "bucket1")
c.Assert(buckets[1].Name, check.Equals, "bucket2")
}
