Implement bucket caching for b2 gateway (#8820)

fixes #8739 #6806
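The gateway now keeps the result of B2's ListBuckets call on the b2Objects handle and reuses it instead of listing buckets on every request. When a bucket-scoped call fails (for example because the cached entry went stale), the cache is emptied, the bucket handle is re-resolved, and the call is retried once. A minimal sketch of the pattern, using hypothetical stand-in names (bucketCache, remoteList, do are illustrative, not the gateway's identifiers):

package b2cache

import "context"

// bucket stands in for *b2.Bucket; remoteList stands in for b2Client.ListBuckets.
type bucket struct{ Name string }

type bucketCache struct {
	remoteList func(ctx context.Context) ([]*bucket, error)
	buckets    []*bucket // cached bucket list, populated lazily
}

// list returns the cached bucket list, fetching it from B2 only when the
// cache is empty (first use or after an invalidation).
func (c *bucketCache) list(ctx context.Context) ([]*bucket, error) {
	if len(c.buckets) == 0 {
		bkts, err := c.remoteList(ctx)
		if err != nil {
			return nil, err
		}
		c.buckets = bkts
	}
	return c.buckets, nil
}

// do runs a bucket-scoped operation; if it fails, it drops the cache and
// retries exactly once so the next bucket lookup re-lists from B2.
func (c *bucketCache) do(ctx context.Context, op func(ctx context.Context) error) error {
	if err := op(ctx); err != nil {
		c.buckets = nil // invalidate the cache
		return op(ctx)
	}
	return nil
}

In the diff below this retry is not factored into a helper; each gateway method (DeleteBucket, ListObjects, GetObject, PutObject, the multipart calls, and the bucket-policy calls) inlines the same clear-cache, re-resolve, retry-once sequence.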
master
Forest Lovewood 5 years ago committed by Nitish Tiwari
parent 7a400542ae
commit dd93eee1e3
1 changed file: cmd/gateway/b2/gateway-b2.go (+183)

@@ -127,6 +127,7 @@ type b2Objects struct {
     b2Client   *b2.B2
     httpClient *http.Client
     ctx        context.Context
+    buckets    []*b2.Bucket
 }

 // Convert B2 errors to minio object layer errors.
@@ -257,11 +258,14 @@ func (l *b2Objects) listBuckets(ctx context.Context, err error) ([]*b2.Bucket, e
             return nil, rerr
         }
     }
+    if len(l.buckets) == 0 {
         bktList, lerr := l.b2Client.ListBuckets(l.ctx)
         if lerr != nil {
             return l.listBuckets(ctx, lerr)
         }
-    return bktList, nil
+        l.buckets = bktList
+    }
+    return l.buckets, nil
 }

 // Bucket - is a helper which provides a *Bucket instance
@@ -316,6 +320,20 @@ func (l *b2Objects) DeleteBucket(ctx context.Context, bucket string) error {
         return err
     }
     err = bkt.DeleteBucket(l.ctx)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return err
+        }
+        err = bkt.DeleteBucket(l.ctx)
+        logger.LogIf(ctx, err)
+        return b2ToObjectError(err, bucket)
+    }
     logger.LogIf(ctx, err)
     return b2ToObjectError(err, bucket)
 }
@@ -327,10 +345,20 @@ func (l *b2Objects) ListObjects(ctx context.Context, bucket string, prefix strin
         return loi, err
     }
     files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
+    if lerr != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return loi, err
+        }
+        files, next, lerr = bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
         if lerr != nil {
             logger.LogIf(ctx, lerr)
             return loi, b2ToObjectError(lerr, bucket)
         }
+    }
     loi.IsTruncated = next != ""
     loi.NextMarker = next
     for _, file := range files {
@@ -369,10 +397,20 @@ func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
         return loi, err
     }
     files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
+    if lerr != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return loi, err
+        }
+        files, next, lerr = bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
         if lerr != nil {
             logger.LogIf(ctx, lerr)
             return loi, b2ToObjectError(lerr, bucket)
         }
+    }
     loi.IsTruncated = next != ""
     loi.ContinuationToken = continuationToken
     loi.NextContinuationToken = next
@@ -432,10 +470,20 @@ func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string,
         return err
     }
     reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return err
+        }
+        reader, err = bkt.DownloadFileByName(l.ctx, object, startOffset, length)
         if err != nil {
             logger.LogIf(ctx, err)
             return b2ToObjectError(err, bucket, object)
         }
+    }
     defer reader.Close()
     _, err = io.Copy(writer, reader)
     logger.LogIf(ctx, err)
@@ -450,10 +498,20 @@ func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object str
     }
     f, _, err := bkt.ListFileNames(l.ctx, 1, object, "", "")
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return objInfo, err
+        }
+        f, _, err = bkt.ListFileNames(l.ctx, 1, object, "", "")
         if err != nil {
             logger.LogIf(ctx, err)
             return objInfo, b2ToObjectError(err, bucket, object)
         }
+    }
     // B2's list will return the next item in the bucket if the object doesn't
     // exist so we need to perform a name check too
@@ -465,10 +523,20 @@ func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object str
     }
     fi, err := bkt.File(f[0].ID, object).GetFileInfo(l.ctx)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return objInfo, err
+        }
+        fi, err = bkt.File(f[0].ID, object).GetFileInfo(l.ctx)
         if err != nil {
             logger.LogIf(ctx, err)
             return objInfo, b2ToObjectError(err, bucket, object)
         }
+    }
     return minio.ObjectInfo{
         Bucket: bucket,
         Name:   object,
@@ -555,11 +623,21 @@ func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string,
     delete(opts.UserDefined, "content-type")
     var u *b2.URL
+    u, err = bkt.GetUploadURL(l.ctx)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return objInfo, err
+        }
+        u, err = bkt.GetUploadURL(l.ctx)
         if err != nil {
             logger.LogIf(ctx, err)
             return objInfo, b2ToObjectError(err, bucket, object)
         }
+    }
     hr := newB2Reader(data, data.Size())
     var f *b2.File
@@ -597,6 +675,19 @@ func (l *b2Objects) DeleteObject(ctx context.Context, bucket string, object stri
     // If we hide the file we'll conform to B2's versioning policy, it also
     // saves an additional call to check if the file exists first
     _, err = bkt.HideFile(l.ctx, object)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return err
+        }
+        _, err = bkt.HideFile(l.ctx, object)
+        logger.LogIf(ctx, err)
+        return b2ToObjectError(err, bucket, object)
+    }
     logger.LogIf(ctx, err)
     return b2ToObjectError(err, bucket, object)
 }
@@ -624,10 +715,20 @@ func (l *b2Objects) ListMultipartUploads(ctx context.Context, bucket string, pre
         maxUploads = 100
     }
     largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, maxUploads, uploadIDMarker)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return lmi, err
+        }
+        largeFiles, nextMarker, err = bkt.ListUnfinishedLargeFiles(l.ctx, maxUploads, uploadIDMarker)
         if err != nil {
             logger.LogIf(ctx, err)
             return lmi, b2ToObjectError(err, bucket)
         }
+    }
     lmi = minio.ListMultipartsInfo{
         MaxUploads: maxUploads,
     }
@@ -660,10 +761,20 @@ func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
     contentType := opts.UserDefined["content-type"]
     delete(opts.UserDefined, "content-type")
     lf, err := bkt.StartLargeFile(l.ctx, object, contentType, opts.UserDefined)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return uploadID, err
+        }
+        lf, err = bkt.StartLargeFile(l.ctx, object, contentType, opts.UserDefined)
         if err != nil {
             logger.LogIf(ctx, err)
             return uploadID, b2ToObjectError(err, bucket, object)
         }
+    }
     return lf.ID, nil
 }
@@ -677,10 +788,20 @@ func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object str
     }
     fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return pi, err
+        }
+        fc, err = bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
         if err != nil {
             logger.LogIf(ctx, err)
             return pi, b2ToObjectError(err, bucket, object, uploadID)
         }
+    }
     hr := newB2Reader(data, data.Size())
     _, err = fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
@@ -713,10 +834,20 @@ func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object s
     // startPartNumber must be in the range 1 - 10000 for B2.
     partNumberMarker++
     partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return lpi, err
+        }
+        partsList, next, err = bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
         if err != nil {
             logger.LogIf(ctx, err)
             return lpi, b2ToObjectError(err, bucket, object, uploadID)
         }
+    }
     if next != 0 {
         lpi.IsTruncated = true
         lpi.NextPartNumberMarker = next
@@ -738,6 +869,19 @@ func (l *b2Objects) AbortMultipartUpload(ctx context.Context, bucket string, obj
         return err
     }
     err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return err
+        }
+        err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
+        logger.LogIf(ctx, err)
+        return b2ToObjectError(err, bucket, object, uploadID)
+    }
     logger.LogIf(ctx, err)
     return b2ToObjectError(err, bucket, object, uploadID)
 }
@@ -761,10 +905,19 @@ func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
         hashes[uploadedPart.PartNumber] = strings.TrimSuffix(uploadedPart.ETag, "-1")
     }
+    if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return oi, err
+        }
         if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
             logger.LogIf(ctx, err)
             return oi, b2ToObjectError(err, bucket, object, uploadID)
         }
+    }
     return l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
 }
@@ -806,6 +959,20 @@ func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPo
     }
     bkt.Type = bucketTypeReadOnly
     _, err = bkt.Update(l.ctx)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return err
+        }
+        bkt.Type = bucketTypeReadOnly
+        _, err = bkt.Update(l.ctx)
+        logger.LogIf(ctx, err)
+        return b2ToObjectError(err)
+    }
     logger.LogIf(ctx, err)
     return b2ToObjectError(err)
 }
@@ -854,6 +1021,20 @@ func (l *b2Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error
     }
     bkt.Type = bucketTypePrivate
     _, err = bkt.Update(l.ctx)
+    if err != nil {
+        l.buckets = []*b2.Bucket{}
+        bkt, err := l.Bucket(ctx, bucket)
+        if err != nil {
+            return err
+        }
+        bkt.Type = bucketTypePrivate
+        _, err = bkt.Update(l.ctx)
+        logger.LogIf(ctx, err)
+        return b2ToObjectError(err)
+    }
     logger.LogIf(ctx, err)
     return b2ToObjectError(err)
 }
