fix: use per test context (#9343)

Instead of GlobalContext, use a local context for tests.
Most notably, this allows resources created during a test to be
shut down when the tests using them are done. After PRs #9345
and #9331, CI is often running out of memory/time.
Branch: master
Author: Klaus Post (committed by GitHub)
Parent: 78f2183e70
Commit: f19cbfad5c
Files changed (lines changed):
  1. cmd/admin-handlers_test.go (24)
  2. cmd/benchmark-utils_test.go (25)
  3. cmd/server-main.go (6)
  4. cmd/server-main_test.go (7)
  5. cmd/test-utils_test.go (68)
  6. cmd/web-handlers_test.go (10)
  7. cmd/xl-sets.go (8)
  8. cmd/xl-sets_test.go (11)
  9. cmd/xl-v1-common_test.go (6)
  10. cmd/xl-v1-healing-common_test.go (11)
  11. cmd/xl-v1-healing_test.go (64)
  12. cmd/xl-v1-multipart_test.go (12)
  13. cmd/xl-v1-object_test.go (90)
  14. cmd/xl-v1-utils_test.go (11)
  15. cmd/xl-zones.go (6)
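
For reference, the pattern applied throughout the test files in this diff is roughly the following. This is a minimal sketch using the prepareXL16 and removeRoots helpers that appear below; the test name itself is made up.

package cmd

import (
	"context"
	"testing"
)

func TestSomethingWithLocalContext(t *testing.T) {
	// Each test owns its own context instead of sharing GlobalContext.
	ctx, cancel := context.WithCancel(context.Background())
	// Cancelling on return stops background goroutines started during
	// setup (disk monitoring, stale multipart cleanup, ...).
	defer cancel()

	// prepareXL16 is one of the helpers changed in this commit to accept a context.
	obj, fsDirs, err := prepareXL16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer removeRoots(fsDirs)

	_ = obj // ... exercise the object layer here ...
}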

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
@ -42,7 +43,8 @@ type adminXLTestBed struct {
// prepareAdminXLTestBed - helper function that setups a single-node
// XL backend for admin-handler tests.
func prepareAdminXLTestBed() (*adminXLTestBed, error) {
func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// reset global variables to start afresh.
resetTestGlobals()
@ -51,7 +53,7 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
globalIsXL = true
// Initializing objectLayer for HealFormatHandler.
objLayer, xlDirs, xlErr := initTestXLObjLayer()
objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
if xlErr != nil {
return nil, xlErr
}
@ -69,9 +71,9 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
globalConfigSys = NewConfigSys()
globalIAMSys = NewIAMSys()
globalIAMSys.Init(GlobalContext, objLayer)
globalIAMSys.Init(ctx, objLayer)
buckets, err := objLayer.ListBuckets(GlobalContext)
buckets, err := objLayer.ListBuckets(ctx)
if err != nil {
return nil, err
}
@ -102,7 +104,7 @@ func (atb *adminXLTestBed) TearDown() {
// initTestObjLayer - Helper function to initialize an XL-based object
// layer and set globalObjectAPI.
func initTestXLObjLayer() (ObjectLayer, []string, error) {
func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
xlDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
@ -115,7 +117,7 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
}
globalPolicySys = NewPolicySys()
objLayer, err := newXLSets(endpoints, storageDisks, format, 1, 16)
objLayer, err := newXLSets(ctx, endpoints, storageDisks, format, 1, 16)
if err != nil {
return nil, nil, err
}
@ -190,7 +192,10 @@ func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, er
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
@ -258,7 +263,10 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}
func TestAdminServerInfo(t *testing.T) {
adminTestBed, err := prepareAdminXLTestBed()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}

@ -28,11 +28,6 @@ import (
humanize "github.com/dustin/go-humanize"
)
// Prepare XL/FS backend for benchmark.
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
return prepareTestBackend(instanceType)
}
// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
@ -135,7 +130,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@ -149,7 +146,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@ -163,7 +162,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@ -242,7 +243,9 @@ func generateBytesData(size int) []byte {
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
@ -256,7 +259,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}

@ -412,7 +412,7 @@ func serverMain(ctx *cli.Context) {
}
}
newObject, err := newObjectLayer(globalEndpoints)
newObject, err := newObjectLayer(GlobalContext, globalEndpoints)
logger.SetDeploymentID(globalDeploymentID)
if err != nil {
// Stop watching for any certificate changes.
@ -479,7 +479,7 @@ func serverMain(ctx *cli.Context) {
}
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newObjectLayer(endpointZones EndpointZones) (newObject ObjectLayer, err error) {
func newObjectLayer(ctx context.Context, endpointZones EndpointZones) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
if endpointZones.NEndpoints() == 1 {
@ -487,5 +487,5 @@ func newObjectLayer(endpointZones EndpointZones) (newObject ObjectLayer, err err
return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path)
}
return newXLZones(endpointZones)
return newXLZones(ctx, endpointZones)
}

@ -17,12 +17,15 @@
package cmd
import (
"context"
"reflect"
"testing"
)
// Tests initializing new object layer.
func TestNewObjectLayer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Tests for FS object layer.
nDisks := 1
disks, err := getRandomDisks(nDisks)
@ -31,7 +34,7 @@ func TestNewObjectLayer(t *testing.T) {
}
defer removeRoots(disks)
obj, err := newObjectLayer(mustGetZoneEndpoints(disks...))
obj, err := newObjectLayer(ctx, mustGetZoneEndpoints(disks...))
if err != nil {
t.Fatal("Unexpected object layer initialization error", err)
}
@ -50,7 +53,7 @@ func TestNewObjectLayer(t *testing.T) {
}
defer removeRoots(disks)
obj, err = newObjectLayer(mustGetZoneEndpoints(disks...))
obj, err = newObjectLayer(ctx, mustGetZoneEndpoints(disks...))
if err != nil {
t.Fatal("Unexpected object layer initialization error", err)
}

@ -160,7 +160,7 @@ func prepareFS() (ObjectLayer, string, error) {
return obj, fsDirs[0], nil
}
func prepareXLSets32() (ObjectLayer, []string, error) {
func prepareXLSets32(ctx context.Context) (ObjectLayer, []string, error) {
fsDirs1, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
@ -182,7 +182,7 @@ func prepareXLSets32() (ObjectLayer, []string, error) {
return nil, nil, err
}
objAPI, err := newXLSets(endpoints, storageDisks, format, 2, 16)
objAPI, err := newXLSets(ctx, endpoints, storageDisks, format, 2, 16)
if err != nil {
return nil, nil, err
}
@ -190,12 +190,12 @@ func prepareXLSets32() (ObjectLayer, []string, error) {
return objAPI, fsDirs, nil
}
func prepareXL(nDisks int) (ObjectLayer, []string, error) {
func prepareXL(ctx context.Context, nDisks int) (ObjectLayer, []string, error) {
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
return nil, nil, err
}
obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
if err != nil {
removeRoots(fsDirs)
return nil, nil, err
@ -203,8 +203,8 @@ func prepareXL(nDisks int) (ObjectLayer, []string, error) {
return obj, fsDirs, nil
}
func prepareXL16() (ObjectLayer, []string, error) {
return prepareXL(16)
func prepareXL16(ctx context.Context) (ObjectLayer, []string, error) {
return prepareXL(ctx, 16)
}
// Initialize FS objects.
@ -292,16 +292,18 @@ type TestServer struct {
SecretKey string
Server *httptest.Server
Obj ObjectLayer
cancel context.CancelFunc
}
// UnstartedTestServer - Configures a temp FS/XL backend,
// initializes the endpoints and configures the test server.
// The server should be started using the Start() method.
func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
ctx, cancel := context.WithCancel(context.Background())
// create an instance of TestServer.
testServer := TestServer{}
testServer := TestServer{cancel: cancel}
// return FS/XL object layer and temp backend.
objLayer, disks, err := prepareTestBackend(instanceType)
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
t.Fatal(err)
}
@ -341,9 +343,9 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
globalConfigSys = NewConfigSys()
globalIAMSys = NewIAMSys()
globalIAMSys.Init(GlobalContext, objLayer)
globalIAMSys.Init(ctx, objLayer)
buckets, err := objLayer.ListBuckets(context.Background())
buckets, err := objLayer.ListBuckets(ctx)
if err != nil {
t.Fatalf("Unable to list buckets on backend %s", err)
}
@ -503,13 +505,14 @@ func newTestConfig(bucketLocation string, obj ObjectLayer) (err error) {
// Deleting the temporary backend and stopping the server.
func (testServer TestServer) Stop() {
testServer.cancel()
testServer.Server.Close()
os.RemoveAll(testServer.Root)
for _, ep := range testServer.Disks {
for _, disk := range ep.Endpoints {
os.RemoveAll(disk.Path)
}
}
testServer.Server.Close()
}
// Truncate request to simulate unexpected EOF for a request signed using streaming signature v4.
@ -1568,14 +1571,14 @@ func getRandomDisks(N int) ([]string, error) {
}
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newTestObjectLayer(endpointZones EndpointZones) (newObject ObjectLayer, err error) {
func newTestObjectLayer(ctx context.Context, endpointZones EndpointZones) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
if endpointZones.NEndpoints() == 1 {
// Initialize new FS object layer.
return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path)
}
z, err := newXLZones(endpointZones)
z, err := newXLZones(ctx, endpointZones)
if err != nil {
return nil, err
}
@ -1595,8 +1598,8 @@ func newTestObjectLayer(endpointZones EndpointZones) (newObject ObjectLayer, err
}
// initObjectLayer - Instantiates object layer and returns it.
func initObjectLayer(endpointZones EndpointZones) (ObjectLayer, []StorageAPI, error) {
objLayer, err := newTestObjectLayer(endpointZones)
func initObjectLayer(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, []StorageAPI, error) {
objLayer, err := newTestObjectLayer(ctx, endpointZones)
if err != nil {
return nil, nil, err
}
@ -1655,14 +1658,14 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl
// prepare test backend.
// create FS/XL/XLSet backend.
// return object layer, backend disks.
func prepareTestBackend(instanceType string) (ObjectLayer, []string, error) {
func prepareTestBackend(ctx context.Context, instanceType string) (ObjectLayer, []string, error) {
switch instanceType {
// Total number of disks for XL sets backend is set to 32.
case XLSetsTestStr:
return prepareXLSets32()
return prepareXLSets32(ctx)
// Total number of disks for XL backend is set to 16.
case XLTestStr:
return prepareXL16()
return prepareXL16(ctx)
default:
// return FS backend by default.
obj, disk, err := prepareFS()
@ -1866,6 +1869,9 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc
// ExecObjectLayerAPITest - executes object layer API tests.
// Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers.
func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// reset globals.
// this is to make sure that the tests are not affected by modified value.
resetTestGlobals()
@ -1901,7 +1907,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [
// Executing the object layer tests for single node setup.
objAPITest(objLayer, FSTestStr, bucketFS, fsAPIRouter, credentials, t)
objLayer, xlDisks, err := prepareXL16()
objLayer, xlDisks, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
@ -1931,6 +1937,9 @@ type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []s
// ExecObjectLayerTest - executes object layer tests.
// Creates single node and XL ObjectLayer instance and runs test for both the layers.
func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
@ -1943,7 +1952,7 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
}
globalIAMSys = NewIAMSys()
globalIAMSys.Init(GlobalContext, objLayer)
globalIAMSys.Init(ctx, objLayer)
buckets, err := objLayer.ListBuckets(context.Background())
if err != nil {
@ -1959,19 +1968,22 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
// Executing the object layer tests for single node setup.
objTest(objLayer, FSTestStr, t)
objLayer, fsDirs, err := prepareXLSets32()
objLayer, fsDirs, err := prepareXLSets32(ctx)
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
defer removeRoots(append(fsDirs, fsDir))
// Executing the object layer tests for XL.
objTest(objLayer, XLTestStr, t)
defer removeRoots(append(fsDirs, fsDir))
}
// ExecObjectLayerTestWithDirs - executes object layer tests.
// Creates single node and XL ObjectLayer instance and runs test for both the layers.
func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs) {
objLayer, fsDirs, err := prepareXL16()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
@ -1990,7 +2002,10 @@ func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs)
// ExecObjectLayerDiskAlteredTest - executes object layer tests while altering
// disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer.
func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) {
objLayer, fsDirs, err := prepareXL16()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
@ -2010,12 +2025,15 @@ type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []str
// ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale
// files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer.
func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
erasureDisks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatalf("Initialization of disks for XL setup: %s", err)
}
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(erasureDisks...))
objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(erasureDisks...))
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
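
With the test-utils changes above (TestServer now carries a cancel func that Stop() invokes, and the Exec* helpers create their own contexts), an HTTP-level test tears down its backend goroutines along with the server. A minimal usage sketch, assuming *testing.T satisfies the TestErrHandler interface as the existing callers imply:

package cmd

import "testing"

func TestAgainstTestServer(t *testing.T) {
	// UnstartedTestServer now creates a cancellable context internally and
	// stores its cancel func in the returned TestServer.
	ts := UnstartedTestServer(t, XLTestStr)
	ts.Server.Start()
	// Stop() cancels the per-server context, closes the httptest server,
	// and removes the backend directories.
	defer ts.Stop()

	// ... issue requests against ts.Server.URL with the server's credentials ...
	_ = ts.Obj
}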

@ -1475,8 +1475,11 @@ func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE
// TestWebCheckAuthorization - Test Authorization for all web handlers
func TestWebCheckAuthorization(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Prepare XL backend
obj, fsDirs, err := prepareXL16()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
@ -1564,8 +1567,11 @@ func TestWebCheckAuthorization(t *testing.T) {
// TestWebObjectLayerFaultyDisks - Test Web RPC responses with faulty disks
func TestWebObjectLayerFaultyDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Prepare XL backend
obj, fsDirs, err := prepareXL16()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}

@ -293,7 +293,7 @@ func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI {
const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs.
// Initialize new set of erasure coded sets.
func newXLSets(endpoints Endpoints, storageDisks []StorageAPI, format *formatXLV3, setCount int, drivesPerSet int) (*xlSets, error) {
func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatXLV3, setCount int, drivesPerSet int) (*xlSets, error) {
endpointStrings := make([]string, len(endpoints))
for i, endpoint := range endpoints {
if endpoint.IsLocal {
@ -363,12 +363,12 @@ func newXLSets(endpoints Endpoints, storageDisks []StorageAPI, format *formatXLV
mrfUploadCh: make(chan partialUpload, 10000),
}
go s.sets[i].cleanupStaleMultipartUploads(GlobalContext,
GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
go s.sets[i].cleanupStaleMultipartUploads(ctx,
GlobalMultipartCleanupInterval, GlobalMultipartExpiry, ctx.Done())
}
// Start the disk monitoring and connect routine.
go s.monitorAndConnectEndpoints(GlobalContext, defaultMonitorConnectEndpointInterval)
go s.monitorAndConnectEndpoints(ctx, defaultMonitorConnectEndpointInterval)
go s.maintainMRFList()
go s.healMRFRoutine()
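
The xl-sets.go hunk above is what makes the per-test context effective: the set's background loops now exit on the caller's ctx.Done() instead of the process-wide GlobalServiceDoneCh. The general shape of that pattern, as a standalone sketch (names here are illustrative, not from the repo):

package main

import (
	"context"
	"fmt"
	"time"
)

// cleanupLoop keeps running until its owning context is cancelled,
// mirroring how cleanupStaleMultipartUploads and the endpoint monitor
// now stop when a test's context is cancelled.
func cleanupLoop(ctx context.Context, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return // owner is done; no leaked goroutine
		case <-ticker.C:
			fmt.Println("cleanup pass")
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go cleanupLoop(ctx, 100*time.Millisecond)
	time.Sleep(350 * time.Millisecond)
	cancel() // deterministically shuts the loop down
	time.Sleep(50 * time.Millisecond)
}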

@ -17,6 +17,7 @@
package cmd
import (
"context"
"os"
"path/filepath"
"testing"
@ -64,6 +65,9 @@ func TestCrcHashMod(t *testing.T) {
// TestNewXL - tests initialization of all input disks
// and constructs a valid `XL` object
func TestNewXLSets(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var nDisks = 16 // Maximum disks.
var erasureDisks []string
for i := 0; i < nDisks; i++ {
@ -92,7 +96,7 @@ func TestNewXLSets(t *testing.T) {
t.Fatalf("Unable to format disks for erasure, %s", err)
}
if _, err := newXLSets(endpoints, storageDisks, format, 1, 16); err != nil {
if _, err := newXLSets(ctx, endpoints, storageDisks, format, 1, 16); err != nil {
t.Fatalf("Unable to initialize erasure")
}
}
@ -100,10 +104,13 @@ func TestNewXLSets(t *testing.T) {
// TestHashedLayer - tests the hashed layer which will be returned
// consistently for a given object name.
func TestHashedLayer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []*xlObjects
for i := 0; i < 16; i++ {
obj, fsDirs, err := prepareXL16()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal("Unable to initialize 'XL' object layer.", err)
}

@ -18,13 +18,17 @@ package cmd
import (
"bytes"
"context"
"os"
"testing"
)
// Tests for if parent directory is object
func TestXLParentDirIsObject(t *testing.T) {
obj, fsDisks, err := prepareXL16()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDisks, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Unable to initialize 'XL' object layer.")
}

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
@ -91,7 +92,10 @@ func TestCommonTime(t *testing.T) {
// TestListOnlineDisks - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisks(t *testing.T) {
obj, disks, err := prepareXL16()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, disks, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Prepare XL backend failed - %v", err)
}
@ -256,8 +260,9 @@ func TestListOnlineDisks(t *testing.T) {
}
func TestDisksWithAllParts(t *testing.T) {
ctx := GlobalContext
obj, disks, err := prepareXL16()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, disks, err := prepareXL16(ctx)
if err != nil {
t.Fatalf("Prepare XL backend failed - %v", err)
}

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"path/filepath"
"testing"
@ -26,6 +27,9 @@ import (
// Tests undoes and validates if the undoing completes successfully.
func TestUndoMakeBucket(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
@ -34,13 +38,13 @@ func TestUndoMakeBucket(t *testing.T) {
defer removeRoots(fsDirs)
// Remove format.json on 16 disks.
obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
if err != nil {
t.Fatal(err)
}
bucketName := getRandomBucketName()
if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, ""); err != nil {
if err = obj.MakeBucketWithLocation(ctx, bucketName, ""); err != nil {
t.Fatal(err)
}
z := obj.(*xlZones)
@ -48,7 +52,7 @@ func TestUndoMakeBucket(t *testing.T) {
undoMakeBucket(xl.getDisks(), bucketName)
// Validate if bucket was deleted properly.
_, err = obj.GetBucketInfo(GlobalContext, bucketName)
_, err = obj.GetBucketInfo(ctx, bucketName)
if err != nil {
switch err.(type) {
case BucketNotFound:
@ -59,8 +63,10 @@ func TestUndoMakeBucket(t *testing.T) {
}
func TestHealObjectCorrupted(t *testing.T) {
resetGlobalHealState()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resetGlobalHealState()
defer resetGlobalHealState()
nDisks := 16
@ -72,7 +78,7 @@ func TestHealObjectCorrupted(t *testing.T) {
defer removeRoots(fsDirs)
// Everything is fine, should return nil
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
if err != nil {
t.Fatal(err)
}
@ -82,21 +88,21 @@ func TestHealObjectCorrupted(t *testing.T) {
data := bytes.Repeat([]byte("a"), 5*1024*1024)
var opts ObjectOptions
err = objLayer.MakeBucketWithLocation(GlobalContext, bucket, "")
err = objLayer.MakeBucketWithLocation(ctx, bucket, "")
if err != nil {
t.Fatalf("Failed to make a bucket - %v", err)
}
// Create an object with multiple parts uploaded in decreasing
// part number.
uploadID, err := objLayer.NewMultipartUpload(GlobalContext, bucket, object, opts)
uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
if err != nil {
t.Fatalf("Failed to create a multipart upload - %v", err)
}
var uploadedParts []CompletePart
for _, partID := range []int{2, 1} {
pInfo, err1 := objLayer.PutObjectPart(GlobalContext, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
if err1 != nil {
t.Fatalf("Failed to upload a part - %v", err1)
}
@ -106,7 +112,7 @@ func TestHealObjectCorrupted(t *testing.T) {
})
}
_, err = objLayer.CompleteMultipartUpload(GlobalContext, bucket, object, uploadID, uploadedParts, ObjectOptions{})
_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
if err != nil {
t.Fatalf("Failed to complete multipart upload - %v", err)
}
@ -120,7 +126,7 @@ func TestHealObjectCorrupted(t *testing.T) {
t.Fatalf("Failed to delete a file - %v", err)
}
_, err = objLayer.HealObject(GlobalContext, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil {
t.Fatalf("Failed to heal object - %v", err)
}
@ -143,7 +149,7 @@ func TestHealObjectCorrupted(t *testing.T) {
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
_, err = objLayer.HealObject(GlobalContext, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
if err != nil {
t.Errorf("Expected nil but received %v", err)
}
@ -169,7 +175,7 @@ func TestHealObjectCorrupted(t *testing.T) {
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
_, err = objLayer.HealObject(GlobalContext, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
if err != nil {
t.Errorf("Expected nil but received %v", err)
}
@ -189,7 +195,7 @@ func TestHealObjectCorrupted(t *testing.T) {
}
// Try healing now, expect to receive errFileNotFound.
_, err = objLayer.HealObject(GlobalContext, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
if err != nil {
if _, ok := err.(ObjectNotFound); !ok {
t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
@ -197,7 +203,7 @@ func TestHealObjectCorrupted(t *testing.T) {
}
// since majority of xl.jsons are not available, object should be successfully deleted.
_, err = objLayer.GetObjectInfo(GlobalContext, bucket, object, ObjectOptions{})
_, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if _, ok := err.(ObjectNotFound); !ok {
t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
}
@ -205,6 +211,9 @@ func TestHealObjectCorrupted(t *testing.T) {
// Tests healing of object.
func TestHealObjectXL(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
@ -214,7 +223,7 @@ func TestHealObjectXL(t *testing.T) {
defer removeRoots(fsDirs)
// Everything is fine, should return nil
obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
if err != nil {
t.Fatal(err)
}
@ -224,21 +233,21 @@ func TestHealObjectXL(t *testing.T) {
data := bytes.Repeat([]byte("a"), 5*1024*1024)
var opts ObjectOptions
err = obj.MakeBucketWithLocation(GlobalContext, bucket, "")
err = obj.MakeBucketWithLocation(ctx, bucket, "")
if err != nil {
t.Fatalf("Failed to make a bucket - %v", err)
}
// Create an object with multiple parts uploaded in decreasing
// part number.
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucket, object, opts)
uploadID, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
if err != nil {
t.Fatalf("Failed to create a multipart upload - %v", err)
}
var uploadedParts []CompletePart
for _, partID := range []int{2, 1} {
pInfo, err1 := obj.PutObjectPart(GlobalContext, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
pInfo, err1 := obj.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
if err1 != nil {
t.Fatalf("Failed to upload a part - %v", err1)
}
@ -248,7 +257,7 @@ func TestHealObjectXL(t *testing.T) {
})
}
_, err = obj.CompleteMultipartUpload(GlobalContext, bucket, object, uploadID, uploadedParts, ObjectOptions{})
_, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
if err != nil {
t.Fatalf("Failed to complete multipart upload - %v", err)
}
@ -262,7 +271,7 @@ func TestHealObjectXL(t *testing.T) {
t.Fatalf("Failed to delete a file - %v", err)
}
_, err = obj.HealObject(GlobalContext, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
_, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil {
t.Fatalf("Failed to heal object - %v", err)
}
@ -284,7 +293,7 @@ func TestHealObjectXL(t *testing.T) {
z.zones[0].xlDisksMu.Unlock()
// Try healing now, expect to receive errDiskNotFound.
_, err = obj.HealObject(GlobalContext, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan})
_, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan})
// since majority of xl.jsons are not available, object quorum can't be read properly and error will be errXLReadQuorum
if _, ok := err.(InsufficientReadQuorum); !ok {
t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err)
@ -293,6 +302,9 @@ func TestHealObjectXL(t *testing.T) {
// Tests healing of empty directories
func TestHealEmptyDirectoryXL(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
@ -301,7 +313,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
defer removeRoots(fsDirs)
// Everything is fine, should return nil
obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
if err != nil {
t.Fatal(err)
}
@ -310,13 +322,13 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
object := "empty-dir/"
var opts ObjectOptions
err = obj.MakeBucketWithLocation(GlobalContext, bucket, "")
err = obj.MakeBucketWithLocation(ctx, bucket, "")
if err != nil {
t.Fatalf("Failed to make a bucket - %v", err)
}
// Upload an empty directory
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t,
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t,
bytes.NewReader([]byte{}), 0, "", ""), opts)
if err != nil {
t.Fatal(err)
@ -332,7 +344,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
}
// Heal the object
hr, err := obj.HealObject(GlobalContext, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
hr, err := obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil {
t.Fatalf("Failed to heal object - %v", err)
}
@ -356,7 +368,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
}
// Heal the same object again
hr, err = obj.HealObject(GlobalContext, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
hr, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil {
t.Fatalf("Failed to heal object - %v", err)
}

@ -25,8 +25,11 @@ import (
// Tests cleanup multipart uploads for erasure coded backend.
func TestXLCleanupStaleMultipartUploads(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend
obj, fsDirs, err := prepareXL16()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
@ -40,15 +43,12 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
objectName := "object"
var opts ObjectOptions
obj.MakeBucketWithLocation(GlobalContext, bucketName, "")
obj.MakeBucketWithLocation(ctx, bucketName, "")
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, opts)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
// Create a context we can cancel.
ctx, cancel := context.WithCancel(GlobalContext)
var cleanupWg sync.WaitGroup
cleanupWg.Add(1)
go func() {
@ -65,7 +65,7 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
cleanupWg.Wait()
// Check if upload id was already purged.
if err = obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, uploadID); err != nil {
if err = obj.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil {
if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err)
}

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"io/ioutil"
"math/rand"
"os"
@ -32,12 +33,15 @@ import (
)
func TestRepeatPutObjectPart(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objLayer ObjectLayer
var disks []string
var err error
var opts ObjectOptions
objLayer, disks, err = prepareXL16()
objLayer, disks, err = prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
@ -45,23 +49,23 @@ func TestRepeatPutObjectPart(t *testing.T) {
// cleaning up of temporary test directories
defer removeRoots(disks)
err = objLayer.MakeBucketWithLocation(GlobalContext, "bucket1", "")
err = objLayer.MakeBucketWithLocation(ctx, "bucket1", "")
if err != nil {
t.Fatal(err)
}
uploadID, err := objLayer.NewMultipartUpload(GlobalContext, "bucket1", "mpartObj1", opts)
uploadID, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts)
if err != nil {
t.Fatal(err)
}
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Hex := getMD5Hash(fiveMBBytes)
_, err = objLayer.PutObjectPart(GlobalContext, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
if err != nil {
t.Fatal(err)
}
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
_, err = objLayer.PutObjectPart(GlobalContext, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
if err != nil {
t.Fatal(err)
}
@ -83,24 +87,27 @@ func TestXLDeleteObjectBasic(t *testing.T) {
{"bucket", "dir/obj", nil},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend
xl, fsDirs, err := prepareXL16()
xl, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
err = xl.MakeBucketWithLocation(GlobalContext, "bucket", "")
err = xl.MakeBucketWithLocation(ctx, "bucket", "")
if err != nil {
t.Fatal(err)
}
// Create object "dir/obj" under bucket "bucket" for Test 7 to pass
_, err = xl.PutObject(GlobalContext, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
_, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
}
for i, test := range testCases {
actualErr := xl.DeleteObject(GlobalContext, test.bucket, test.object)
actualErr := xl.DeleteObject(ctx, test.bucket, test.object)
if test.expectedErr != nil && actualErr != test.expectedErr {
t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr)
}
@ -113,10 +120,11 @@ func TestXLDeleteObjectBasic(t *testing.T) {
}
func TestXLDeleteObjectsXLSet(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []*xlObjects
for i := 0; i < 32; i++ {
obj, fsDirs, err := prepareXL(16)
obj, fsDirs, err := prepareXL(ctx, 16)
if err != nil {
t.Fatal("Unable to initialize 'XL' object layer.", err)
}
@ -188,8 +196,11 @@ func TestXLDeleteObjectsXLSet(t *testing.T) {
}
func TestXLDeleteObjectDiskNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareXL16()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
@ -200,7 +211,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
xl := z.zones[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(GlobalContext, "bucket", "")
err = obj.MakeBucketWithLocation(ctx, "bucket", "")
if err != nil {
t.Fatal(err)
}
@ -209,7 +220,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
object := "object"
opts := ObjectOptions{}
// Create object "obj" under bucket "bucket".
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
@ -224,13 +235,13 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
return xlDisks
}
z.zones[0].xlDisksMu.Unlock()
err = obj.DeleteObject(GlobalContext, bucket, object)
err = obj.DeleteObject(ctx, bucket, object)
if err != nil {
t.Fatal(err)
}
// Create "obj" under "bucket".
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
@ -244,7 +255,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
return xlDisks
}
z.zones[0].xlDisksMu.Unlock()
err = obj.DeleteObject(GlobalContext, bucket, object)
err = obj.DeleteObject(ctx, bucket, object)
// since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error
if err != toObjectErr(errXLReadQuorum, bucket, object) {
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLReadQuorum, bucket, object), err)
@ -252,8 +263,11 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
}
func TestGetObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareXL16()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
@ -264,7 +278,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
xl := z.zones[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(GlobalContext, "bucket", "")
err = obj.MakeBucketWithLocation(ctx, "bucket", "")
if err != nil {
t.Fatal(err)
}
@ -273,7 +287,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
object := "object"
opts := ObjectOptions{}
// Create "object" under "bucket".
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
@ -302,7 +316,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
z.zones[0].xlDisksMu.Unlock()
// Fetch object from store.
err = xl.GetObject(GlobalContext, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
if err != toObjectErr(errXLReadQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
}
@ -310,8 +324,11 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
func TestPutObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareXL16()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
@ -323,7 +340,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
xl := z.zones[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(GlobalContext, "bucket", "")
err = obj.MakeBucketWithLocation(ctx, "bucket", "")
if err != nil {
t.Fatal(err)
}
@ -332,7 +349,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
object := "object"
opts := ObjectOptions{}
// Create "object" under "bucket".
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
@ -361,7 +378,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
z.zones[0].xlDisksMu.Unlock()
// Upload new content to same object "object"
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != toObjectErr(errXLWriteQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
}
@ -370,7 +387,10 @@ func TestPutObjectNoQuorum(t *testing.T) {
// Tests both object and bucket healing.
func TestHealing(t *testing.T) {
obj, fsDirs, err := prepareXL16()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
@ -380,7 +400,7 @@ func TestHealing(t *testing.T) {
xl := z.zones[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(GlobalContext, "bucket", "")
err = obj.MakeBucketWithLocation(ctx, "bucket", "")
if err != nil {
t.Fatal(err)
}
@ -395,13 +415,13 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{})
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{})
if err != nil {
t.Fatal(err)
}
disk := xl.getDisks()[0]
xlMetaPreHeal, err := readXLMeta(GlobalContext, disk, bucket, object)
xlMetaPreHeal, err := readXLMeta(ctx, disk, bucket, object)
if err != nil {
t.Fatal(err)
}
@ -413,12 +433,12 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
_, err = xl.HealObject(GlobalContext, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
_, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil {
t.Fatal(err)
}
xlMetaPostHeal, err := readXLMeta(GlobalContext, disk, bucket, object)
xlMetaPostHeal, err := readXLMeta(ctx, disk, bucket, object)
if err != nil {
t.Fatal(err)
}
@ -437,17 +457,17 @@ func TestHealing(t *testing.T) {
// gone down when an object was replaced by a new object.
xlMetaOutDated := xlMetaPreHeal
xlMetaOutDated.Stat.ModTime = time.Now()
err = writeXLMetadata(GlobalContext, disk, bucket, object, xlMetaOutDated)
err = writeXLMetadata(ctx, disk, bucket, object, xlMetaOutDated)
if err != nil {
t.Fatal(err)
}
_, err = xl.HealObject(GlobalContext, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan})
_, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan})
if err != nil {
t.Fatal(err)
}
xlMetaPostHeal, err = readXLMeta(GlobalContext, disk, bucket, object)
xlMetaPostHeal, err = readXLMeta(ctx, disk, bucket, object)
if err != nil {
t.Fatal(err)
}
@ -464,7 +484,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
// This would create the bucket.
_, err = xl.HealBucket(GlobalContext, bucket, false, false)
_, err = xl.HealBucket(ctx, bucket, false, false)
if err != nil {
t.Fatal(err)
}

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"reflect"
@ -383,12 +384,15 @@ func TestGetPartSizeFromIdx(t *testing.T) {
}
func TestShuffleDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(disks...))
objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
if err != nil {
removeRoots(disks)
t.Fatal(err)
@ -428,12 +432,15 @@ func testShuffleDisks(t *testing.T, z *xlZones) {
// TestEvalDisks tests the behavior of evalDisks
func TestEvalDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(disks...))
objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
if err != nil {
removeRoots(disks)
t.Fatal(err)

@ -55,7 +55,7 @@ func (z *xlZones) quickHealBuckets(ctx context.Context) {
}
// Initialize new zone of erasure sets.
func newXLZones(endpointZones EndpointZones) (ObjectLayer, error) {
func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, error) {
var (
deploymentID string
err error
@ -74,13 +74,13 @@ func newXLZones(endpointZones EndpointZones) (ObjectLayer, error) {
if deploymentID == "" {
deploymentID = formats[i].ID
}
z.zones[i], err = newXLSets(ep.Endpoints, storageDisks[i], formats[i], ep.SetCount, ep.DrivesPerSet)
z.zones[i], err = newXLSets(ctx, ep.Endpoints, storageDisks[i], formats[i], ep.SetCount, ep.DrivesPerSet)
if err != nil {
return nil, err
}
}
if !z.SingleZone() {
z.quickHealBuckets(GlobalContext)
z.quickHealBuckets(ctx)
}
return z, nil
}
