fix: for defer'ed deleteObject use internal context (#10463)

Branch: master
Author: Harshavardhana (committed via GitHub)
Parent: eb3ded420e
Commit: 48919de301
Files changed:
1. buildscripts/verify-build.sh (55)
2. cmd/erasure-bucket.go (5)
3. cmd/erasure-object.go (9)
4. pkg/retry/retry.go (4)

buildscripts/verify-build.sh
@@ -45,88 +45,63 @@ FUNCTIONAL_TESTS="$WORK_DIR/functional-tests.sh"
function start_minio_fs()
{
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
-minio_pid=$!
sleep 10
echo "$minio_pid"
}
function start_minio_erasure()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
-minio_pid=$!
sleep 15
echo "$minio_pid"
}
function start_minio_erasure_sets()
{
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk-sets{1...32}" >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
-minio_pid=$!
sleep 15
echo "$minio_pid"
}
function start_minio_zone_erasure_sets()
{
-declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
-minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
-minio_pids[1]=$!
sleep 40
echo "${minio_pids[@]}"
}
function start_minio_zone_erasure_sets_ipv6()
{
-declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
-minio_pids[0]=$!
"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
-minio_pids[1]=$!
sleep 40
echo "${minio_pids[@]}"
}
function start_minio_dist_erasure()
{
-declare -a minio_pids
export MINIO_ACCESS_KEY=$ACCESS_KEY
export MINIO_SECRET_KEY=$SECRET_KEY
"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
-minio_pids[0]=$!
"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
-minio_pids[1]=$!
"${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
-minio_pids[2]=$!
"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
-minio_pids[3]=$!
sleep 40
echo "${minio_pids[@]}"
}
function run_test_fs()
{
minio_pid="$(start_minio_fs)"
start_minio_fs
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
kill "$minio_pid"
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -138,12 +113,12 @@ function run_test_fs()
}
function run_test_erasure_sets() {
minio_pid="$(start_minio_erasure_sets)"
start_minio_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
kill "$minio_pid"
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -156,14 +131,12 @@ function run_test_erasure_sets() {
function run_test_zone_erasure_sets()
{
-minio_pids=( $(start_minio_zone_erasure_sets) )
+start_minio_zone_erasure_sets
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
-for pid in "${minio_pids[@]}"; do
-kill "$pid"
-done
+pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -182,16 +155,14 @@ function run_test_zone_erasure_sets()
function run_test_zone_erasure_sets_ipv6()
{
-minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )
+start_minio_zone_erasure_sets_ipv6
export SERVER_ENDPOINT="[::1]:9000"
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
-for pid in "${minio_pids[@]}"; do
-kill "$pid"
-done
+pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -210,12 +181,12 @@ function run_test_zone_erasure_sets_ipv6()
function run_test_erasure()
{
minio_pid="$(start_minio_erasure)"
start_minio_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
kill "$minio_pid"
pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then
@@ -228,14 +199,12 @@ function run_test_erasure()
function run_test_dist_erasure()
{
-minio_pids=( $(start_minio_dist_erasure) )
+start_minio_dist_erasure
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
rv=$?
-for pid in "${minio_pids[@]}"; do
-kill "$pid"
-done
+pkill minio
sleep 3
if [ "$rv" -ne 0 ]; then

cmd/erasure-bucket.go
@@ -243,7 +243,10 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceD
// If we reduce quorum to nil, means we have deleted buckets properly
// on some servers in quorum, we should look for volumeNotEmpty errors
// and delete those buckets as well.
-deleteDanglingBucket(ctx, storageDisks, dErrs, bucket)
+//
+// let this call succeed, even if client cancels the context
+// this is to ensure that we don't leave any stale content
+deleteDanglingBucket(context.Background(), storageDisks, dErrs, bucket)
return nil
}
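The change above is the core of the fix: the dangling-bucket cleanup runs after the quorum decision has already been made, and if the client has cancelled the request by then, a call tied to the request context would give up immediately and leave stale bucket entries behind. A minimal, self-contained sketch of the difference, with a hypothetical cleanup helper standing in for deleteDanglingBucket:

```go
package main

import (
	"context"
	"fmt"
)

// cleanup stands in for a best-effort call such as deleteDanglingBucket:
// it bails out early if its context has already been cancelled.
func cleanup(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // cleanup silently skipped
	default:
		fmt.Println("cleaned up", name)
		return nil
	}
}

func main() {
	// Simulate a client that cancels the request mid-flight.
	reqCtx, cancel := context.WithCancel(context.Background())
	cancel()

	// Request context: the cleanup is skipped once the client is gone.
	fmt.Println("with request ctx:", cleanup(reqCtx, "dangling-bucket"))

	// Internal context: the cleanup still runs to completion.
	fmt.Println("with background ctx:", cleanup(context.Background(), "dangling-bucket"))
}
```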

cmd/erasure-object.go
@@ -25,6 +25,7 @@ import (
"net/http"
"path"
"sync"
"time"
"github.com/minio/minio-go/v7/pkg/tags"
xhttp "github.com/minio/minio/cmd/http"
@@ -122,7 +123,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
tempObj := mustGetUUID()
// Cleanup in case of xl.meta writing failure
-defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum)
+defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
// Write unique `xl.meta` for each disk.
if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
@@ -599,7 +600,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
// Delete temporary object in the event of failure.
// If PutObject succeeded there would be no temporary
// object to delete.
-defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum)
+defer er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
// This is a special case with size as '0' and object ends with
// a slash separator, we treat it like a valid operation and
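Both CopyObject and putObject register this cleanup with defer. The deferred call only executes when the surrounding function returns, and by that time the request context may already be cancelled (for instance, when the write failed because the client went away), so a ctx-scoped deleteObject would be a no-op and the temporary object under minioMetaTmpBucket would linger. A small sketch of that timing, using illustrative names rather than the real erasure helpers:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// removeTemp is a stand-in for deleting a temporary object; it gives up
// if the context it was handed is already dead.
func removeTemp(ctx context.Context, obj string) {
	if ctx.Err() != nil {
		fmt.Println("cleanup of", obj, "skipped:", ctx.Err())
		return
	}
	fmt.Println("cleanup of", obj, "done")
}

// put simulates a handler whose request context is cancelled mid-write.
func put(ctx context.Context, cancel context.CancelFunc) error {
	tempObj := "tmp-1234"

	// Deferred cleanups: both run after the failure below, when ctx is already dead.
	defer removeTemp(ctx, tempObj)                  // would be skipped
	defer removeTemp(context.Background(), tempObj) // still runs

	cancel() // client disconnects
	return errors.New("write failed: context cancelled")
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println("put:", put(ctx, cancel))
}
```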
@@ -818,7 +819,9 @@ func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string
if disks[index] == nil {
return errDiskNotFound
}
-err := cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj)
+tctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+defer cancel()
+err := cleanupDir(tctx, disks[index], minioMetaTmpBucket, tmpObj)
if err != nil && err != errVolumeNotFound {
return err
}
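Switching the deferred cleanups to context.Background() removes the client's ability to cancel them, so the hunk above adds a bound of its own: each per-disk cleanupDir now runs under a context derived with context.WithTimeout, and the deferred cancel releases the timer once the call is done. The same bound-and-cancel pattern in isolation, with a made-up slowDelete standing in for the disk call:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// slowDelete pretends to be a disk cleanup that honours context deadlines.
func slowDelete(ctx context.Context, d time.Duration) error {
	select {
	case <-time.After(d): // simulated I/O taking d
		return nil
	case <-ctx.Done():
		return ctx.Err() // deadline exceeded or cancelled
	}
}

func main() {
	// Bound an otherwise unbounded (background) cleanup to a short deadline.
	tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel() // always release the timeout's resources

	fmt.Println("fast cleanup:", slowDelete(tctx, 10*time.Millisecond)) // finishes in time
	fmt.Println("slow cleanup:", slowDelete(tctx, time.Second))         // hits the deadline
}
```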

pkg/retry/retry.go
@@ -99,10 +99,10 @@ func NewTimerWithJitter(ctx context.Context, unit time.Duration, cap time.Durati
// Channel used to signal after the expiry of backoff wait seconds.
for {
select {
-case attemptCh <- nextBackoff:
-nextBackoff++
case <-ctx.Done():
return
+case attemptCh <- nextBackoff:
+nextBackoff++
}
t.Start(exponentialBackoffWait(nextBackoff))
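One note on the retry change: Go's select chooses pseudo-randomly among cases that are ready at the same moment, so reordering the cases does not by itself give ctx.Done() priority over the attempt channel; it mainly puts the cancellation path first for readability. If strict priority for cancellation were wanted, a common pattern is a non-blocking pre-check before the blocking select, sketched below with illustrative names:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sendAttempts pushes attempt numbers until ctx is cancelled, checking
// cancellation with strict priority over the send.
func sendAttempts(ctx context.Context, attemptCh chan<- int) {
	next := 0
	for {
		// Non-blocking pre-check: if cancellation is already pending,
		// stop without racing it against a ready receiver.
		select {
		case <-ctx.Done():
			return
		default:
		}

		select {
		case <-ctx.Done():
			return
		case attemptCh <- next:
			next++
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	attemptCh := make(chan int)

	go sendAttempts(ctx, attemptCh)

	for i := 0; i < 3; i++ {
		fmt.Println("attempt", <-attemptCh)
	}
	cancel()
	time.Sleep(10 * time.Millisecond) // let the goroutine observe cancellation
}
```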
