allow ctrl+c to be consistent at early startup (#10435)

fixes #10431
Branch: master
Harshavardhana authored 4 years ago, committed by GitHub
parent 86a3319d41
commit 96997d2b21
Changed files:
  1. cmd/admin-heal-ops.go (3 lines changed)
  2. cmd/bootstrap-peer-server.go (25 lines changed)
  3. cmd/server-main.go (27 lines changed)
  4. cmd/signals.go (18 lines changed)
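
The common thread across these four files: OS signal handling is registered (and its goroutine started) before any potentially slow bootstrap work, and the wait loops observe the global context so cancellation propagates. A minimal standalone sketch of that ordering, using illustrative names rather than MinIO's actual globals:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// Register the signal handler first, so Ctrl+C is honored even
	// while the rest of startup is still running.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-sigCh
		cancel() // tell every bootstrap loop to stop waiting
	}()

	// Simulated slow bootstrap: waits up to 30s, but returns the
	// moment the context is canceled.
	select {
	case <-ctx.Done():
		fmt.Println("startup interrupted:", ctx.Err())
	case <-time.After(30 * time.Second):
		fmt.Println("bootstrap finished")
	}
}
```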

cmd/admin-heal-ops.go

@@ -108,7 +108,6 @@ func (ahs *allHealState) healDriveCount() int {
 	ahs.Lock()
 	defer ahs.Unlock()
 
-	fmt.Println(ahs.healLocalDisks)
 	return len(ahs.healLocalDisks)
 }
@@ -130,7 +129,6 @@ func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
 	for _, ep := range healLocalDisks {
 		delete(ahs.healLocalDisks, ep)
 	}
-	fmt.Println(ahs.healLocalDisks)
 }
 
 func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
@@ -140,7 +138,6 @@ func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
 	for _, ep := range healLocalDisks {
 		ahs.healLocalDisks[ep] = struct{}{}
 	}
-	fmt.Println(ahs.healLocalDisks)
 }
 
 func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {

cmd/bootstrap-peer-server.go

@@ -178,17 +178,22 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones)
 			}
 			onlineServers++
 		}
-		// Sleep for a while - so that we don't go into
-		// 100% CPU when half the endpoints are offline.
-		time.Sleep(500 * time.Millisecond)
-		retries++
-		// after 5 retries start logging that servers are not reachable yet
-		if retries >= 5 {
-			logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
-			logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
-			retries = 0 // reset to log again after 5 retries.
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			// Sleep for a while - so that we don't go into
+			// 100% CPU when half the endpoints are offline.
+			time.Sleep(100 * time.Millisecond)
+			retries++
+			// after 5 retries start logging that servers are not reachable yet
+			if retries >= 5 {
+				logger.Info(fmt.Sprintf("Waiting for atleast %d servers to be online for bootstrap check", len(clnts)/2))
+				logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
+				retries = 0 // reset to log again after 5 retries.
+			}
+			offlineEndpoints = nil
 		}
-		offlineEndpoints = nil
 	}
 	return nil
 }
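
The hunk above swaps a bare time.Sleep for a select that also watches ctx.Done(), so the bootstrap wait can be interrupted. A self-contained sketch of the same pattern, with hypothetical names standing in for verifyServerSystemConfig's internals:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForPeers is a hypothetical stand-in for the bootstrap check:
// it polls until enough peers are up, but returns immediately with
// ctx.Err() once the context is canceled (for example by Ctrl+C).
func waitForPeers(ctx context.Context, peersOnline func() int, quorum int) error {
	for peersOnline() < quorum {
		select {
		case <-ctx.Done():
			return ctx.Err() // cancellation wins over the sleep
		default:
			// Short sleep avoids busy-spinning while keeping
			// cancellation latency around 100ms per iteration.
			time.Sleep(100 * time.Millisecond)
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { // simulate a Ctrl+C after one second
		time.Sleep(1 * time.Second)
		cancel()
	}()

	err := waitForPeers(ctx, func() int { return 0 }, 2)
	if errors.Is(err, context.Canceled) {
		fmt.Println("bootstrap interrupted:", err)
	}
}
```

Shortening the sleep (100ms instead of 500ms) is what bounds the worst-case latency between pressing Ctrl+C and the loop noticing it.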

cmd/server-main.go

@@ -213,14 +213,17 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error)
 				return
 			}
 
+			// If context was canceled
+			if errors.Is(err, context.Canceled) {
+				return
+			}
+
 			// Prints the formatted startup message in safe mode operation.
 			// Drops-into safe mode where users need to now manually recover
 			// the server.
 			printStartupSafeModeMessage(getAPIEndpoints(), err)
 
 			// Initialization returned error reaching safe mode and
 			// not proceeding waiting for admin action.
-			handleSignals()
+			<-globalOSSignalCh
 		}
 	}(txnLk)
@@ -276,7 +279,6 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) {
 		// One of these retriable errors shall be retried.
 		if errors.Is(err, errDiskNotFound) ||
 			errors.Is(err, errConfigNotFound) ||
-			errors.Is(err, context.Canceled) ||
 			errors.Is(err, context.DeadlineExceeded) ||
 			errors.As(err, &rquorum) ||
 			errors.As(err, &wquorum) ||
@@ -384,13 +386,15 @@ func startBackgroundOps(ctx context.Context, objAPI ObjectLayer) {
 
 // serverMain handler called for 'minio server' command.
 func serverMain(ctx *cli.Context) {
+	signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
+
+	go handleSignals()
+
 	setDefaultProfilerRates()
 
 	// Initialize globalConsoleSys system
 	globalConsoleSys = NewConsoleLogger(GlobalContext)
-
-	signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
 
 	// Handle all server command args.
 	serverHandleCmdArgs(ctx)
@@ -444,6 +448,9 @@ func serverMain(ctx *cli.Context) {
 		globalBackgroundHealState = newHealState()
 	}
 
+	// Initialize all sub-systems
+	newAllSubsystems()
+
 	// Configure server.
 	handler, err := configureServerHandler(globalEndpoints)
 	if err != nil {
@@ -491,14 +498,14 @@ func serverMain(ctx *cli.Context) {
 		for {
 			// Additionally in distributed setup, validate the setup and configuration.
 			err := verifyServerSystemConfig(GlobalContext, globalEndpoints)
-			if err == nil {
+			if err == nil || errors.Is(err, context.Canceled) {
 				break
 			}
 			logger.LogIf(GlobalContext, err, "Unable to initialize distributed setup, retrying.. after 5 seconds")
 			select {
 			case <-GlobalContext.Done():
 				return
-			case <-time.After(5 * time.Second):
+			case <-time.After(500 * time.Millisecond):
 			}
 		}
 	}
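
Here context.Canceled is treated like success so the retry loop stops immediately on Ctrl+C; the subsequent initSafeMode call sees the canceled context and unwinds on its own. A rough, standalone illustration of that control flow (helper names are hypothetical):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// configCheck is a hypothetical stand-in for the distributed setup check.
func configCheck(ctx context.Context) error { return ctx.Err() }

// waitForConfig retries until the check passes, but treats
// context.Canceled like success so shutdown is never blocked here;
// later startup steps re-check the context and bail out themselves.
func waitForConfig(ctx context.Context) {
	for {
		err := configCheck(ctx)
		if err == nil || errors.Is(err, context.Canceled) {
			break
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(500 * time.Millisecond):
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate Ctrl+C arriving before bootstrap finished
	waitForConfig(ctx)
	fmt.Println("returned promptly despite pending bootstrap")
}
```

If cancellation were still treated as retriable (as the removed errors.Is(err, context.Canceled) branch in initSafeMode did), the server would keep retrying and Ctrl+C would appear to hang.
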
@@ -516,8 +523,6 @@ func serverMain(ctx *cli.Context) {
 	globalObjectAPI = newObject
 	globalObjLayerMutex.Unlock()
 
-	newAllSubsystems()
-
 	go startBackgroundOps(GlobalContext, newObject)
 
 	logger.FatalIf(initSafeMode(GlobalContext, newObject), "Unable to initialize server switching into safe-mode")
@@ -549,7 +554,7 @@ func serverMain(ctx *cli.Context) {
 		logger.StartupMessage(color.RedBold(msg))
 	}
 
-	handleSignals()
+	<-globalOSSignalCh
 }
 
 // Initialize object layer with the supplied disks, objectLayer is nil upon any error.

cmd/signals.go

@@ -18,8 +18,6 @@ package cmd
 
 import (
 	"context"
-	"errors"
-	"net/http"
 	"os"
 	"strings"
@@ -46,6 +44,9 @@ func handleSignals() {
 	stopProcess := func() bool {
 		var err, oerr error
 
+		// send signal to various go-routines that they need to quit.
+		cancelGlobalContext()
+
 		if globalNotificationSys != nil {
 			globalNotificationSys.RemoveAllRemoteTargets()
 		}
@@ -55,9 +56,6 @@ func handleSignals() {
 			logger.LogIf(context.Background(), err)
 		}
 
-		// send signal to various go-routines that they need to quit.
-		cancelGlobalContext()
-
 		if objAPI := newObjectLayerWithoutSafeModeFn(); objAPI != nil {
 			oerr = objAPI.Shutdown(context.Background())
 			logger.LogIf(context.Background(), oerr)
@@ -68,14 +66,8 @@ func handleSignals() {
 
 	for {
 		select {
-		case err := <-globalHTTPServerErrorCh:
-			if objAPI := newObjectLayerWithoutSafeModeFn(); objAPI != nil {
-				objAPI.Shutdown(context.Background())
-			}
-			if err != nil && !errors.Is(err, http.ErrServerClosed) {
-				logger.Fatal(err, "Unable to start MinIO server")
-			}
-			exit(true)
+		case <-globalHTTPServerErrorCh:
+			exit(stopProcess())
 		case osSignal := <-globalOSSignalCh:
 			logger.Info("Exiting on signal: %s", strings.ToUpper(osSignal.String()))
 			exit(stopProcess())
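
With this change every exit path in handleSignals funnels through the same stopProcess helper, whether the trigger is an HTTP server error or an OS signal. A standalone sketch of that shape, with simplified cleanup and channel names that only loosely mirror the diff:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// One signal channel and one server-error channel, as in handleSignals.
	// This is a sketch, not MinIO's actual wiring.
	osSignalCh := make(chan os.Signal, 1)
	signal.Notify(osSignalCh, os.Interrupt, syscall.SIGTERM)

	httpServerErrCh := make(chan error, 1)

	// stopProcess centralizes cleanup so every exit path behaves the
	// same way: cancel outstanding work, release resources, report status.
	stopProcess := func() bool {
		fmt.Println("cancelling context, shutting down subsystems...")
		return true
	}

	exit := func(ok bool) {
		if ok {
			os.Exit(0)
		}
		os.Exit(1)
	}

	for {
		select {
		case <-httpServerErrCh:
			fmt.Println("http server stopped, shutting down")
			exit(stopProcess())
		case sig := <-osSignalCh:
			fmt.Println("exiting on signal:", sig)
			exit(stopProcess())
		}
	}
}
```

The per-case shutdown logic removed above (and with it the errors and net/http imports) is exactly what stopProcess now covers.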
