Change CriticalIf to FatalIf for proper error message (#6040)

During startup, until the object layer is initialized, the
logger is disabled to provide a cleaner UI error
message. Since CriticalIf is disabled, use FatalIf instead.

Also never call os.Exit(1) on running servers where
you can return error to client in handlers.
master
Harshavardhana 6 years ago committed by kannappanr
parent 05f96f3956
commit 28d526bc68
  1. 4
      cmd/admin-rpc-client.go
  2. 2
      cmd/admin-rpc-server.go
  3. 25
      cmd/bucket-notification-handlers.go
  4. 2
      cmd/generic-handlers.go
  5. 7
      cmd/lock-rpc-server.go
  6. 4
      cmd/namespace-lock.go
  7. 4
      cmd/peer-rpc-client.go
  8. 2
      cmd/peer-rpc-server.go
  9. 4
      cmd/storage-rpc-client.go
  10. 4
      cmd/web-handlers.go

@ -179,9 +179,9 @@ func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
for _, hostStr := range GetRemotePeers(endpoints) {
host, err := xnet.ParseHost(hostStr)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to parse Admin RPC Host", context.Background())
rpcClient, err := NewAdminRPCClient(host)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to initialize Admin RPC Client", context.Background())
adminPeerList = append(adminPeerList, adminPeer{
addr: hostStr,
cmdRunner: rpcClient,

@ -121,7 +121,7 @@ func NewAdminRPCServer() (*xrpc.Server, error) {
// registerAdminRPCRouter - creates and registers Admin RPC server and its router.
func registerAdminRPCRouter(router *mux.Router) {
	rpcServer, err := NewAdminRPCServer()
	// Startup-time failure: FatalIf prints a clean user-facing message and
	// exits (logger output is disabled until the object layer is up).
	// NOTE: message fixed — this is the Admin RPC server, not the Lock RPC server.
	logger.FatalIf(err, "Unable to initialize Admin RPC Server", context.Background())
	subrouter := router.PathPrefix(minioReservedBucketPath).Subrouter()
	subrouter.Path(adminServiceSubPath).Handler(rpcServer)
}

@ -224,11 +224,17 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
return
}
host, e := xnet.ParseHost(r.RemoteAddr)
logger.CriticalIf(ctx, e)
host, err := xnet.ParseHost(r.RemoteAddr)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
target, e := target.NewHTTPClientTarget(*host, w)
logger.CriticalIf(ctx, e)
target, err := target.NewHTTPClientTarget(*host, w)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
rulesMap := event.NewRulesMap(eventNames, pattern, target.ID())
@ -241,10 +247,13 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
defer globalNotificationSys.RemoveRulesMap(bucketName, rulesMap)
thisAddr, e := xnet.ParseHost(GetLocalPeer(globalEndpoints))
logger.CriticalIf(ctx, e)
thisAddr, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
if err := SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
if err = SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -259,7 +268,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
<-target.DoneCh
if err := RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
if err = RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)

@ -691,7 +691,7 @@ func setBucketForwardingHandler(h http.Handler) http.Handler {
// cancelled immediately.
func setRateLimitHandler(h http.Handler) http.Handler {
_, maxLimit, err := sys.GetMaxOpenFileLimit()
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to get maximum open file limit", context.Background())
// Burst value is set to 1 to allow only maxOpenFileLimit
// requests to happen at once.
l := rate.NewLimiter(rate.Limit(maxLimit), 1)

@ -124,7 +124,10 @@ func (l *lockRPCReceiver) lockMaintenance(interval time.Duration) {
for _, nlrip := range nlripLongLived {
// Initialize client based on the long live locks.
host, err := xnet.ParseHost(nlrip.lri.node)
logger.CriticalIf(context.Background(), err)
if err != nil {
logger.LogIf(context.Background(), err)
continue
}
c, err := NewLockRPCClient(host)
if err != nil {
logger.LogIf(context.Background(), err)
@ -183,7 +186,7 @@ func NewLockRPCServer() (*xrpc.Server, error) {
// Register distributed NS lock handlers.
func registerDistNSLockRouter(router *mux.Router) {
rpcServer, err := NewLockRPCServer()
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to initialize Lock RPC Server", context.Background())
// Start lock maintenance from all lock servers.
go startLockMaintenance(globalLockServer)

@ -86,9 +86,9 @@ func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int)
locker = &(receiver.ll)
} else {
host, err := xnet.ParseHost(endpoint.Host)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to parse Lock RPC Host", context.Background())
locker, err = NewLockRPCClient(host)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to initialize Lock RPC Client", context.Background())
}
clnts = append(clnts, locker)

@ -166,9 +166,9 @@ func makeRemoteRPCClients(endpoints EndpointList) map[xnet.Host]*PeerRPCClient {
peerRPCClientMap := make(map[xnet.Host]*PeerRPCClient)
for _, hostStr := range GetRemotePeers(endpoints) {
host, err := xnet.ParseHost(hostStr)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to parse peer RPC Host", context.Background())
rpcClient, err := NewPeerRPCClient(host)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to initialize peer RPC Client", context.Background())
peerRPCClientMap[*host] = rpcClient
}

@ -201,7 +201,7 @@ func NewPeerRPCServer() (*xrpc.Server, error) {
// registerPeerRPCRouter - creates and registers Peer RPC server and its router.
func registerPeerRPCRouter(router *mux.Router) {
	rpcServer, err := NewPeerRPCServer()
	// Startup-time failure: FatalIf prints a clean user-facing message and
	// exits (logger output is disabled until the object layer is up).
	logger.FatalIf(err, "Unable to initialize peer RPC Server", context.Background())
	subrouter := router.PathPrefix(minioReservedBucketPath).Subrouter()
	subrouter.Path(peerServiceSubPath).Handler(rpcServer)
}

@ -316,9 +316,9 @@ func NewStorageRPCClient(host *xnet.Host, endpointPath string) (*StorageRPCClien
// Initialize new storage rpc client.
func newStorageRPC(endpoint Endpoint) *StorageRPCClient {
host, err := xnet.ParseHost(endpoint.Host)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to parse storage RPC Host", context.Background())
rpcClient, err := NewStorageRPCClient(host, endpoint.Path)
logger.CriticalIf(context.Background(), err)
logger.FatalIf(err, "Unable to initialize storage RPC client", context.Background())
rpcClient.connected = rpcClient.Call(storageServiceName+".Connect", &AuthArgs{}, &VoidReply{}) == nil
return rpcClient
}

@ -483,7 +483,9 @@ func (web webAPIHandlers) GenerateAuth(r *http.Request, args *WebGenericArgs, re
return toJSONError(errAuthentication)
}
cred, err := auth.GetNewCredentials()
logger.CriticalIf(context.Background(), err)
if err != nil {
return toJSONError(err)
}
reply.AccessKey = cred.AccessKey
reply.SecretKey = cred.SecretKey
reply.UIVersion = browser.UIVersion

Loading…
Cancel
Save