Lock-free rate-limit algorithm + bug-fix (#2694)

Branch: master
Aditya Manthramurthy authored 8 years ago, committed by Harshavardhana
parent da9ae574df
commit 6533927237
  1. cmd/rate-limit-handler.go (36 changed lines)

@@ -19,7 +19,6 @@ package cmd
 import (
 	"errors"
 	"net/http"
-	"sync"
 )
 
 var errTooManyRequests = errors.New("Too many clients in the waiting list")
@@ -28,7 +27,6 @@ var errTooManyRequests = errors.New("Too many clients in the waiting list")
 // limit the number of concurrent http requests.
 type rateLimit struct {
 	handler   http.Handler
-	lock      sync.Mutex
 	workQueue chan struct{}
 	waitQueue chan struct{}
 }
@@ -37,26 +35,26 @@ type rateLimit struct {
 // channel this is in-turn used to rate limit incoming connections in
 // ServeHTTP() http.Handler method.
 func (c *rateLimit) acquire() error {
-	//lock access to enter waitQueue
-	c.lock.Lock()
-	// Kick out clients when it is really crowded
-	if len(c.waitQueue) == cap(c.waitQueue) {
-		defer c.lock.Unlock() //unlock after return
+	//attempt to enter the waitQueue. If no slot is immediately
+	//available return error.
+	select {
+	case c.waitQueue <- struct{}{}:
+		//entered wait queue
+		break
+	default:
+		//no slot available for waiting
 		return errTooManyRequests
 	}
 
-	// Add new element in waitQueue to keep track of clients
-	// wanting to process their requests
-	c.waitQueue <- struct{}{}
-
-	// Unlock now. If we unlock before sending to the waitQueue
-	// channel, we can have multiple go-routines blocked on
-	// sending to the waitQueue (and exceeding the max. number of
-	// waiting connections.)
-	c.lock.Unlock()
-
-	// Block to put a waiting go-routine into processing mode.
-	c.workQueue <- <-c.waitQueue
+	//block attempting to enter the workQueue. If the workQueue is
+	//full, there can be at most cap(waitQueue) == 4*globalMaxConn
+	//goroutines waiting here because of the select above.
+	select {
+	case c.workQueue <- struct{}{}:
+		//entered workQueue - so remove one waiter. This step
+		//does not block as the waitQueue cannot be empty.
+		<-c.waitQueue
+	}
 
 	return nil
 }
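
For readers who want to see the new algorithm end to end, here is a minimal, self-contained sketch of the two-channel admission pattern this commit switches to. It is illustrative only: the release() method, the ServeHTTP wrapper, the rejection response, and the concrete capacities (the 4*globalMaxConn ratio appears only in the comments above) are assumptions, not the remaining code of cmd/rate-limit-handler.go.

package main

import (
	"errors"
	"fmt"
	"log"
	"net/http"
)

var errTooManyRequests = errors.New("Too many clients in the waiting list")

// rateLimit bounds concurrent requests with two buffered channels
// instead of a mutex: workQueue holds requests being processed,
// waitQueue holds requests allowed to wait for a work slot.
type rateLimit struct {
	handler   http.Handler
	workQueue chan struct{}
	waitQueue chan struct{}
}

// acquire mirrors the logic added in this commit: fail fast when the
// wait queue is full, otherwise block until a work slot opens. The
// single-case select from the diff is written here as a plain
// blocking send, which behaves the same way.
func (c *rateLimit) acquire() error {
	select {
	case c.waitQueue <- struct{}{}:
		// entered the wait queue
	default:
		// no waiting slot available
		return errTooManyRequests
	}
	c.workQueue <- struct{}{} // block until a processing slot opens
	<-c.waitQueue             // give up the waiting slot
	return nil
}

// release frees a processing slot (assumed counterpart to acquire();
// it is not part of the hunks shown above).
func (c *rateLimit) release() {
	<-c.workQueue
}

// ServeHTTP applies the limiter around the wrapped handler (assumed
// shape; the real handler's rejection response may differ).
func (c *rateLimit) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if err := c.acquire(); err != nil {
		http.Error(w, err.Error(), http.StatusTooManyRequests)
		return
	}
	defer c.release()
	c.handler.ServeHTTP(w, r)
}

func main() {
	// Hypothetical capacities, following the 4*globalMaxConn ratio the
	// comments above mention: maxConn workers, 4*maxConn waiters.
	maxConn := 4
	limiter := &rateLimit{
		handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "ok")
		}),
		workQueue: make(chan struct{}, maxConn),
		waitQueue: make(chan struct{}, 4*maxConn),
	}
	log.Fatal(http.ListenAndServe(":8080", limiter))
}

The point of the design is that both queues are plain buffered channels, so admission control needs no mutex: a full waitQueue rejects new clients immediately, and at most cap(waitQueue) goroutines can ever be parked on the blocking send into workQueue.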
