Respect min/max num workers range

This commit is contained in:
Gregory Eremin 2015-10-27 03:42:00 +03:00
parent 4eae1c3b8d
commit 850f0033a5

View File

@@ -19,8 +19,10 @@ type Satan struct {
DaemonStats stats.Publisher DaemonStats stats.Publisher
Logger *log.Logger Logger *log.Logger
DefaultNumWorkers uint32 MinNumWorkers uint32
ScaleSettings *ScaleSettings MaxNumWorkers uint32
numWorkers int64
ScalePlan *ScalePlan
daemons []Daemon daemons []Daemon
queue chan *task queue chan *task
@@ -52,7 +54,7 @@ type Publisher interface {
Close() Close()
} }
type ScaleSettings struct { type ScalePlan struct {
Interval time.Duration Interval time.Duration
MinProcessedTasks uint32 MinProcessedTasks uint32
LatencyThreshold time.Duration LatencyThreshold time.Duration
@@ -75,12 +77,13 @@ var (
// Summon creates a new instance of Satan. // Summon creates a new instance of Satan.
func Summon() *Satan { func Summon() *Satan {
return &Satan{ return &Satan{
Logger: log.New(os.Stdout, "[daemons] ", log.LstdFlags), Logger: log.New(os.Stdout, "[daemons] ", log.LstdFlags),
DefaultNumWorkers: 10, MinNumWorkers: 10,
queue: make(chan *task), MaxNumWorkers: 1000,
runtimeStats: stats.NewBasicStats(), queue: make(chan *task),
shutdownWorkers: make(chan struct{}), runtimeStats: stats.NewBasicStats(),
shutdownSystem: make(chan struct{}), shutdownWorkers: make(chan struct{}),
shutdownSystem: make(chan struct{}),
} }
} }
@@ -100,9 +103,9 @@ func (s *Satan) AddDaemon(d Daemon) {
// StartDaemons starts all registered daemons. // StartDaemons starts all registered daemons.
func (s *Satan) StartDaemons() { func (s *Satan) StartDaemons() {
s.addWorkers(s.DefaultNumWorkers) s.addWorkers(s.MinNumWorkers)
if s.ScaleSettings != nil { if s.ScalePlan != nil {
go s.autoScale() go s.autoScale()
} }
} }
@@ -139,6 +142,9 @@ func (s *Satan) runWorker() {
s.wgWorkers.Add(1) s.wgWorkers.Add(1)
defer s.wgWorkers.Done() defer s.wgWorkers.Done()
atomic.AddInt64(&s.numWorkers, 1)
defer atomic.AddInt64(&s.numWorkers, -1)
i := atomic.AddUint64(&workerIndex, 1) i := atomic.AddUint64(&workerIndex, 1)
s.Logger.Printf("Starting worker #%d", i) s.Logger.Printf("Starting worker #%d", i)
@@ -216,7 +222,7 @@ func (s *Satan) processGeneralTask(t *task) {
} }
func (s *Satan) autoScale() { func (s *Satan) autoScale() {
t := time.NewTicker(s.ScaleSettings.Interval) t := time.NewTicker(s.ScalePlan.Interval)
defer t.Stop() defer t.Stop()
for { for {
@@ -232,18 +238,24 @@ func (s *Satan) autoScale() {
func (s *Satan) adjustNumWorkers() { func (s *Satan) adjustNumWorkers() {
lat := s.runtimeStats.Fetch(stats.Latency) lat := s.runtimeStats.Fetch(stats.Latency)
tw := s.runtimeStats.Fetch(stats.TaskWait) tw := s.runtimeStats.Fetch(stats.TaskWait)
if lat.Processed() < int64(s.ScaleSettings.MinProcessedTasks) { if lat.Processed() < int64(s.ScalePlan.MinProcessedTasks) {
return return
} }
if lat.P95() > float64(s.ScaleSettings.LatencyThreshold) { if uint32(s.numWorkers)+s.ScalePlan.AdjustmentStep > s.MaxNumWorkers {
s.addWorkers(s.ScaleSettings.AdjustmentStep) return
}
if lat.P95() > float64(s.ScalePlan.LatencyThreshold) {
s.addWorkers(s.ScalePlan.AdjustmentStep)
s.runtimeStats.Reset() s.runtimeStats.Reset()
return return
} }
if tw.P95() > float64(s.ScaleSettings.TaskWaitThreshold) { if uint32(s.numWorkers)-s.ScalePlan.AdjustmentStep < s.MinNumWorkers {
s.stopWorkers(s.ScaleSettings.AdjustmentStep) return
}
if tw.P95() > float64(s.ScalePlan.TaskWaitThreshold) {
s.stopWorkers(s.ScalePlan.AdjustmentStep)
s.runtimeStats.Reset() s.runtimeStats.Reset()
return return
} }