Commit 7122a19

feat: implement LeaderElectionRunnable explicitly and add compile-time check
1 parent 6813539 commit 7122a19

1 file changed: +29 −14

internal/autoscaler/autoscaler.go

Lines changed: 29 additions & 14 deletions
```diff
@@ -14,6 +14,12 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+var (
+	_ manager.Runnable               = (*Autoscaler)(nil)
+	_ manager.LeaderElectionRunnable = (*Autoscaler)(nil)
 )
 
 type Autoscaler struct {
```
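The two blank-identifier assignments introduced above are Go's standard compile-time interface check: a typed nil pointer assigned to an interface-typed `_` costs nothing at runtime, but the build fails the moment `*Autoscaler` stops satisfying `manager.Runnable` or `manager.LeaderElectionRunnable`. A minimal, self-contained sketch of the idiom (the `Runner` and `Worker` names are illustrative, not from this repository):

```go
package main

import "fmt"

// Runner stands in for an interface such as manager.Runnable
// (hypothetical name, for illustration only).
type Runner interface {
	Run() error
}

type Worker struct{}

func (w *Worker) Run() error {
	fmt.Println("running")
	return nil
}

// Compile-time check: nothing happens at runtime, but compilation
// fails if *Worker ever stops implementing Runner.
var _ Runner = (*Worker)(nil)

func main() {
	var r Runner = &Worker{}
	_ = r.Run()
}
```

The assertion surfaces any breakage in the package that defines the type, rather than at a distant call site.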
```diff
@@ -32,7 +38,7 @@ func NewAutoscaler(c client.Client) (*Autoscaler, error) {
 	return &Autoscaler{
 		Client:          c,
 		Recommender:     NewRecommender(),
-		MetricsProvider: NewMetricsProvider(),
+		MetricsProvider: NewMetricsProvider(nil),
 		WorkloadStates:  map[string]*WorkloadState{},
 		WorkerStates:    map[string]*WorkerState{},
 	}, nil
```
```diff
@@ -57,6 +63,10 @@ func (s *Autoscaler) Start(ctx context.Context) error {
 	}
 }
 
+func (s *Autoscaler) NeedLeaderElection() bool {
+	return true
+}
+
 func (s *Autoscaler) Run(ctx context.Context) {
 	log := log.FromContext(ctx)
 
```
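`NeedLeaderElection` is the single method of controller-runtime's `manager.LeaderElectionRunnable` interface. Returning `true` tells the manager to hold this runnable back until the replica wins the leader-election lease, so only one autoscaler loop runs at a time. A sketch of the wiring (assumed setup: the actual `main.go` is not part of this commit, the lease ID is made up, and the import of the package defining `NewAutoscaler` is elided):

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		LeaderElection:   true,
		LeaderElectionID: "autoscaler-lease-example", // hypothetical lease name
	})
	if err != nil {
		panic(err)
	}

	autoscaler, err := NewAutoscaler(mgr.GetClient())
	if err != nil {
		panic(err)
	}

	// mgr.Add accepts any manager.Runnable. Because NeedLeaderElection
	// returns true, the manager defers Start(ctx) on this runnable until
	// this replica has acquired the leader-election lease.
	if err := mgr.Add(autoscaler); err != nil {
		panic(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```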
```diff
@@ -80,8 +90,8 @@ func (s *Autoscaler) LoadWorkloads(ctx context.Context) {
 		autoScalingConfig := workload.Spec.AutoScalingConfig
 		// Currently only supports enabling both AutoSetLimits and AutoSetRequests simultaneously
 		if !workload.DeletionTimestamp.IsZero() ||
-			!(autoScalingConfig.AutoSetLimits.Enable &&
-				autoScalingConfig.AutoSetRequests.Enable) {
+			!autoScalingConfig.AutoSetLimits.Enable ||
+			!autoScalingConfig.AutoSetRequests.Enable {
 			continue
 		}
```
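The rewritten guard is a direct De Morgan transformation, since `!(A && B)` is equivalent to `!A || !B`; behavior is unchanged, the nesting is just flattened. A throwaway check over all four input combinations:

```go
package main

import "fmt"

func main() {
	// De Morgan's law: !(a && b) == !a || !b for every combination.
	for _, a := range []bool{false, true} {
		for _, b := range []bool{false, true} {
			fmt.Println(!(a && b) == (!a || !b)) // prints true four times
		}
	}
}
```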
```diff
@@ -138,15 +148,15 @@ func (s *Autoscaler) LoadHistoryMetrics(ctx context.Context) {
 
 	workersMetrics := s.MetricsProvider.GetHistoryMetrics()
 	for _, metrics := range workersMetrics {
-		workloadState, exists := s.WorkloadStates[metrics.Workload]
+		workloadState, exists := s.WorkloadStates[metrics.WorkloadName]
 		if !exists {
-			workloadState = NewWorkloadState(metrics.Workload)
-			s.WorkloadStates[metrics.Workload] = workloadState
+			workloadState = NewWorkloadState(metrics.WorkloadName)
+			s.WorkloadStates[metrics.WorkloadName] = workloadState
 		}
-		workerState, exists := s.WorkerStates[metrics.Worker]
+		workerState, exists := s.WorkerStates[metrics.WorkerName]
 		if !exists {
-			workerState = NewWorkerState(metrics.Worker, metrics.Workload)
-			s.WorkerStates[metrics.Worker] = workerState
+			workerState = NewWorkerState(metrics.WorkerName, metrics.WorkloadName)
+			s.WorkerStates[metrics.WorkerName] = workerState
 		}
 
 		s.addSamples(workloadState, workerState, metrics)
```
```diff
@@ -159,11 +169,11 @@ func (s *Autoscaler) LoadRealTimeMetrics(ctx context.Context) {
 
 	workersMetrics := s.MetricsProvider.GetWorkersMetrics()
 	for _, metrics := range workersMetrics {
-		workloadState, workloadExists := s.WorkloadStates[metrics.Workload]
+		workloadState, workloadExists := s.WorkloadStates[metrics.WorkloadName]
 		if !workloadExists {
 			continue
 		}
-		workerState, workerExists := s.WorkerStates[metrics.Worker]
+		workerState, workerExists := s.WorkerStates[metrics.WorkerName]
 		if !workerExists {
 			continue
 		}
```
```diff
@@ -186,8 +196,12 @@ func (s *Autoscaler) ProcessWorkloads(ctx context.Context) {
 			continue
 		}
 
+		if len(podList.Items) <= 0 {
+			continue
+		}
+
 		// TODO: apply config
-		// asConfig := workloadState.AutoScalingConfig
+		// asConfig := workloadState.AutoScalingConfig
 		rr := s.Recommender.GetRecommendedResources(workloadState)
 		log.Info("Autoscaler processWorkloads", "recommended resources", rr)
```

```diff
@@ -197,13 +211,13 @@ func (s *Autoscaler) ProcessWorkloads(ctx context.Context) {
 		}
 
 		annotations := worker.GetAnnotations()
+		newAnnotations := map[string]string{}
+
 		tflopsRequest, err := resource.ParseQuantity(annotations[constants.TFLOPSRequestAnnotation])
 		if err != nil {
 			log.Error(err, "failed to parse vram request")
 			continue
 		}
-
-		newAnnotations := map[string]string{}
 		if tflopsRequest.Cmp(QuantityFromAmount(rr.LowerBoundTflops)) < 0 ||
 			tflopsRequest.Cmp(QuantityFromAmount(rr.UpperBoundTflops)) > 0 {
 			targetTflopsRequest := QuantityFromAmount(rr.TargetTflops)
```
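For context on the bounds check above: `resource.ParseQuantity` turns the annotation string into a `Quantity`, and `Cmp` returns -1, 0, or 1, so the condition fires exactly when the current request falls outside `[LowerBoundTflops, UpperBoundTflops]`. A standalone sketch with made-up numbers standing in for the `QuantityFromAmount(...)` bounds:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Parse the request the way the autoscaler parses its annotation value.
	req, err := resource.ParseQuantity("50")
	if err != nil {
		panic(err)
	}

	// Hypothetical recommendation bounds (illustrative values only).
	lower := resource.MustParse("40")
	upper := resource.MustParse("120")

	// Cmp returns -1, 0, or 1; out of range means below lower or above upper.
	outOfRange := req.Cmp(lower) < 0 || req.Cmp(upper) > 0
	fmt.Println(outOfRange) // false: 50 lies within [40, 120]
}
```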
```diff
@@ -248,6 +262,7 @@ func (s *Autoscaler) ProcessWorkloads(ctx context.Context) {
 			worker.Annotations[key] = value
 		}
 
+		// TODO: replace using the patch method
 		if err := s.Update(ctx, &worker); err != nil {
 			log.Error(err, "failed to update worker")
 		}
```
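The new TODO points at a known sharp edge: `Update` writes the whole object and returns a conflict if the Pod changed since it was listed. One shape the patch version could take, using controller-runtime's `client.MergeFrom` (a hypothetical helper sketched under that assumption, not part of this commit):

```go
package autoscaler

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patchWorkerAnnotations is a sketch of the patch-based alternative the
// TODO refers to. client.MergeFrom diffs against a deep copy taken before
// the mutation, so only the changed annotations are sent to the API server
// and conflicts with concurrent writers are far less likely than with Update.
func patchWorkerAnnotations(ctx context.Context, c client.Client, worker *corev1.Pod, newAnnotations map[string]string) error {
	base := worker.DeepCopy()
	if worker.Annotations == nil {
		worker.Annotations = map[string]string{}
	}
	for key, value := range newAnnotations {
		worker.Annotations[key] = value
	}
	return c.Patch(ctx, worker, client.MergeFrom(base))
}
```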
