Skip to content

Commit 4efa897

Browse files
committed
TODO: HPA config needs to be removed when scaling to 0
Signed-off-by: Pierangelo Di Pilato <[email protected]>
1 parent bbf032c commit 4efa897

File tree

4 files changed

+52
-5
lines changed

4 files changed

+52
-5
lines changed

olm-catalog/serverless-operator/manifests/serverless-operator.clusterserviceversion.yaml

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -496,6 +496,22 @@ spec:
496496
rules:
497497
# These are needed to create the various different resources.
498498
# Upstream manifests
499+
- apiGroups:
500+
- eventing.knative.dev
501+
resources:
502+
- brokers
503+
verbs:
504+
- get
505+
- list
506+
- watch
507+
- apiGroups:
508+
- messaging.knative.dev
509+
resources:
510+
- inmemorychannels
511+
verbs:
512+
- get
513+
- list
514+
- watch
499515
- apiGroups:
500516
- ""
501517
resources:

openshift-knative-operator/pkg/eventing/extension.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,8 @@ func (e *extension) Reconcile(ctx context.Context, comp base.KComponent) error {
142142
eventingistio.ScaleIstioController(requiredNs, ke, 1)
143143
}
144144

145+
e.logger.Debugw("resource spec", zap.Any("resource", ke.Spec))
146+
145147
return monitoring.ReconcileMonitoringForEventing(ctx, e.kubeclient, ke)
146148
}
147149

openshift-knative-operator/pkg/eventing/scale.go

Lines changed: 18 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ import (
2222
"knative.dev/operator/pkg/apis/operator/base"
2323
operatorv1beta1 "knative.dev/operator/pkg/apis/operator/v1beta1"
2424
knativeeventinginformer "knative.dev/operator/pkg/client/injection/informers/operator/v1beta1/knativeeventing"
25-
kubeclient "knative.dev/pkg/client/injection/kube/client"
2625
"knative.dev/pkg/controller"
26+
"knative.dev/pkg/injection"
2727
"knative.dev/pkg/logging"
2828
"knative.dev/pkg/ptr"
2929
)
@@ -39,6 +39,8 @@ type coreScaler struct {
3939
hasCRDsInstalled atomic.Bool
4040
cancel context.CancelFunc
4141
factory externalversions.SharedInformerFactory
42+
43+
logger *zap.Logger
4244
}
4345

4446
type CoreScalerWrapper struct {
@@ -98,18 +100,22 @@ func newInternalScaler(ctx context.Context, resync cache.ResourceEventHandler) *
98100

99101
logger := logging.FromContext(ctx).With(zap.String("component", "scaler"))
100102

103+
apiExtensionClient, _ := apiextension.NewForConfig(injection.GetConfig(ctx))
104+
101105
s := &coreScaler{
102106
BrokerLister: f.Eventing().V1().Brokers().Lister(),
103107

104108
InMemoryChannelLister: f.Messaging().V1().InMemoryChannels().Lister(),
105109

106-
apiExtensionClient: apiextension.New(kubeclient.Get(ctx).AppsV1().RESTClient()),
110+
apiExtensionClient: apiExtensionClient,
107111

108112
cacheSynced: sync.WaitGroup{},
109113
hasCRDsInstalled: atomic.Bool{},
110114

111115
cancel: cancel,
112116
factory: f,
117+
118+
logger: logger.Desugar(),
113119
}
114120
_, _ = f.Eventing().V1().Brokers().Informer().AddEventHandler(resync)
115121

@@ -121,6 +127,7 @@ func newInternalScaler(ctx context.Context, resync cache.ResourceEventHandler) *
121127
hasCRDsInstalled, err := s.verifyCRDsInstalled(ctx)
122128
logger.Debugw("Waiting for CRDs to be installed", zap.Bool("hasCRDsInstalled", hasCRDsInstalled))
123129
if err != nil {
130+
logger.Debugw("Failed to wait for CRDs to be installed", zap.Error(err))
124131
return false, nil
125132
}
126133
return hasCRDsInstalled, nil
@@ -152,6 +159,7 @@ func (s *coreScaler) scale(ke *operatorv1beta1.KnativeEventing) error {
152159

153160
hasMTChannelBrokers, err := s.hasMTChannelBrokers()
154161
if err != nil {
162+
s.logger.Warn("failed to verify if there are MT Channel Based Brokers", zap.Error(err))
155163
return err
156164
}
157165
if hasMTChannelBrokers {
@@ -166,6 +174,7 @@ func (s *coreScaler) scale(ke *operatorv1beta1.KnativeEventing) error {
166174

167175
hasInMemoryChannels, err := s.hasInMemoryChannels()
168176
if err != nil {
177+
s.logger.Warn("failed to verify if there are in memory channels", zap.Error(err))
169178
return err
170179
}
171180
if hasInMemoryChannels {
@@ -198,11 +207,11 @@ func (s *coreScaler) hasMTChannelBrokers() (bool, error) {
198207
}
199208

200209
func (s *coreScaler) hasInMemoryChannels() (bool, error) {
201-
eventTypes, err := s.InMemoryChannelLister.List(labels.Everything())
210+
imcs, err := s.InMemoryChannelLister.List(labels.Everything())
202211
if err != nil {
203-
return false, fmt.Errorf("failed to list eventtypes: %w", err)
212+
return false, fmt.Errorf("failed to list inmemorychannels: %w", err)
204213
}
205-
return len(eventTypes) > 0, nil
214+
return len(imcs) > 0, nil
206215
}
207216

208217
func (s *coreScaler) ensureAtLeastOneReplica(ke *operatorv1beta1.KnativeEventing, name string) {
@@ -211,6 +220,8 @@ func (s *coreScaler) ensureAtLeastOneReplica(ke *operatorv1beta1.KnativeEventing
211220
replicas = ke.Spec.HighAvailability.Replicas
212221
}
213222

223+
s.logger.Info("Scaling up component", zap.String("name", name), zap.Int32("replicas", *replicas))
224+
214225
for i, w := range ke.Spec.Workloads {
215226
if w.Name == name {
216227
if w.Replicas == nil {
@@ -227,6 +238,8 @@ func (s *coreScaler) ensureAtLeastOneReplica(ke *operatorv1beta1.KnativeEventing
227238
}
228239

229240
func (s *coreScaler) scaleToZero(ke *operatorv1beta1.KnativeEventing, name string) {
241+
s.logger.Info("Scaling down component", zap.String("name", name))
242+
230243
replicas := pointer.Int32(0)
231244
for i, w := range ke.Spec.Workloads {
232245
if w.Name == name {

templates/csv.yaml

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -498,6 +498,22 @@ spec:
498498
rules:
499499
# These are needed to create the various different resources.
500500
# Upstream manifests
501+
- apiGroups:
502+
- eventing.knative.dev
503+
resources:
504+
- brokers
505+
verbs:
506+
- get
507+
- list
508+
- watch
509+
- apiGroups:
510+
- messaging.knative.dev
511+
resources:
512+
- inmemorychannels
513+
verbs:
514+
- get
515+
- list
516+
- watch
501517
- apiGroups:
502518
- ""
503519
resources:

0 commit comments

Comments (0)