Skip to content

Commit 178426f

Browse files
Added two more tests regarding the robustness K8s standard, namely "maxMutatingRequestInflight" and "minRequestTimeout".
1 parent e73625e commit 178426f

File tree

4 files changed

+275
-51
lines changed

4 files changed

+275
-51
lines changed

Tests/kaas/kaas-sonobuoy-go-example-e2e-framework/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ dev-rerun: dev-clean dev-build dev-run
123123
sleep 5; \
124124
done
125125
@echo "[rerun] Waiting an additional 30 seconds to ensure results are ready..."
126-
sleep 10
126+
sleep 30
127127
$(MAKE) dev-result
128128
@echo "[Displaying results...]"
129129
cat results/plugins/scsconformance/sonobuoy_results.yaml
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
# event-ratelimit-config.yaml
2+
kind: Configuration
3+
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
4+
limits:
5+
- burst: 20000
6+
qps: 5000
7+
type: Server
8+
Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,19 @@
11
# kind-config.yaml
22
kind: Cluster
33
apiVersion: kind.x-k8s.io/v1alpha4
4+
kubeadmConfigPatches:
5+
- |
6+
apiVersion: kubeadm.k8s.io/v1beta2
7+
kind: ClusterConfiguration
8+
apiServer:
9+
extraArgs:
10+
enable-admission-plugins: EventRateLimit
11+
admission-control-config-file: /etc/kubernetes/admission-control-config.yaml
12+
feature-gates: APIPriorityAndFairness=true
413
nodes:
5-
- role: control-plane
6-
- role: worker
7-
- role: worker
14+
- role: control-plane
15+
extraMounts:
16+
- hostPath: ./event-ratelimit-config.yaml
17+
containerPath: /etc/kubernetes/admission-control-config.yaml
18+
- role: worker
19+
- role: worker

Tests/kaas/kaas-sonobuoy-go-example-e2e-framework/scs_k8s_tests/scs_0215_v1_robustness_features_test.go

Lines changed: 251 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,16 @@ import (
1010
"testing"
1111
"time"
1212

13-
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
13+
v1 "k8s.io/api/core/v1"
1414
"k8s.io/client-go/kubernetes"
1515
"k8s.io/client-go/rest"
16+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
17+
1618
)
1719

1820
// getClusterSize estimates the cluster size by counting the number of nodes.
1921
func getClusterSize(clientset *kubernetes.Clientset) int {
20-
nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), v1.ListOptions{})
22+
nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
2123
if err != nil {
2224
log.Fatalf("Failed to list nodes: %v", err)
2325
}
@@ -35,7 +37,7 @@ func runConcurrentRequests(clientset *kubernetes.Clientset, maxRequestsInflight
3537
defer wg.Done()
3638
ctx, cancel := context.WithTimeout(context.TODO(), 2*time.Second)
3739
defer cancel()
38-
_, err := clientset.CoreV1().Pods("").List(ctx, v1.ListOptions{})
40+
_, err := clientset.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
3941
if err != nil {
4042
errChan <- err
4143
}
@@ -62,57 +64,259 @@ func testPositiveCase(t *testing.T, clientset *kubernetes.Clientset, maxRequests
6264

6365
// testNegativeCase handles the negative scenario where requests should be throttled.
6466
func testNegativeCase(t *testing.T, clientset *kubernetes.Clientset, maxRequestsInflight int) {
65-
fmt.Println("Running Negative Test Case")
66-
// Increase the load significantly above the maxRequestsInflight to trigger rate limiting
67-
overloadRequests := maxRequestsInflight * 2
68-
errors := runConcurrentRequests(clientset, overloadRequests)
69-
70-
// Expect at least some errors due to rate limiting
71-
if errors == 0 {
72-
t.Errorf("Test failed: expected rate limit errors, but all requests succeeded.")
73-
} else {
74-
fmt.Println("Negative test case passed as expected: rate limit exceeded.")
75-
}
67+
fmt.Println("Running Negative Test Case")
68+
// Increase the load significantly above the maxRequestsInflight to trigger rate limiting
69+
overloadRequests := maxRequestsInflight * 2
70+
errors := runConcurrentRequests(clientset, overloadRequests)
71+
72+
// Expect at least some errors due to rate limiting
73+
if errors == 0 {
74+
t.Errorf("Test failed: expected rate limit errors, but all requests succeeded.")
75+
} else {
76+
fmt.Println("Negative test case passed as expected: rate limit exceeded.")
77+
}
7678
}
7779

7880
// Test_scs_maxRequestInflight is the main entry point that runs both positive and negative test cases.
7981
func Test_scs_0215_maxRequestInflight(t *testing.T) {
80-
// Load in-cluster configuration
81-
config, err := rest.InClusterConfig()
82-
if err != nil {
83-
log.Fatalf("Failed to load in-cluster config: %v", err)
84-
}
85-
86-
// Adjust client rate limits
87-
config.QPS = 50 // Queries Per Second
88-
config.Burst = 100 // Allowed burst (concurrent requests above QPS)
89-
90-
clientset, err := kubernetes.NewForConfig(config)
91-
if err != nil {
92-
log.Fatalf("Failed to create Kubernetes client: %v", err)
93-
}
94-
82+
// Load in-cluster configuration
83+
config, err := rest.InClusterConfig()
84+
if err != nil {
85+
log.Fatalf("Failed to load in-cluster config: %v", err)
86+
}
9587

96-
// Get cluster size (number of nodes)
97-
clusterSize := getClusterSize(clientset)
98-
fmt.Printf("Detected cluster size: %d nodes\n", clusterSize)
88+
// Adjust client rate limits
89+
config.QPS = 10000 // Matches server-side QPS
90+
config.Burst = 40000 // Matches server-side Burst
9991

100-
// Determine maxRequestsInflight based on cluster size and environment variable
101-
maxRequestsInflightStr := os.Getenv("MAX_REQUESTS_INFLIGHT")
102-
maxRequestsInflight, err := strconv.Atoi(maxRequestsInflightStr)
103-
if err != nil || maxRequestsInflight <= 0 {
104-
maxRequestsInflight = clusterSize * 250 // Example scaling logic: 250 requests per node
105-
}
92+
// Create the clientset from the config
93+
clientset, err := kubernetes.NewForConfig(config)
94+
if err != nil {
95+
log.Fatalf("Failed to create Kubernetes clientset: %v", err)
96+
}
97+
98+
// Increase timeout to allow more time for requests to complete
99+
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
100+
defer cancel()
101+
102+
// Example of using the clientset to list pods
103+
_, err = clientset.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
104+
if err != nil {
105+
if isRateLimitError(err) {
106+
log.Printf("Rate limit error: %v", err)
107+
} else {
108+
log.Printf("Unexpected error: %v", err) // Log unexpected errors with details
109+
}
110+
}
111+
112+
// Get cluster size (number of nodes)
113+
clusterSize := getClusterSize(clientset)
114+
fmt.Printf("Detected cluster size: %d nodes\n", clusterSize)
115+
116+
// Determine maxRequestsInflight based on cluster size and environment variable
117+
maxRequestsInflightStr := os.Getenv("MAX_REQUESTS_INFLIGHT")
118+
maxRequestsInflight, err := strconv.Atoi(maxRequestsInflightStr)
119+
if err != nil || maxRequestsInflight <= 0 {
120+
maxRequestsInflight = clusterSize * 250 // Example scaling logic: 250 requests per node
121+
}
122+
123+
fmt.Printf("Using maxRequestsInflight = %d\n", maxRequestsInflight)
124+
125+
// Run the positive test case
126+
t.Run("Positive Test Case", func(t *testing.T) {
127+
testPositiveCase(t, clientset, maxRequestsInflight)
128+
})
129+
130+
// Run the negative test case
131+
t.Run("Negative Test Case", func(t *testing.T) {
132+
testNegativeCase(t, clientset, maxRequestsInflight)
133+
})
134+
}
135+
136+
// Main test function for max-mutating-requests-inflight
137+
func Test_scs_maxMutatingRequestsInflight(t *testing.T) {
138+
// Load in-cluster configuration
139+
config, err := rest.InClusterConfig()
140+
if err != nil {
141+
log.Fatalf("Failed to load in-cluster config: %v", err)
142+
}
143+
144+
// Set higher QPS and burst limits to avoid client-side throttling
145+
config.QPS = 100
146+
config.Burst = 200
147+
148+
clientset, err := kubernetes.NewForConfig(config)
149+
if err != nil {
150+
log.Fatalf("Failed to create Kubernetes client: %v", err)
151+
}
152+
153+
clusterSize := detectClusterSize() // Detects the cluster size
154+
maxMutatingRequestsInflight := calculateMaxMutatingRequestsInflight(clusterSize)
155+
fmt.Printf("Detected cluster size: %d nodes\n", clusterSize)
156+
fmt.Printf("Using maxMutatingRequestsInflight = %d\n", maxMutatingRequestsInflight)
157+
158+
// Positive Test Case: Requests within the allowed limit
159+
t.Run("Positive_Test_Case", func(t *testing.T) {
160+
fmt.Println("Running Positive Test Case")
161+
err := runMutatingTest(clientset, maxMutatingRequestsInflight) // Pass clientset here
162+
if err != nil {
163+
t.Fatalf("Test failed: encountered unexpected errors when requests were expected to succeed: %v", err)
164+
}
165+
fmt.Println("Positive test case passed successfully!")
166+
})
167+
168+
// Negative Test Case: Exceeding the allowed limit
169+
t.Run("Negative_Test_Case", func(t *testing.T) {
170+
fmt.Println("Running Negative Test Case")
171+
err := runMutatingTest(clientset, maxMutatingRequestsInflight + 10) // Pass clientset here and exceed limit
172+
if err != nil && isRateLimitError(err) {
173+
fmt.Println("Negative test case passed as expected: rate limit exceeded.")
174+
} else {
175+
t.Fatalf("Test failed: expected rate limit errors, but requests succeeded or another error occurred: %v", err)
176+
}
177+
})
178+
}
106179

107-
fmt.Printf("Using maxRequestsInflight = %d\n", maxRequestsInflight)
108180

109-
// Run the positive test case
110-
t.Run("Positive Test Case", func(t *testing.T) {
111-
testPositiveCase(t, clientset, maxRequestsInflight)
112-
})
181+
// Function to detect the size of the cluster (stubbed, adjust as needed)
182+
func detectClusterSize() int {
183+
// Logic to detect cluster size (for example using kubectl)
184+
return 1 // Default for single-node kind cluster
185+
}
186+
187+
// Function to calculate max-mutating-requests-inflight based on cluster size
188+
func calculateMaxMutatingRequestsInflight(clusterSize int) int {
189+
// Adjust this formula based on your requirements
190+
return 100 * clusterSize // Example: 100 mutating requests per node
191+
}
192+
193+
// Function to simulate sending mutating requests up to the given limit
194+
func runMutatingTest(clientset *kubernetes.Clientset, limit int) error {
195+
var wg sync.WaitGroup
196+
errChan := make(chan error, limit)
197+
198+
for i := 0; i < limit; i++ {
199+
wg.Add(1)
200+
go func(i int) {
201+
defer wg.Done()
202+
ctx, cancel := context.WithTimeout(context.TODO(), 20*time.Second)
203+
defer cancel()
204+
205+
// Create a unique Pod name
206+
podName := fmt.Sprintf("test-pod-%d", i)
207+
208+
// Create a Pod
209+
_, err := clientset.CoreV1().Pods("default").Create(ctx, &v1.Pod{
210+
ObjectMeta: metav1.ObjectMeta{
211+
Name: podName,
212+
},
213+
Spec: v1.PodSpec{
214+
Containers: []v1.Container{
215+
{
216+
Name: "test-container",
217+
Image: "busybox",
218+
},
219+
},
220+
},
221+
}, metav1.CreateOptions{})
222+
223+
if err != nil {
224+
if isRateLimitError(err) {
225+
errChan <- fmt.Errorf("rate limit reached")
226+
} else {
227+
errChan <- fmt.Errorf("error creating pod: %v", err)
228+
}
229+
return
230+
}
231+
232+
// Clean up by deleting the Pod
233+
err = clientset.CoreV1().Pods("default").Delete(ctx, podName, metav1.DeleteOptions{})
234+
if err != nil {
235+
if isRateLimitError(err) {
236+
errChan <- fmt.Errorf("rate limit reached")
237+
} else {
238+
errChan <- fmt.Errorf("error deleting pod: %v", err)
239+
}
240+
return
241+
}
242+
}(i)
243+
}
244+
245+
wg.Wait()
246+
close(errChan)
247+
248+
var rateLimitErrors, otherErrors int
249+
for err := range errChan {
250+
if err.Error() == "rate limit reached" {
251+
rateLimitErrors++
252+
} else {
253+
otherErrors++
254+
}
255+
}
256+
257+
if otherErrors > 0 {
258+
return fmt.Errorf("encountered %d unexpected errors", otherErrors)
259+
}
113260

114-
// Run the negative test case
115-
t.Run("Negative Test Case", func(t *testing.T) {
116-
testNegativeCase(t, clientset, maxRequestsInflight)
117-
})
261+
if rateLimitErrors > 0 {
262+
fmt.Printf("Rate limit errors encountered: %d\n", rateLimitErrors)
263+
}
264+
265+
return nil
266+
}
267+
268+
// Function to determine if an error is related to rate limiting
269+
func isRateLimitError(err error) bool {
270+
if err == nil {
271+
return false
272+
}
273+
return err.Error() == "TooManyRequests" || err.Error() == "429"
274+
}
275+
276+
// Main test function for min-request-timeout
277+
func Test_scs_minRequestTimeout(t *testing.T) {
278+
// Load in-cluster configuration
279+
config, err := rest.InClusterConfig()
280+
if err != nil {
281+
log.Fatalf("Failed to load in-cluster config: %v", err)
282+
}
283+
284+
// Set QPS and Burst to higher values to avoid throttling
285+
config.QPS = 100
286+
config.Burst = 200
287+
288+
// Create a Kubernetes client
289+
clientset, err := kubernetes.NewForConfig(config)
290+
if err != nil {
291+
log.Fatalf("Failed to create Kubernetes client: %v", err)
292+
}
293+
294+
// Test case: min-request-timeout enforced (timeout set to 5 seconds)
295+
t.Run("Test_minRequestTimeout", func(t *testing.T) {
296+
minRequestTimeout := 5 * time.Second
297+
fmt.Printf("Testing with min-request-timeout = %v\n", minRequestTimeout)
298+
299+
ctx, cancel := context.WithTimeout(context.Background(), minRequestTimeout)
300+
defer cancel()
301+
302+
// Send a request to the Kubernetes API (List Pods in a namespace)
303+
_, err := clientset.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
304+
305+
// Check if the request failed due to timeout
306+
if err != nil && isTimeoutError(err) {
307+
fmt.Printf("Request failed as expected due to timeout: %v\n", err)
308+
} else if err != nil {
309+
t.Fatalf("Test failed: unexpected error occurred: %v\n", err)
310+
} else {
311+
t.Fatalf("Test failed: request succeeded but was expected to timeout")
312+
}
313+
})
118314
}
315+
316+
// Helper function to check if an error is a timeout error
317+
func isTimeoutError(err error) bool {
318+
if err == nil {
319+
return false
320+
}
321+
return err.Error() == "context deadline exceeded"
322+
}

0 commit comments

Comments
 (0)