Skip to content

Commit c2be292

Browse files
alebedev87 authored and claude committed
NE-2422: Add dual-stack ingress e2e tests for AWSDualStackInstall featuregate
Add e2e tests that verify ingress connectivity on AWS dual-stack clusters gated by the AWSDualStackInstall featuregate. Two test cases are included: - NLB: Creates an IngressController shard with a Network Load Balancer and validates that routes are accessible over both IPv4 and IPv6. - Classic LB: Creates an IngressController shard with a Classic Load Balancer and validates that routes are accessible over IPv4. Each test deploys a backend pod/service, an edge-terminated route, and uses curl from an exec pod to verify connectivity. DNS propagation is validated separately before connectivity checks. Route stability is confirmed by requiring 3 consecutive HTTP 200 responses. Client IP preservation is disabled on the NLB target group to avoid IPv6 hairpin traffic issues (OCPBUGS-63219). The shard.Config is extended with optional LoadBalancer and Replicas fields to configure the IngressController. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 0509de9 commit c2be292

File tree

2 files changed

+367
-3
lines changed

2 files changed

+367
-3
lines changed

test/extended/router/dualstack.go

Lines changed: 350 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,350 @@
1+
package router
2+
3+
import (
4+
"context"
5+
"fmt"
6+
"strings"
7+
"time"
8+
9+
g "github.com/onsi/ginkgo/v2"
10+
o "github.com/onsi/gomega"
11+
12+
configv1 "github.com/openshift/api/config/v1"
13+
operatorv1 "github.com/openshift/api/operator/v1"
14+
routev1 "github.com/openshift/api/route/v1"
15+
16+
corev1 "k8s.io/api/core/v1"
17+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
18+
"k8s.io/apimachinery/pkg/util/intstr"
19+
"k8s.io/apimachinery/pkg/util/wait"
20+
e2e "k8s.io/kubernetes/test/e2e/framework"
21+
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
22+
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
23+
admissionapi "k8s.io/pod-security-admission/api"
24+
utilpointer "k8s.io/utils/pointer"
25+
26+
"github.com/openshift/origin/test/extended/router/shard"
27+
exutil "github.com/openshift/origin/test/extended/util"
28+
"github.com/openshift/origin/test/extended/util/image"
29+
)
30+
31+
// These e2e tests verify ingress connectivity on AWS dual-stack clusters,
// gated by the AWSDualStackInstall featuregate:
//   - NLB shard: routes must be reachable over both IPv4 and IPv6.
//   - Classic LB shard: routes must be reachable over IPv4.
// Each test deploys a backend pod/service, an edge-terminated route, and an
// exec pod that curls the route. DNS propagation is checked before any
// connectivity assertion.
var _ = g.Describe("[sig-network-edge][OCPFeatureGate:AWSDualStackInstall][Feature:Router][apigroup:route.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io]", func() {
	defer g.GinkgoRecover()

	var oc = exutil.NewCLIWithPodSecurityLevel("router-dualstack", admissionapi.LevelBaseline)

	g.It("should be reachable via IPv4 and IPv6 through a dual-stack ingress controller", func() {
		ctx := context.Background()

		// Skips (rather than fails) when the cluster is not AWS dual-stack.
		g.By("Checking that the Infrastructure CR has a DualStack IPFamily")
		requireAWSDualStack(ctx, oc)

		g.By("Getting the default ingress domain")
		defaultDomain, err := getDefaultIngressClusterDomainName(oc, time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred(), "failed to find default domain name")

		ns := oc.KubeFramework().Namespace.Name
		// Shard domain is a sibling of the default "apps." domain, e.g. "nlb.<base>".
		baseDomain := strings.TrimPrefix(defaultDomain, "apps.")
		shardFQDN := "nlb." + baseDomain

		// Deploy the shard first so DNS and LB can provision while we set up the backend.
		g.By("Deploying a new router shard with NLB")
		shardIngressCtrl, err := shard.DeployNewRouterShard(oc, 10*time.Minute, shard.Config{
			Domain: shardFQDN,
			Type:   oc.Namespace(),
			LoadBalancer: &operatorv1.LoadBalancerStrategy{
				Scope: operatorv1.ExternalLoadBalancer,
				ProviderParameters: &operatorv1.ProviderLoadBalancerParameters{
					Type: operatorv1.AWSLoadBalancerProvider,
					AWS: &operatorv1.AWSLoadBalancerParameters{
						Type: operatorv1.AWSNetworkLoadBalancer,
					},
				},
			},
		})
		// Cleanup is registered before the rollout error is checked: the
		// IngressController object may exist even when the rollout failed.
		defer func() {
			if shardIngressCtrl != nil {
				if err := oc.AdminOperatorClient().OperatorV1().IngressControllers(shardIngressCtrl.Namespace).Delete(ctx, shardIngressCtrl.Name, metav1.DeleteOptions{}); err != nil {
					e2e.Logf("deleting ingress controller failed: %v\n", err)
				}
			}
		}()
		o.Expect(err).NotTo(o.HaveOccurred(), "new router shard did not rollout")

		// With client IP preservation enabled, in-cluster traffic to the NLB
		// can hairpin and get dropped over IPv6; disable it (OCPBUGS-63219).
		g.By("Disabling client IP preservation on the NLB target group to avoid hairpin issues (OCPBUGS-63219)")
		routerSvcName := "router-" + shardIngressCtrl.Name
		err = oc.AsAdmin().Run("annotate").Args("service", "-n", "openshift-ingress", routerSvcName,
			"service.beta.kubernetes.io/aws-load-balancer-target-group-attributes=preserve_client_ip.enabled=false").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// The shard admits only routes in namespaces carrying this label
		// (matches the shard.Config Type selector above).
		g.By("Labelling the namespace for the shard")
		err = oc.AsAdmin().Run("label").Args("namespace", oc.Namespace(), "type="+oc.Namespace()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("Creating backend service and pod")
		createBackendServiceAndPod(ctx, oc, ns, "dualstack-backend")

		g.By("Creating an edge-terminated route")
		routeHost := "dualstack-test." + shardFQDN
		createEdgeRoute(ctx, oc, ns, "dualstack-route", routeHost, "dualstack-backend")

		g.By("Waiting for the route to be admitted")
		waitForRouteAdmitted(ctx, oc, ns, "dualstack-route", routeHost)

		g.By("Creating exec pod for curl tests")
		execPod := exutil.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod")
		defer func() {
			// Best-effort cleanup; error intentionally ignored.
			oc.AdminKubeClient().CoreV1().Pods(ns).Delete(ctx, execPod.Name, *metav1.NewDeleteOptions(1))
		}()

		// DNS propagation is validated separately so curl failures below can
		// be attributed to connectivity rather than name resolution.
		g.By("Waiting for DNS resolution of the route host")
		err = waitForDNSResolution(ns, execPod.Name, routeHost, 10*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred(), "DNS resolution failed")

		g.By("Verifying route is reachable over IPv4")
		err = waitForRouteResponse(ns, execPod.Name, routeHost, "-4", 5*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred(), "route not reachable over IPv4")

		g.By("Verifying route is reachable over IPv6")
		err = waitForRouteResponse(ns, execPod.Name, routeHost, "-6", 5*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred(), "route not reachable over IPv6")
	})

	g.It("should be reachable via IPv4 through a Classic LB ingress controller on a dual-stack cluster", func() {
		ctx := context.Background()

		g.By("Checking that the Infrastructure CR has a DualStack IPFamily")
		requireAWSDualStack(ctx, oc)

		g.By("Getting the default ingress domain")
		defaultDomain, err := getDefaultIngressClusterDomainName(oc, time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred(), "failed to find default domain name")

		ns := oc.KubeFramework().Namespace.Name
		baseDomain := strings.TrimPrefix(defaultDomain, "apps.")
		shardFQDN := "clb." + baseDomain

		// Deploy the shard first so DNS and LB can provision while we set up the backend.
		g.By("Deploying a new router shard with Classic LB")
		shardIngressCtrl, err := shard.DeployNewRouterShard(oc, 10*time.Minute, shard.Config{
			Domain: shardFQDN,
			Type:   oc.Namespace(),
			LoadBalancer: &operatorv1.LoadBalancerStrategy{
				Scope: operatorv1.ExternalLoadBalancer,
				ProviderParameters: &operatorv1.ProviderLoadBalancerParameters{
					Type: operatorv1.AWSLoadBalancerProvider,
					AWS: &operatorv1.AWSLoadBalancerParameters{
						Type: operatorv1.AWSClassicLoadBalancer,
					},
				},
			},
		})
		// Cleanup registered before checking the rollout error (see NLB test).
		defer func() {
			if shardIngressCtrl != nil {
				if err := oc.AdminOperatorClient().OperatorV1().IngressControllers(shardIngressCtrl.Namespace).Delete(ctx, shardIngressCtrl.Name, metav1.DeleteOptions{}); err != nil {
					e2e.Logf("deleting ingress controller failed: %v\n", err)
				}
			}
		}()
		o.Expect(err).NotTo(o.HaveOccurred(), "new router shard did not rollout")

		g.By("Labelling the namespace for the shard")
		err = oc.AsAdmin().Run("label").Args("namespace", oc.Namespace(), "type="+oc.Namespace()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("Creating backend service and pod")
		createBackendServiceAndPod(ctx, oc, ns, "classic-backend")

		g.By("Creating an edge-terminated route")
		routeHost := "classic-test." + shardFQDN
		createEdgeRoute(ctx, oc, ns, "classic-route", routeHost, "classic-backend")

		g.By("Waiting for the route to be admitted")
		waitForRouteAdmitted(ctx, oc, ns, "classic-route", routeHost)

		g.By("Creating exec pod for curl tests")
		execPod := exutil.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod")
		defer func() {
			// Best-effort cleanup; error intentionally ignored.
			oc.AdminKubeClient().CoreV1().Pods(ns).Delete(ctx, execPod.Name, *metav1.NewDeleteOptions(1))
		}()

		g.By("Waiting for DNS resolution of the route host")
		err = waitForDNSResolution(ns, execPod.Name, routeHost, 10*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred(), "DNS resolution failed")

		// Classic LBs are IPv4-only here, so only IPv4 reachability is asserted.
		g.By("Verifying route is reachable over IPv4")
		err = waitForRouteResponse(ns, execPod.Name, routeHost, "-4", 5*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred(), "route not reachable over IPv4")
	})
})
180+
181+
func requireAWSDualStack(ctx context.Context, oc *exutil.CLI) {
182+
infra, err := oc.AdminConfigClient().ConfigV1().Infrastructures().Get(ctx, "cluster", metav1.GetOptions{})
183+
o.Expect(err).NotTo(o.HaveOccurred(), "failed to get infrastructure CR")
184+
185+
if infra.Status.PlatformStatus == nil || infra.Status.PlatformStatus.Type != configv1.AWSPlatformType {
186+
g.Skip("Test requires AWS platform")
187+
}
188+
if infra.Status.PlatformStatus.AWS == nil {
189+
g.Skip("AWS platform status is not set")
190+
}
191+
ipFamily := infra.Status.PlatformStatus.AWS.IPFamily
192+
if ipFamily != configv1.DualStackIPv4Primary && ipFamily != configv1.DualStackIPv6Primary {
193+
g.Skip(fmt.Sprintf("Test requires DualStack IPFamily, got %q", ipFamily))
194+
}
195+
}
196+
197+
func createBackendServiceAndPod(ctx context.Context, oc *exutil.CLI, ns, name string) {
198+
service := &corev1.Service{
199+
ObjectMeta: metav1.ObjectMeta{
200+
Name: name,
201+
Labels: map[string]string{"app": name},
202+
},
203+
Spec: corev1.ServiceSpec{
204+
Selector: map[string]string{"app": name},
205+
IPFamilyPolicy: func() *corev1.IPFamilyPolicy {
206+
p := corev1.IPFamilyPolicyPreferDualStack
207+
return &p
208+
}(),
209+
Ports: []corev1.ServicePort{
210+
{
211+
Name: "http",
212+
Port: 8080,
213+
Protocol: corev1.ProtocolTCP,
214+
TargetPort: intstr.FromInt(8080),
215+
},
216+
},
217+
},
218+
}
219+
_, err := oc.AdminKubeClient().CoreV1().Services(ns).Create(ctx, service, metav1.CreateOptions{})
220+
o.Expect(err).NotTo(o.HaveOccurred())
221+
222+
pod := &corev1.Pod{
223+
ObjectMeta: metav1.ObjectMeta{
224+
Name: name,
225+
Labels: map[string]string{"app": name},
226+
},
227+
Spec: corev1.PodSpec{
228+
TerminationGracePeriodSeconds: utilpointer.Int64(1),
229+
Containers: []corev1.Container{
230+
{
231+
Name: "server",
232+
Image: image.ShellImage(),
233+
ImagePullPolicy: corev1.PullIfNotPresent,
234+
Command: []string{"/bin/bash", "-c", `while true; do
235+
printf "HTTP/1.1 200 OK\r\nContent-Length: 2\r\nContent-Type: text/plain\r\n\r\nOK" | ncat -l 8080 --send-only || true
236+
done`},
237+
Ports: []corev1.ContainerPort{
238+
{
239+
ContainerPort: 8080,
240+
Name: "http",
241+
Protocol: corev1.ProtocolTCP,
242+
},
243+
},
244+
},
245+
},
246+
},
247+
}
248+
_, err = oc.AdminKubeClient().CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
249+
o.Expect(err).NotTo(o.HaveOccurred())
250+
251+
e2e.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(ctx, oc.KubeClient(), name, ns), "backend pod not running")
252+
}
253+
254+
func createEdgeRoute(ctx context.Context, oc *exutil.CLI, ns, name, host, serviceName string) {
255+
route := routev1.Route{
256+
ObjectMeta: metav1.ObjectMeta{
257+
Name: name,
258+
Labels: map[string]string{
259+
"type": oc.Namespace(),
260+
},
261+
},
262+
Spec: routev1.RouteSpec{
263+
Host: host,
264+
Port: &routev1.RoutePort{
265+
TargetPort: intstr.FromInt(8080),
266+
},
267+
TLS: &routev1.TLSConfig{
268+
Termination: routev1.TLSTerminationEdge,
269+
InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
270+
},
271+
To: routev1.RouteTargetReference{
272+
Kind: "Service",
273+
Name: serviceName,
274+
Weight: utilpointer.Int32(100),
275+
},
276+
WildcardPolicy: routev1.WildcardPolicyNone,
277+
},
278+
}
279+
_, err := oc.RouteClient().RouteV1().Routes(ns).Create(ctx, &route, metav1.CreateOptions{})
280+
o.Expect(err).NotTo(o.HaveOccurred())
281+
}
282+
283+
func waitForRouteAdmitted(ctx context.Context, oc *exutil.CLI, ns, name, host string) {
284+
err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
285+
r, err := oc.RouteClient().RouteV1().Routes(ns).Get(ctx, name, metav1.GetOptions{})
286+
if err != nil {
287+
e2e.Logf("failed to get route: %v, retrying...", err)
288+
return false, nil
289+
}
290+
for _, ingress := range r.Status.Ingress {
291+
if ingress.Host == host {
292+
for _, condition := range ingress.Conditions {
293+
if condition.Type == routev1.RouteAdmitted && condition.Status == corev1.ConditionTrue {
294+
return true, nil
295+
}
296+
}
297+
}
298+
}
299+
return false, nil
300+
})
301+
o.Expect(err).NotTo(o.HaveOccurred(), "route was not admitted")
302+
}
303+
304+
func waitForDNSResolution(ns, execPodName, host string, timeout time.Duration) error {
305+
cmd := fmt.Sprintf("getent hosts %s", host)
306+
var lastOutput string
307+
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
308+
output, err := e2eoutput.RunHostCmd(ns, execPodName, cmd)
309+
lastOutput = output
310+
if err != nil {
311+
return false, nil
312+
}
313+
e2e.Logf("DNS resolution for %s:\n%s", host, strings.TrimSpace(output))
314+
return true, nil
315+
})
316+
if err != nil {
317+
return fmt.Errorf("DNS resolution for %s timed out, last output: %s", host, lastOutput)
318+
}
319+
return nil
320+
}
321+
322+
func waitForRouteResponse(ns, execPodName, host, ipFlag string, timeout time.Duration) error {
323+
curlCmd := fmt.Sprintf("curl %s -k -v -m 10 --connect-timeout 5 -o /dev/null https://%s 2>&1", ipFlag, host)
324+
var lastOutput string
325+
consecutiveSuccesses := 0
326+
requiredSuccesses := 3
327+
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
328+
output, err := e2eoutput.RunHostCmd(ns, execPodName, curlCmd)
329+
lastOutput = output
330+
if err != nil {
331+
consecutiveSuccesses = 0
332+
return false, nil
333+
}
334+
if strings.Contains(output, "< HTTP/1.1 200") || strings.Contains(output, "< HTTP/2 200") {
335+
consecutiveSuccesses++
336+
e2e.Logf("curl %s %s: success (%d/%d)", ipFlag, host, consecutiveSuccesses, requiredSuccesses)
337+
if consecutiveSuccesses >= requiredSuccesses {
338+
e2e.Logf("curl %s %s:\n%s", ipFlag, host, output)
339+
return true, nil
340+
}
341+
return false, nil
342+
}
343+
consecutiveSuccesses = 0
344+
return false, nil
345+
})
346+
if err != nil {
347+
return fmt.Errorf("curl %s to %s timed out, last output:\n%s", ipFlag, host, lastOutput)
348+
}
349+
return nil
350+
}

test/extended/router/shard/shard.go

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,14 @@ type Config struct {
2121

2222
// Type is the matchSelector
2323
Type string
24+
25+
// Replicas optionally sets the number of router replicas.
26+
// If 0, defaults to 1.
27+
Replicas int32
28+
29+
// LoadBalancer optionally specifies LoadBalancerStrategy parameters.
30+
// If nil, the default LoadBalancer configuration is used.
31+
LoadBalancer *operatorv1.LoadBalancerStrategy
2432
}
2533

2634
var ingressControllerNonDefaultAvailableConditions = []operatorv1.OperatorCondition{
@@ -40,10 +48,16 @@ func DeployNewRouterShard(oc *exutil.CLI, timeout time.Duration, cfg Config) (*o
4048
},
4149
},
4250
Spec: operatorv1.IngressControllerSpec{
43-
Replicas: utilpointer.Int32(1),
44-
Domain: cfg.Domain,
51+
Replicas: func() *int32 {
52+
if cfg.Replicas > 0 {
53+
return utilpointer.Int32(cfg.Replicas)
54+
}
55+
return utilpointer.Int32(1)
56+
}(),
57+
Domain: cfg.Domain,
4558
EndpointPublishingStrategy: &operatorv1.EndpointPublishingStrategy{
46-
Type: operatorv1.LoadBalancerServiceStrategyType,
59+
Type: operatorv1.LoadBalancerServiceStrategyType,
60+
LoadBalancer: cfg.LoadBalancer,
4761
},
4862
NodePlacement: &operatorv1.NodePlacement{
4963
NodeSelector: &metav1.LabelSelector{

0 commit comments

Comments
 (0)