diff --git a/testsuite/kuadrant/policy/dns.py b/testsuite/kuadrant/policy/dns.py index 1d478bb9a..8c8cb3ed7 100644 --- a/testsuite/kuadrant/policy/dns.py +++ b/testsuite/kuadrant/policy/dns.py @@ -129,7 +129,7 @@ def create_instance( cluster: KubernetesClient, name: str, parent: Referencable, - provider_secret_name: str, + provider_secret_name: str = None, delegate: bool = None, load_balancing: LoadBalancing = None, labels: dict[str, str] = None, @@ -140,12 +140,12 @@ def create_instance( "apiVersion": "kuadrant.io/v1", "kind": "DNSPolicy", "metadata": {"name": name, "labels": labels}, - "spec": { - "targetRef": parent.reference, - "providerRefs": [{"name": provider_secret_name}], - }, + "spec": {"targetRef": parent.reference}, } + if provider_secret_name is not None: + model["spec"]["providerRefs"] = [{"name": provider_secret_name}] + if delegate is not None: model["spec"]["delegate"] = delegate diff --git a/testsuite/kubernetes/secret.py b/testsuite/kubernetes/secret.py index 8bc0b64f1..2a959e7b2 100644 --- a/testsuite/kubernetes/secret.py +++ b/testsuite/kubernetes/secret.py @@ -15,11 +15,15 @@ def create_instance( cls, cluster, name, - data: dict[str, str], + stringData: dict[str, str] = None, # pylint: disable=invalid-name + data: dict[str, str] = None, secret_type: Literal["kubernetes.io/tls", "kuadrant.io/aws", "kuadrant.io/coredns", "Opaque"] = "Opaque", labels: dict[str, str] = None, ): """Creates new Secret""" + if not (stringData is None) ^ (data is None): + raise AttributeError("Either `stringData` or `data` must be used for the secret creation") + model: dict = { "kind": "Secret", "apiVersion": "v1", @@ -27,9 +31,15 @@ def create_instance( "name": name, "labels": labels, }, - "stringData": data, "type": secret_type, } + + if stringData: + model["stringData"] = stringData + + if data: + model["data"] = data + return cls(model, context=cluster.context) def __getitem__(self, name): @@ -60,10 +70,10 @@ def create_instance( # type: ignore[override] return 
super().create_instance( cluster, name, - { + stringData={ cert_name: certificate.chain, key_name: certificate.key, }, - secret_type, - labels, + secret_type=secret_type, + labels=labels, ) diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/__init__.py b/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_default_provider.py b/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_default_provider.py new file mode 100644 index 000000000..2d60f3ab8 --- /dev/null +++ b/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_default_provider.py @@ -0,0 +1,63 @@ +"""Test default DNS provider secret""" + +import pytest + +import openshift_client as oc + +from testsuite.gateway import GatewayListener +from testsuite.kubernetes.secret import Secret +from testsuite.gateway.gateway_api.gateway import KuadrantGateway +from testsuite.kuadrant.policy.dns import DNSPolicy + +pytestmark = [pytest.mark.kuadrant_only, pytest.mark.dnspolicy, pytest.mark.disruptive] + + +@pytest.fixture(scope="module") +def gateway(request, cluster, blame, wildcard_domain, module_label): + """Returns gateway without tls""" + gw = KuadrantGateway.create_instance( + cluster, + blame("gw"), + {"app": module_label}, + ) + gw.add_listener(GatewayListener(hostname=wildcard_domain)) + request.addfinalizer(gw.delete) + gw.commit() + gw.wait_for_ready() + return gw + + +@pytest.fixture(scope="module") +def dns_provider_secret(request, dns_provider_secret, cluster, blame, module_label): + """Get existing DNS provider secret and create a copy with default-provider label""" + provider_secret = oc.selector(f"secret/{dns_provider_secret}", static_context=cluster.context).object(cls=Secret) + default_secret = Secret.create_instance( + cluster, + blame("dflt-dns"), + data=provider_secret.model.data, + 
secret_type=provider_secret.model.type, + labels={"kuadrant.io/default-provider": "true", "app": module_label}, + ) + request.addfinalizer(default_secret.delete) + default_secret.commit() + + +@pytest.fixture(scope="module") +def dns_policy(blame, gateway, module_label): + """Return DNSPolicy without providerRefs configured""" + return DNSPolicy.create_instance(gateway.cluster, blame("dns"), gateway, labels={"app": module_label}) + + +@pytest.fixture(scope="module", autouse=True) +def commit(request, dns_provider_secret, dns_policy): # pylint: disable=unused-argument + """Commits all important stuff before tests""" + request.addfinalizer(dns_policy.delete) + dns_policy.commit() + dns_policy.wait_for_ready() + + +def test_default_dns_provider(gateway, dns_policy, client): + """Test if default DNS provider secret is picked up and used""" + assert gateway.refresh().is_affected_by(dns_policy) + response = client.get("/get") + assert response.status_code == 200 diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_multiple_default_providers.py b/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_multiple_default_providers.py new file mode 100644 index 000000000..772ebd71d --- /dev/null +++ b/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_multiple_default_providers.py @@ -0,0 +1,52 @@ +"""Test what happens when 2 default DNS provider secrets exist at the same time""" + +import pytest + +import openshift_client as oc + +from testsuite.kubernetes.secret import Secret +from testsuite.kuadrant.policy import has_condition +from testsuite.kuadrant.policy.dns import DNSPolicy, has_record_condition + +pytestmark = [pytest.mark.kuadrant_only, pytest.mark.dnspolicy, pytest.mark.disruptive] + + +@pytest.fixture(scope="module") +def default_provider_secrets(request, dns_provider_secret, cluster, blame, module_label): + """Create two default DNS provider secrets from existing, non-default provider""" + 
provider_secret = oc.selector(f"secret/{dns_provider_secret}", static_context=cluster.context).object(cls=Secret) + for _ in range(2): + default_secret = Secret.create_instance( + cluster, + blame("dflt-dns"), + data=provider_secret.model.data, + secret_type=provider_secret.model.type, + labels={"kuadrant.io/default-provider": "true", "app": module_label}, + ) + request.addfinalizer(default_secret.delete) + default_secret.commit() + + +@pytest.fixture(scope="module") +def dns_policy(blame, gateway, module_label): + """Return DNSPolicy without providerRefs configured""" + return DNSPolicy.create_instance(gateway.cluster, blame("dns"), gateway, labels={"app": module_label}) + + +@pytest.fixture(scope="module", autouse=True) +def commit(request, route, default_provider_secrets, dns_policy): # pylint: disable=unused-argument + """Commits all important stuff before tests""" + request.addfinalizer(dns_policy.delete) + dns_policy.commit() + + +def test_multiple_default_provider_secrets(dns_policy): + """Check that authoritative DNSRecord ends up in error state when multiple default provider secrets exist""" + assert dns_policy.wait_until( + has_condition("Enforced", "False", "Unknown", "not a single DNSRecord is ready") + ), f"DNSPolicy did not reach expected status, instead it was: {dns_policy.model.status.conditions}" + assert dns_policy.wait_until( + has_record_condition( + "Ready", "False", "DNSProviderError", "Multiple default providers secrets found. 
Only one expected" + ) + ), f"Authoritative DNSRecord didn't reach expected status, instead it was: {dns_policy.model.status.conditions}" diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_no_default_provider.py b/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_no_default_provider.py new file mode 100644 index 000000000..c09c0057b --- /dev/null +++ b/testsuite/tests/singlecluster/gateway/dnspolicy/default_provider/test_no_default_provider.py @@ -0,0 +1,36 @@ +"""Test DNSPolicy behavior with no default DNS provider secret available""" + +import pytest + +from testsuite.kuadrant.policy import has_condition +from testsuite.kuadrant.policy.dns import DNSPolicy, has_record_condition + +pytestmark = [pytest.mark.kuadrant_only, pytest.mark.dnspolicy, pytest.mark.disruptive] + + +@pytest.fixture(scope="module") +def dns_policy(blame, gateway, module_label): + """Return DNSPolicy without providerRefs configured""" + return DNSPolicy.create_instance(gateway.cluster, blame("dns"), gateway, labels={"app": module_label}) + + +@pytest.fixture(scope="module", autouse=True) +def commit(request, route, dns_policy): # pylint: disable=unused-argument + """Commits all important stuff before tests""" + request.addfinalizer(dns_policy.delete) + dns_policy.commit() + + +def test_no_default_provider_secrets(dns_policy): + """Test that DNSPolicy and DNSRecord both end up in error state, when no default provider secret exists""" + assert dns_policy.wait_until( + has_condition("Enforced", "False", "Unknown", "not a single DNSRecord is ready") + ), f"DNSPolicy did not reach expected status, instead it was: {dns_policy.model.status.conditions}" + assert dns_policy.wait_until( + has_record_condition( + "Ready", + "False", + "DNSProviderError", + "No default provider secret labeled kuadrant.io/default-provider was found", + ) + ), f"DNSPolicy's DNSRecord didn't reach expected status, instead it was: {dns_policy.model.status.conditions}" diff 
--git a/testsuite/tests/singlecluster/gateway/dnspolicy/test_delegate_and_provider_ref.py b/testsuite/tests/singlecluster/gateway/dnspolicy/test_delegate_and_provider_ref.py new file mode 100644 index 000000000..210f0c407 --- /dev/null +++ b/testsuite/tests/singlecluster/gateway/dnspolicy/test_delegate_and_provider_ref.py @@ -0,0 +1,28 @@ +"""Test that server raises error when both delegate is true and providerRefs are set""" + +import pytest + +from openshift_client import OpenShiftPythonException + +from testsuite.kuadrant.policy.dns import DNSPolicy + +pytestmark = [pytest.mark.kuadrant_only, pytest.mark.dnspolicy] + + +@pytest.fixture(scope="module") +def dns_policy(blame, gateway, module_label, dns_provider_secret): + """Return DNSPolicy with delegate true and providerRefs secret""" + return DNSPolicy.create_instance( + gateway.cluster, blame("dns"), gateway, dns_provider_secret, delegate=True, labels={"app": module_label} + ) + + +@pytest.fixture(scope="module", autouse=True) +def commit(): + """Committing is done inside the test""" + + +def test_delegate_true_and_provider_ref_are_mutually_exclusive(dns_policy): + """Test that server raises error when both delegate is true and providerRefs are set""" + with pytest.raises(OpenShiftPythonException, match="delegate=true and providerRefs are mutually exclusive"): + dns_policy.commit() diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/test_no_dns_provider_secret.py b/testsuite/tests/singlecluster/gateway/dnspolicy/test_no_dns_provider_secret.py new file mode 100644 index 000000000..68c886294 --- /dev/null +++ b/testsuite/tests/singlecluster/gateway/dnspolicy/test_no_dns_provider_secret.py @@ -0,0 +1,41 @@ +"""Test DNSPolicy behavior with a non-existing DNS provider secret""" + +import pytest + +from testsuite.kuadrant.policy import has_condition +from testsuite.kuadrant.policy.dns import has_record_condition +from testsuite.kuadrant.policy.dns import DNSPolicy + +pytestmark = [pytest.mark.kuadrant_only, 
pytest.mark.dnspolicy] + +NON_EXISTING_SECRET = "should-not-exist" + + +@pytest.fixture(scope="module") +def dns_policy(blame, gateway, module_label): + """Returns DNSPolicy fixture referencing a non-existing secret""" + return DNSPolicy.create_instance( + gateway.cluster, blame("dns"), gateway, NON_EXISTING_SECRET, labels={"app": module_label} + ) + + +@pytest.fixture(scope="module", autouse=True) +def commit(request, route, dns_policy): # pylint: disable=unused-argument + """Commits all important stuff before tests""" + request.addfinalizer(dns_policy.delete) + dns_policy.commit() + + +def test_default_secret_provider_not_found(dns_policy): + """Assert DNSPolicy and DNSRecord both end up in error state, with DNS provider secret does not exist message""" + assert dns_policy.wait_until( + has_condition("Enforced", "False", "Unknown", "not a single DNSRecord is ready") + ), f"DNSPolicy did not reach expected status, instead it was: {dns_policy.model.status.conditions}" + assert dns_policy.wait_until( + has_record_condition( + "Ready", + "False", + "DNSProviderError", + f'The dns provider could not be loaded: Secret "{NON_EXISTING_SECRET}" not found', + ) + ), f"DNSPolicy did not reach expected record status, instead it was: {dns_policy.model.status.recordConditions}"