diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index 5d09e74..9de052a 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -159,6 +159,13 @@ "update", ) +NETWORK_RESOURCE_MODULE_STATES = ( + "merged", + "replaced", + "overridden", + "deleted", +) + INTERFACE_FLOW_RULES_TYPES_MAPPING = {"port_channel": "PORTCHANNEL", "physical": "PHYSICAL", "l3out_sub_interface": "L3_SUBIF", "l3out_svi": "SVI"} INTERFACE_FLOW_RULES_STATUS_MAPPING = {"enabled": "ENABLED", "disabled": "DISABLED"} diff --git a/plugins/module_utils/nd.py b/plugins/module_utils/nd.py index cca3ed4..cc80a26 100644 --- a/plugins/module_utils/nd.py +++ b/plugins/module_utils/nd.py @@ -3,6 +3,7 @@ # Copyright: (c) 2021, Lionel Hercot (@lhercot) # Copyright: (c) 2022, Cindy Zhao (@cizhao) # Copyright: (c) 2022, Akini Ross (@akinross) +# Copyright: (c) 2025, Shreyas Srish (@shrsr) # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -18,11 +19,10 @@ from ansible.module_utils.basic import json from ansible.module_utils.basic import env_fallback from ansible.module_utils.six import PY3 -from ansible.module_utils.six.moves import filterfalse from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils._text import to_native, to_text from ansible.module_utils.connection import Connection -from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED +from ansible_collections.cisco.nd.plugins.module_utils.constants import ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED, NETWORK_RESOURCE_MODULE_STATES def sanitize_dict(dict_to_sanitize, keys=None, values=None, recursive=True, remove_none_values=True): @@ -73,53 +73,27 @@ def cmp(a, b): def issubset(subset, superset): - """Recurse through nested dictionary and compare entries""" + """Recurse through a nested dictionary and check if it is a subset of another.""" - # Both 
objects are the same object - if subset is superset: - return True - - # Both objects are identical - if subset == superset: - return True - - # Both objects have a different type - if isinstance(subset) is not isinstance(superset): + if type(subset) is not type(superset): return False + if not isinstance(subset, dict): + if isinstance(subset, list): + return all(item in superset for item in subset) + return subset == superset + for key, value in subset.items(): - # Ignore empty values if value is None: - return True + continue - # Item from subset is missing from superset if key not in superset: return False - # Item has different types in subset and superset - if isinstance(superset.get(key)) is not isinstance(value): - return False + superset_value = superset.get(key) - # Compare if item values are subset - if isinstance(value, dict): - if not issubset(superset.get(key), value): - return False - elif isinstance(value, list): - try: - # NOTE: Fails for lists of dicts - if not set(value) <= set(superset.get(key)): - return False - except TypeError: - # Fall back to exact comparison for lists of dicts - diff = list(filterfalse(lambda i: i in value, superset.get(key))) + list(filterfalse(lambda j: j in superset.get(key), value)) - if diff: - return False - elif isinstance(value, set): - if not value <= superset.get(key): - return False - else: - if not value == superset.get(key): - return False + if not issubset(value, superset_value): + return False return True @@ -223,6 +197,10 @@ def __init__(self, module): self.status = None self.url = None self.httpapi_logs = list() + self.nd_logs = list() + self.changed = False + self.before_data = {} + self.after_data = {} if self.module._debug: self.module.warn("Enable debug output because ANSIBLE_DEBUG was set.") @@ -252,7 +230,7 @@ def request( if file is not None: info = conn.send_file_request(method, uri, file, data, None, file_key, file_ext) else: - if data: + if data is not None: info = conn.send_request(method, uri, 
json.dumps(data)) else: info = conn.send_request(method, uri) @@ -310,6 +288,8 @@ def request( self.fail_json(msg="ND Error: {0}".format(self.error.get("message")), data=data, info=info) self.error = payload if "code" in payload: + if self.status == 404 and ignore_not_found_error: + return {} self.fail_json(msg="ND Error {code}: {message}".format(**payload), data=data, info=info, payload=payload) elif "messages" in payload and len(payload.get("messages")) > 0: self.fail_json(msg="ND Error {code} ({severity}): {message}".format(**payload["messages"][0]), data=data, info=info, payload=payload) @@ -375,12 +355,45 @@ def get_obj(self, path, **kwargs): self.fail_json(msg="More than one object matches unique filter: {0}".format(kwargs)) return objs[0] - def sanitize(self, updates, collate=False, required=None, unwanted=None): + def get_object_by_nested_key_value(self, path, nested_key_path, value, data_key=None): + + response_data = self.request(path, method="GET") + + if not response_data: + return None + + object_list = [] + if isinstance(response_data, list): + object_list = response_data + elif data_key and data_key in response_data: + object_list = response_data.get(data_key) + else: + return None + + keys = nested_key_path.split(".") + + for obj in object_list: + current_level = obj + for key in keys: + if isinstance(current_level, dict): + current_level = current_level.get(key) + else: + current_level = None + break + + if current_level == value: + return obj + + return None + + def sanitize(self, updates, collate=False, required=None, unwanted=None, existing=None): """Clean up unset keys from a request payload""" if required is None: required = [] if unwanted is None: unwanted = [] + if existing: + self.existing = existing if isinstance(self.existing, dict): self.proposed = deepcopy(self.existing) self.sent = deepcopy(self.existing) @@ -429,35 +442,45 @@ def sanitize(self, updates, collate=False, required=None, unwanted=None): def exit_json(self, **kwargs): 
"""Custom written method to exit from module.""" + if self.params.get("state") in NETWORK_RESOURCE_MODULE_STATES: + self.result["changed"] = self.changed + self.result["before"] = self.before_data + self.result["after"] = self.after_data - if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - if self.params.get("output_level") in ("debug", "info"): - self.result["previous"] = self.previous - # FIXME: Modified header only works for PATCH - if not self.has_modified and self.previous != self.existing: - self.result["changed"] = True - if self.stdout: - self.result["stdout"] = self.stdout - - # Return the gory details when we need it - if self.params.get("output_level") == "debug": - self.result["method"] = self.method - self.result["response"] = self.response - self.result["status"] = self.status - self.result["url"] = self.url - self.result["httpapi_logs"] = self.httpapi_logs - + if self.params.get("output_level") == "debug": + self.result["nd_logs"] = self.nd_logs + self.result["httpapi_logs"] = self.httpapi_logs + if self.stdout: + self.result["stdout"] = self.stdout + else: if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - self.result["sent"] = self.sent - self.result["proposed"] = self.proposed + if self.params.get("output_level") in ("debug", "info"): + self.result["previous"] = self.previous + # FIXME: Modified header only works for PATCH + if not self.has_modified and self.previous != self.existing: + self.result["changed"] = True + if self.stdout: + self.result["stdout"] = self.stdout + + # Return the gory details when we need it + if self.params.get("output_level") == "debug": + self.result["method"] = self.method + self.result["response"] = self.response + self.result["status"] = self.status + self.result["url"] = self.url + self.result["httpapi_logs"] = self.httpapi_logs + + if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: + self.result["sent"] = self.sent + self.result["proposed"] 
= self.proposed - self.result["current"] = self.existing + self.result["current"] = self.existing - if self.module._diff and self.result.get("changed") is True: - self.result["diff"] = dict( - before=self.previous, - after=self.existing, - ) + if self.module._diff and self.result.get("changed") is True: + self.result["diff"] = dict( + before=self.previous, + after=self.existing, + ) self.result.update(**kwargs) self.module.exit_json(**self.result) @@ -465,29 +488,39 @@ def exit_json(self, **kwargs): def fail_json(self, msg, **kwargs): """Custom written method to return info on failure.""" - if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - if self.params.get("output_level") in ("debug", "info"): - self.result["previous"] = self.previous - # FIXME: Modified header only works for PATCH - if not self.has_modified and self.previous != self.existing: - self.result["changed"] = True - if self.stdout: - self.result["stdout"] = self.stdout + if self.params.get("state") in NETWORK_RESOURCE_MODULE_STATES: + self.result["before"] = self.before_data + self.result["after"] = self.after_data - # Return the gory details when we need it - if self.params.get("output_level") == "debug": - if self.url is not None: - self.result["method"] = self.method - self.result["response"] = self.response - self.result["status"] = self.status - self.result["url"] = self.url + if self.params.get("output_level") == "debug": + self.result["nd_logs"] = self.nd_logs self.result["httpapi_logs"] = self.httpapi_logs - + if self.stdout: + self.result["stdout"] = self.stdout + else: if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: - self.result["sent"] = self.sent - self.result["proposed"] = self.proposed - - self.result["current"] = self.existing + if self.params.get("output_level") in ("debug", "info"): + self.result["previous"] = self.previous + # FIXME: Modified header only works for PATCH + if not self.has_modified and self.previous != 
self.existing: + self.result["changed"] = True + if self.stdout: + self.result["stdout"] = self.stdout + + # Return the gory details when we need it + if self.params.get("output_level") == "debug": + if self.url is not None: + self.result["method"] = self.method + self.result["response"] = self.response + self.result["status"] = self.status + self.result["url"] = self.url + self.result["httpapi_logs"] = self.httpapi_logs + + if self.params.get("state") in ALLOWED_STATES_TO_APPEND_SENT_AND_PROPOSED: + self.result["sent"] = self.sent + self.result["proposed"] = self.proposed + + self.result["current"] = self.existing self.result.update(**kwargs) self.module.fail_json(msg=msg, **self.result) @@ -499,40 +532,154 @@ def check_changed(self): existing["password"] = self.sent.get("password") return not issubset(self.sent, existing) - def get_diff(self, unwanted=None): + def get_diff(self, unwanted=None, existing=None): """Check if existing payload and sent payload and removing keys that are not required""" if unwanted is None: unwanted = [] + if existing: + self.existing = existing if not self.existing and self.sent: return True - existing = self.existing - sent = self.sent + exists = deepcopy(self.existing) + sent = deepcopy(self.sent) for key in unwanted: if isinstance(key, str): - if key in existing: - try: - del existing[key] - except KeyError: - pass - try: - del sent[key] - except KeyError: - pass + if key in exists: + del exists[key] + if key in sent: + del sent[key] elif isinstance(key, list): key_path, last = key[:-1], key[-1] try: - existing_parent = reduce(dict.get, key_path, existing) - del existing_parent[last] + existing_parent = reduce(dict.get, key_path, exists) + if existing_parent is not None: + del existing_parent[last] except KeyError: pass try: sent_parent = reduce(dict.get, key_path, sent) - del sent_parent[last] + if sent_parent is not None: + del sent_parent[last] except KeyError: pass - return not issubset(sent, existing) + return not issubset(sent, exists) def 
set_to_empty_string_when_none(self, val): return val if val is not None else "" + + def delete_none_values(self, obj_to_sanitize, existing=None, recursive=True, is_recursive_call=False): + if not is_recursive_call and existing: + self.existing = existing + + sanitized_obj = None + if isinstance(obj_to_sanitize, dict): + sanitized_dict = {} + for item_key, item_value in obj_to_sanitize.items(): + if item_value is None: + continue + + if recursive and isinstance(item_value, (dict, list)): + sanitized_dict[item_key] = self.delete_none_values(item_value, None, recursive, is_recursive_call=True) + else: + sanitized_dict[item_key] = item_value + sanitized_obj = sanitized_dict + + elif isinstance(obj_to_sanitize, list): + sanitized_list = [] + for item in obj_to_sanitize: + if item is None: + continue + + if recursive and isinstance(item, (dict, list)): + sanitized_list.append(self.delete_none_values(item, None, recursive, is_recursive_call=True)) + else: + sanitized_list.append(item) + sanitized_obj = sanitized_list + + else: + if not is_recursive_call: + self.module.warn("Object to sanitize must be of type list or dict. 
Got {0}".format(type(obj_to_sanitize))) + sanitized_obj = deepcopy(obj_to_sanitize) + + if not is_recursive_call: + self.proposed = self.sent = sanitized_obj + + return sanitized_obj + + def add_log(self, identifier, status, before_data, after_data, sent_payload_data=None): + item_result = { + "identifier": identifier, + "status": status, + "before": deepcopy(before_data) if before_data is not None else {}, + "after": deepcopy(after_data) if after_data is not None else {}, + "sent_payload": deepcopy(sent_payload_data) if sent_payload_data is not None else {}, + } + self.nd_logs.append(item_result) + + def manage_state(self, state, desired_map, existing_map, action_callbacks, unwanted_keys=None): + item_changed = False + update_callback = action_callbacks["update_callback"] + create_callback = action_callbacks["create_callback"] + delete_callback = action_callbacks["delete_callback"] + if state == "overridden": + for identifier, existing_payload in existing_map.items(): + if identifier not in desired_map: + existing_payload = existing_map[identifier] + self.delete_none_values({}, existing=existing_payload) + self.before_data[identifier] = existing_payload + delete_data = delete_callback(self) + self.after_data[identifier] = delete_data + self.add_log(identifier=identifier, status="deleted", before_data=existing_payload, after_data=delete_data) + item_changed = True + + if item_changed: + self.changed = True + + if state in ["merged", "replaced", "overridden"]: + for identifier, desired_payload in desired_map.items(): + existing_payload = existing_map.get(identifier) + self.before_data[identifier] = existing_payload or {} + if existing_payload: + if state == "merged": + self.sanitize(desired_payload, existing=existing_payload) + else: + self.delete_none_values(desired_payload, existing=existing_payload) + if self.get_diff(unwanted=unwanted_keys, existing=existing_payload): + update_data = update_callback(self) + self.after_data[identifier] = update_data + 
self.add_log( + identifier=identifier, status="updated", before_data=existing_payload, after_data=update_data, sent_payload_data=self.sent + ) + item_changed = True + else: + self.after_data[identifier] = existing_payload or {} + self.add_log( + identifier=identifier, status="no_change", before_data=existing_payload, after_data=existing_payload, sent_payload_data=None + ) + else: + self.sanitize(desired_payload, existing={}) + create_data = create_callback(self) + self.after_data[identifier] = create_data + self.add_log(identifier=identifier, status="created", before_data={}, after_data=create_data, sent_payload_data=self.sent) + item_changed = True + + if item_changed: + self.changed = True + + elif state == "deleted": + for identifier, desired_payload in desired_map.items(): + if identifier in existing_map: + existing_payload = existing_map[identifier] + self.delete_none_values(desired_payload, existing=existing_payload) + self.before_data[identifier] = existing_payload + delete_data = delete_callback(self) + self.after_data[identifier] = delete_data + self.add_log(identifier=identifier, status="deleted", before_data=existing_payload, after_data=delete_data) + item_changed = True + else: + self.before_data[identifier] = {} + self.add_log(identifier=identifier, status="not_found_for_deletion", before_data={}, after_data={}) + if item_changed: + self.changed = True diff --git a/plugins/modules/nd_multi_cluster_connectivity.py b/plugins/modules/nd_multi_cluster_connectivity.py new file mode 100644 index 0000000..4cc951d --- /dev/null +++ b/plugins/modules/nd_multi_cluster_connectivity.py @@ -0,0 +1,354 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2025, Shreyas Srish (@shrsr) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": 
"community"} + +DOCUMENTATION = r""" +--- +module: nd_multi_cluster_connectivity +version_added: "1.0.0" +short_description: Manages cluster configurations on Cisco Nexus Dashboard. +description: +- This module allows for the management of clusters on Cisco Nexus Dashboard. +- This module is only supported on ND v4.1 and later. +author: +- Shreyas Srish (@shrsr) +options: + config: + description: + - The configuration to manage clusters on Cisco Nexus Dashboard. + type: list + elements: dict + suboptions: + cluster_type: + description: + - The type of the cluster. + type: str + choices: [ nd, apic ] + cluster_hostname: + description: + - The hostname or IP address of the cluster. + type: str + cluster_username: + description: + - The username for authenticating with the cluster. + type: str + cluster_password: + description: + - The password for authenticating with the cluster. + - This value is not logged in the output. + type: str + cluster_login_domain: + description: + - The login domain for the cluster. + type: str + multi_cluster_login_domain: + description: + - The multi-cluster login domain. + type: str + fabric_name: + description: + - The name of the fabric to which the cluster belongs. + type: str + license_tier: + description: + - The license tier for the cluster. + type: str + choices: [ advantage, essentials, premier ] + features: + description: + - A list of features to enable on the cluster. + type: list + elements: str + choices: [ telemetry, orchestration ] + inband_epg: + description: + - The in-band EPG (Endpoint Group) for the cluster. + type: str + security_domain: + description: + - The security domain for the cluster. + type: str + validate_peer_certificate: + description: + - Whether to validate the peer's SSL/TLS certificate. + type: bool + required: true + state: + description: + - The desired state of the network resources on the Cisco Nexus Dashboard. 
+ - Use O(state=merged) to create new resources and updates existing ones as defined in your configuration. + Resources on the Cisco Nexus Dashboard that are not specified in the configuration will be left unchanged. + - Use O(state=replaced) to replace the resources specified in the configuration. + - Use O(state=overridden) to enforce the configuration as the single source of truth. + The resources on the Cisco Nexus Dashboard will be modified to exactly match the configuration. + Any resource existing on the dashboard but not present in the configuration will be deleted. Use with caution. + - Use O(state=deleted) to remove the resources specified in the configuration from the Cisco Nexus Dashboard. + type: str + choices: [ merged, replaced, deleted, overridden ] + default: merged +extends_documentation_fragment: +- cisco.nd.modules +- cisco.nd.check_mode +notes: +- If ACI clusters are part of a multi-cluster configuration, overriding the state will cause the module to throw an error. + This occurs because the API expects a payload containing credentials to process the removal of the ACI clusters. +- An API limitation requires that the features and license tier are configured at the time an ACI cluster is first connected. + These settings cannot be modified later. 
+""" + +EXAMPLES = r""" +- name: Connect clusters in the config + cisco.nd.nd_multi_cluster_connectivity: + config: + - cluster_type: nd + cluster_hostname: nd_cluster_host + cluster_username: nd_cluster_username + cluster_password: nd_cluster_password + - cluster_type: apic + fabric_name: ansible_test_2 + cluster_hostname: aci_cluster_host + cluster_username: aci_cluster_username + cluster_password: aci_cluster_password + license_tier: advantage + features: + - orchestration + inband_epg: ansible-inband + - cluster_type: apic + fabric_name: ansible_test + cluster_hostname: aci_cluster_host2 + cluster_username: aci_cluster_username2 + cluster_password: aci_cluster_password2 + license_tier: advantage + features: + - orchestration + inband_epg: ansible-inband + state: merged + +- name: Delete clusters in the config + cisco.nd.nd_multi_cluster_connectivity: + config: + - cluster_hostname: nd_cluster_host + - cluster_hostname: aci_cluster_host + cluster_username: aci_cluster_username + cluster_password: aci_cluster_password + - cluster_hostname: aci_cluster_host2 + cluster_username: aci_cluster_username2 + cluster_password: aci_cluster_password2 + state: deleted +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.nd.plugins.module_utils.nd import NDModule, nd_argument_spec + + +def get_cluster_unique_key(cluster_data): + return cluster_data.get("spec", {}).get("onboardUrl") + + +def module_input_to_api_payload(cluster_data_from_module_input): + cluster_type = cluster_data_from_module_input.get("cluster_type").upper() if cluster_data_from_module_input.get("cluster_type") else None + payload = { + "spec": { + "clusterType": cluster_type, + "onboardUrl": cluster_data_from_module_input.get("cluster_hostname"), + "credentials": { + "user": cluster_data_from_module_input.get("cluster_username"), + "password": cluster_data_from_module_input.get("cluster_password"), + "logindomain": 
cluster_data_from_module_input.get("cluster_login_domain"), + }, + } + } + if cluster_type == "APIC": + telemetry = {} + features = cluster_data_from_module_input.get("features", []) + if features and "telemetry" in features: + telemetry["status"] = "enabled" + if cluster_data_from_module_input.get("inband_epg"): + telemetry["network"] = "inband" + telemetry["epg"] = "uni/tn-mgmt/mgmtp-default/inb-{0}".format(cluster_data_from_module_input.get("inband_epg")) + else: + telemetry["network"] = "outband" + else: + telemetry["status"] = "disabled" + + orchestration = {} + if features and "orchestration" in features: + orchestration["status"] = "enabled" + else: + orchestration["status"] = "disabled" + + payload["spec"]["aci"] = { + "licenseTier": cluster_data_from_module_input.get("license_tier"), + "name": cluster_data_from_module_input.get("fabric_name"), + "securityDomain": cluster_data_from_module_input.get("security_domain"), + "verifyCA": cluster_data_from_module_input.get("validate_peer_certificate"), + "telemetry": telemetry, + "orchestration": orchestration, + } + elif cluster_type == "ND" and cluster_data_from_module_input.get("multi_cluster_login_domain"): + payload["spec"]["nd"] = {"multiClusterLoginDomainName": cluster_data_from_module_input.get("multi_cluster_login_domain")} + return payload + + +# def convert_api_response_to_payload_format(api_response_cluster_data): +# spec = api_response_cluster_data.get("spec", {}) +# cluster_type = spec.get("clusterType") + +# payload = { +# "spec": { +# "clusterType": cluster_type, +# "onboardUrl": spec.get("onboardUrl"), +# "name": spec.get("name"), +# } +# } + +# if cluster_type == "APIC": +# aci_spec = spec.get("aci", {}) +# telemetry_api = aci_spec.get("telemetry", {}) +# orchestration_api = aci_spec.get("orchestration", {}) + +# telemetry_payload = { +# "status": telemetry_api.get("status", "disabled") +# } +# if telemetry_api.get("network"): +# telemetry_payload["network"] = telemetry_api["network"] +# if 
telemetry_api.get("epg"): +# telemetry_payload["epg"] = telemetry_api["epg"] + +# orchestration_payload = { +# "status": orchestration_api.get("status", "disabled") +# } + +# payload["spec"]["aci"] = { +# "licenseTier": aci_spec.get("licenseTier"), +# "name": aci_spec.get("name"), +# "securityDomain": aci_spec.get("securityDomain"), +# "verifyCA": aci_spec.get("verifyCA"), +# "telemetry": telemetry_payload, +# "orchestration": orchestration_payload, +# } +# elif cluster_type == "ND": +# nd_spec = spec.get("nd", {}) +# if nd_spec.get("multiClusterLoginDomainName"): +# payload["spec"]["nd"] = {"multiClusterLoginDomainName": nd_spec.get("multiClusterLoginDomainName")} + +# return payload + + +def create_cluster(nd): + path = "/api/v1/infra/clusters" + response = nd.proposed + if not nd.module.check_mode: + response = nd.request(path, method="POST", data=nd.proposed) + return response + + +def update_cluster(nd): + path = "/api/v1/infra/clusters" + fabric_name_for_api_path = nd.existing.get("spec", {}).get("name") + response = payload = nd.proposed + payload["spec"]["name"] = fabric_name_for_api_path + if not nd.module.check_mode: + response = nd.request("{0}/{1}".format(path, fabric_name_for_api_path), method="PUT", data=payload) + return response + + +def delete_cluster(nd): + path = "/api/v1/infra/clusters" + fabric_name_for_api_path = nd.existing.get("spec", {}).get("name") + response = payload = {} + if nd.existing.get("spec", {}).get("clusterType", "").upper() == "APIC": + payload = { + "credentials": { + "user": nd.proposed.get("spec", {}).get("credentials", {}).get("user"), + "password": nd.proposed.get("spec", {}).get("credentials", {}).get("password"), + } + } + if not nd.module.check_mode: + response = nd.request("{0}/{1}/remove".format(path, fabric_name_for_api_path), method="POST", data=payload) + return response + + +def main(): + argument_spec = nd_argument_spec() + argument_spec.update( + config=dict( + type="list", + elements="dict", + options=dict( + 
cluster_type=dict(type="str", choices=["nd", "apic"]), + cluster_hostname=dict(type="str"), + cluster_username=dict(type="str"), + cluster_password=dict(type="str", no_log=True), + cluster_login_domain=dict(type="str"), + multi_cluster_login_domain=dict(type="str"), + fabric_name=dict(type="str"), + license_tier=dict(type="str", choices=["advantage", "essentials", "premier"]), + features=dict(type="list", elements="str", choices=["telemetry", "orchestration"]), + inband_epg=dict(type="str"), + security_domain=dict(type="str"), + validate_peer_certificate=dict(type="bool"), + ), + required=True, + ), + state=dict(type="str", default="merged", choices=["merged", "replaced", "overridden", "deleted"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + nd = NDModule(module) + desired_clusters_from_module_input = nd.params.get("config") + state = nd.params.get("state") + + desired_clusters_map = {} + for cluster_data_input in desired_clusters_from_module_input: + normalized_payload = module_input_to_api_payload(cluster_data_input) + desired_clusters_map[get_cluster_unique_key(normalized_payload)] = normalized_payload + + existing_clusters_raw = nd.query_objs("/api/v1/infra/clusters", key="clusters") + + existing_clusters_map = {} + for cluster_data_raw in existing_clusters_raw: + # normalized_payload = convert_api_response_to_payload_format(cluster_data_raw) + existing_clusters_map[get_cluster_unique_key(cluster_data_raw)] = cluster_data_raw + + callbacks = { + "update_callback": update_cluster, + "create_callback": create_cluster, + "delete_callback": delete_cluster, + } + + nd.manage_state( + state, + desired_clusters_map, + existing_clusters_map, + callbacks, + unwanted_keys=[ + ["spec", "credentials"], + ["spec", "aci", "name"], + ["spec", "aci", "telemetry"], + ["spec", "aci", "orchestration"], + ["spec", "aci", "licenseTier"], + ], + ) + + nd.exit_json() + + +if __name__ == "__main__": + main() diff --git 
a/tests/integration/inventory.networking b/tests/integration/inventory.networking index 6b37d8f..5cf0b3b 100644 --- a/tests/integration/inventory.networking +++ b/tests/integration/inventory.networking @@ -29,3 +29,10 @@ external_data_service_ip= data_ip= data_gateway= service_package_host=173.36.219.254 +nd_cluster_host= +nd_cluster_username= +nd_cluster_password= +aci_cluster_host= +aci_cluster_host2= +aci_cluster_username= +aci_cluster_password= diff --git a/tests/integration/targets/nd_multi_cluster_connectivity/tasks/main.yml b/tests/integration/targets/nd_multi_cluster_connectivity/tasks/main.yml new file mode 100644 index 0000000..41fcb98 --- /dev/null +++ b/tests/integration/targets/nd_multi_cluster_connectivity/tasks/main.yml @@ -0,0 +1,177 @@ +# Test code for the ND modules +# Copyright: (c) 2025, Shreyas Srish (@shrsr) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Configure clusters in check mode + cisco.nd.nd_multi_cluster_connectivity: &cm_add_clusters + output_level: debug + config: + # - cluster_type: nd + # cluster_hostname: "{{ nd_cluster_host }}" + # cluster_username: "{{ nd_cluster_username }}" + # cluster_password: "{{ nd_cluster_password }}" + - cluster_type: apic + fabric_name: ansible_test_2 + cluster_hostname: "{{ aci_cluster_host }}" + cluster_username: "{{ aci_cluster_username }}" + cluster_password: "{{ aci_cluster_password }}" + license_tier: advantage + # features: + # - orchestration + # inband_epg: ansible-inband + - cluster_type: apic + fabric_name: ansible_test + cluster_hostname: "{{ aci_cluster_host2 }}" + cluster_username: "{{ aci_cluster_username }}" + cluster_password: "{{ aci_cluster_password }}" + license_tier: advantage + # features: + # - orchestration + # inband_epg: ansible-inband + state: merged + check_mode: true + register: cm_connect_cluster_merged + +- name: Connect clusters in normal mode + cisco.nd.nd_multi_cluster_connectivity: + <<: *cm_add_clusters + 
register: nm_connect_cluster_merged + +- name: Connect clusters again + cisco.nd.nd_multi_cluster_connectivity: + <<: *cm_add_clusters + register: nm_connect_cluster_merged_again + +- name: Assert merged connect cluster + ansible.builtin.assert: + that: + - cm_connect_cluster_merged is changed + - cm_connect_cluster_merged.before["173.36.219.189"] == {} + - cm_connect_cluster_merged.before["173.36.219.190"] == {} + - cm_connect_cluster_merged.after["173.36.219.189"]["spec"]["clusterType"] == "APIC" + - cm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["licenseTier"] == "advantage" + - cm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["name"] == "ansible_test_2" + - cm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["orchestration"]["status"] == "disabled" + - cm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["telemetry"]["status"] == "disabled" + - cm_connect_cluster_merged.after["173.36.219.189"]["spec"]["onboardUrl"] == "173.36.219.189" + - cm_connect_cluster_merged.after["173.36.219.190"]["spec"]["clusterType"] == "APIC" + - cm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]["licenseTier"] == "advantage" + - cm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]["name"] == "ansible_test" + - cm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]['orchestration']["status"] == "disabled" + - cm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]["telemetry"]["status"] == "disabled" + - cm_connect_cluster_merged.after["173.36.219.190"]["spec"]["onboardUrl"] == "173.36.219.190" + - nm_connect_cluster_merged is changed + - nm_connect_cluster_merged.before["173.36.219.189"] == {} + - nm_connect_cluster_merged.before["173.36.219.190"] == {} + - nm_connect_cluster_merged.after["173.36.219.189"]["spec"]["clusterType"] == "APIC" + - nm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["licenseTier"] == "advantage" + - 
nm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["name"] == "ansible_test_2" + - nm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["orchestration"]["status"] == "disabled" + - nm_connect_cluster_merged.after["173.36.219.189"]["spec"]["aci"]["telemetry"]["status"] == "disabled" + - nm_connect_cluster_merged.after["173.36.219.189"]["spec"]["onboardUrl"] == "173.36.219.189" + - nm_connect_cluster_merged.after["173.36.219.190"]["spec"]["clusterType"] == "APIC" + - nm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]["licenseTier"] == "advantage" + - nm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]["name"] == "ansible_test" + - nm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]["orchestration"]["status"] == "disabled" + - nm_connect_cluster_merged.after["173.36.219.190"]["spec"]["aci"]["telemetry"]["status"] == "disabled" + - nm_connect_cluster_merged.after["173.36.219.190"]["spec"]["onboardUrl"] == "173.36.219.190" + - nm_connect_cluster_merged_again is not changed + - nm_connect_cluster_merged_again.before == nm_connect_cluster_merged_again.after + - nm_connect_cluster_merged_again.before["173.36.219.189"]["spec"]["clusterType"] == "APIC" + - nm_connect_cluster_merged_again.before["173.36.219.189"]["spec"]["aci"]["licenseTier"] == "advantage" + - nm_connect_cluster_merged_again.before["173.36.219.189"]["spec"]["name"] == "ansible_test_2" + - nm_connect_cluster_merged_again.before["173.36.219.189"]["spec"]["aci"]["orchestration"]["status"] == "disabled" + - nm_connect_cluster_merged_again.before["173.36.219.189"]["spec"]["aci"]["telemetry"]["status"] == "disabled" + - nm_connect_cluster_merged_again.before["173.36.219.189"]["spec"]["onboardUrl"] == "173.36.219.189" + - nm_connect_cluster_merged_again.before["173.36.219.190"]["spec"]["clusterType"] == "APIC" + - nm_connect_cluster_merged_again.before["173.36.219.190"]["spec"]["aci"]["licenseTier"] == "advantage" + - 
nm_connect_cluster_merged_again.before["173.36.219.190"]["spec"]["name"] == "ansible_test"
+      - nm_connect_cluster_merged_again.before["173.36.219.190"]["spec"]["aci"]["orchestration"]["status"] == "disabled"
+      - nm_connect_cluster_merged_again.before["173.36.219.190"]["spec"]["aci"]["telemetry"]["status"] == "disabled"
+      - nm_connect_cluster_merged_again.before["173.36.219.190"]["spec"]["onboardUrl"] == "173.36.219.190"
+
+- name: Replace clusters in normal mode
+  cisco.nd.nd_multi_cluster_connectivity:
+    output_level: debug
+    config:
+      # - cluster_type: nd
+      #   cluster_hostname: "{{ nd_cluster_host }}"
+      #   cluster_username: "{{ nd_cluster_username }}"
+      #   cluster_password: "{{ nd_cluster_password }}"
+      - cluster_type: apic
+        fabric_name: ansible_test_2
+        cluster_hostname: "{{ aci_cluster_host }}"
+        cluster_username: "{{ aci_cluster_username }}"
+        cluster_password: "{{ aci_cluster_password }}"
+        license_tier: essentials
+        # features:
+        #   - orchestration
+        # inband_epg: ansible-inband
+      - cluster_type: apic
+        fabric_name: ansible_test
+        cluster_hostname: "{{ aci_cluster_host2 }}"
+        cluster_username: "{{ aci_cluster_username }}"
+        cluster_password: "{{ aci_cluster_password }}"
+        license_tier: essentials
+        # features:
+        #   - orchestration
+        # inband_epg: ansible-inband
+    state: replaced
+  register: nm_connect_cluster_replaced
+
+# NOTE(review): license_tier was changed from advantage to essentials above, yet the
+# task is asserted as 'not changed' — confirm the license tier is immutable after
+# onboarding; otherwise this assertion would mask a failed replace.
+- name: Assert replaced connect cluster
+  ansible.builtin.assert:
+    that:
+      - nm_connect_cluster_replaced is not changed
+      - nm_connect_cluster_replaced.before == nm_connect_cluster_replaced.after
+
+- name: Delete clusters in check mode
+  cisco.nd.nd_multi_cluster_connectivity: &cm_remove_clusters
+    output_level: debug
+    config:
+      # - cluster_hostname: "{{ nd_cluster_host }}"
+      - cluster_hostname: "{{ aci_cluster_host }}"
+        cluster_username: "{{ aci_cluster_username }}"
+        cluster_password: "{{ aci_cluster_password }}"
+      - cluster_hostname: "{{ aci_cluster_host2 }}"
+        cluster_username: "{{ aci_cluster_username }}"
+        cluster_password: "{{ 
aci_cluster_password }}" + state: deleted + check_mode: true + register: cm_remove_clusters + +- name: Delete clusters in normal mode + cisco.nd.nd_multi_cluster_connectivity: + <<: *cm_remove_clusters + register: nm_remove_clusters + +- name: Delete clusters in normal mode again + cisco.nd.nd_multi_cluster_connectivity: + <<: *cm_remove_clusters + register: nm_remove_clusters_again + +- name: Assert deletion of connected clusters + ansible.builtin.assert: + that: + - cm_remove_clusters is changed + - cm_remove_clusters.after["173.36.219.189"] == {} + - cm_remove_clusters.after["173.36.219.190"] == {} + - cm_remove_clusters.before["173.36.219.189"]["spec"]["clusterType"] == "APIC" + - cm_remove_clusters.before["173.36.219.189"]["spec"]["aci"]["licenseTier"] == "advantage" + - cm_remove_clusters.before["173.36.219.189"]["spec"]["name"] == "ansible_test_2" + - cm_remove_clusters.before["173.36.219.189"]["spec"]["aci"]["orchestration"]["status"] == "disabled" + - cm_remove_clusters.before["173.36.219.189"]["spec"]["aci"]["telemetry"]["status"] == "disabled" + - cm_remove_clusters.before["173.36.219.189"]["spec"]["onboardUrl"] == "173.36.219.189" + - cm_remove_clusters.before["173.36.219.190"]["spec"]["clusterType"] == "APIC" + - cm_remove_clusters.before["173.36.219.190"]["spec"]["aci"]["licenseTier"] == "advantage" + - cm_remove_clusters.before["173.36.219.190"]["spec"]["name"] == "ansible_test" + - cm_remove_clusters.before["173.36.219.190"]["spec"]["aci"]["orchestration"]["status"] == "disabled" + - cm_remove_clusters.before["173.36.219.190"]["spec"]["aci"]["telemetry"]["status"] == "disabled" + - cm_remove_clusters.before["173.36.219.190"]["spec"]["onboardUrl"] == "173.36.219.190" + - nm_remove_clusters is changed + - cm_remove_clusters.before == nm_remove_clusters.before + - nm_remove_clusters.after["173.36.219.189"] == {} + - nm_remove_clusters.after["173.36.219.190"] == {} + - nm_remove_clusters_again is not changed + - nm_remove_clusters_again.before == 
nm_remove_clusters_again.after == {}