# Reconstructed from garbled patch de43b4cc ("CSCwi17652 check for
# service-ep flag", Enrique Estrada, 2026-01-12). Regex group names were
# stripped by extraction and are restored from the .group() call sites.
# NOTE(review): the patch also commits a 5880-line file named
# "admin@10.31.125.151" (an scp-destination typo) -- remove it from the commit.


@check_wrapper(check_title='Service-EP Flag in BD without PBR')
def service_ep_flag_bd_check(cversion, tversion, **kwargs):
    """CSCwi17652: find Bridge Domains attached to a service graph device
    interface context (vnsLIfCtx) that has no PBR redirect policy
    (vnsRsLIfCtxToSvcRedirectPol child) configured.

    Args:
        cversion (AciVersion): current APIC version.
        tversion (AciVersion): target upgrade version, or None.
    Returns:
        Result: FAIL_O with one row per affected BD, MANUAL for DNs that
        could not be parsed, NA when neither version is affected.
    """
    result = PASS
    headers = ["Tenant", "Bridge Domain", "Service Graph Device", "Device Node Name"]
    data = []
    unformatted_headers = ["DN of vnsLIfCtx"]
    unformatted_data = []
    recommended_action = (
        "\n\tConfirm that within these BDs the PBR configuration is complete."
        "\n\tPlease check the reference document for details."
    )
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#service-ep-flag-in-bd-without-pbr"

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    # NOTE(review): the second condition uses 5.2(5b) while the first uses
    # 5.2(5c) -- confirm against CSCwi17652 which boundary is intended.
    if (
        # Both current and target versions predate the defect
        (cversion.older_than("5.2(5c)") and tversion.older_than("5.2(5c)"))
        # Current version not affected and target version contains the fix
        or (cversion.older_than("5.2(5b)") and tversion.newer_than("6.0(8e)"))
        # Both current and target versions contain the fix
        or (cversion.newer_than("6.0(8e)") and tversion.newer_than("6.0(8e)"))
    ):
        return Result(result=NA, msg=VER_NOT_AFFECTED)

    bd_dn_regex = r"uni/tn-(?P<tn>[^/]+)/BD-(?P<bd>[^/]+)"
    sg_regex = (
        r"uni/tn-(?P<sg_tn>[^/]+)/"
        r"ldevCtx-c-(?P<ldev_ctrct>[^-]+)"
        r"-g-(?P<ldev_graph>[^-]+)"
        r"-n-(?P<ldev_node>[^/]+)/"
        r"lIfCtx-c-(?P<lifctx_conn>.+)"
    )

    vnsLIfCtx_api = "vnsLIfCtx.json?query-target=self&rsp-subtree=children"
    vnsLIfCtxs = icurl("class", vnsLIfCtx_api)

    for vnsLIfCtx in vnsLIfCtxs:
        children = vnsLIfCtx["vnsLIfCtx"].get("children", [])
        # PBR is configured when ANY child is a vnsRsLIfCtxToSvcRedirectPol.
        # Check all children, not only children[0] -- APIC does not guarantee
        # child ordering, so the original index-0 test gave false positives.
        if any("vnsRsLIfCtxToSvcRedirectPol" in child for child in children):
            continue
        dn = vnsLIfCtx["vnsLIfCtx"]["attributes"]["dn"]
        sg_match = re.search(sg_regex, dn)
        for child in children:
            if "vnsRsLIfCtxToBD" not in child:
                continue
            bd_match = re.search(bd_dn_regex, child["vnsRsLIfCtxToBD"]["attributes"]["tDn"])
            if sg_match and bd_match:
                # Only FAIL when a row is actually reported; the original set
                # FAIL_O even when parsing failed, producing an empty table.
                result = FAIL_O
                data.append([
                    sg_match.group("sg_tn"),
                    bd_match.group("bd"),
                    sg_match.group("ldev_graph"),
                    sg_match.group("ldev_node"),
                ])
            else:
                # DN did not match the expected format; surface it raw so the
                # user can inspect it manually (previously dead code).
                unformatted_data.append([dn])
            break  # at most one vnsRsLIfCtxToBD per lIfCtx is reported

    # Unparsable DNs demand a manual look, but never mask a hard failure.
    if unformatted_data and result == PASS:
        result = MANUAL
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )
@check_wrapper(check_title='APIC Database Size') @@ -5719,6 +5789,7 @@ def get_checks(api_only, debug_function): standby_sup_sync_check, isis_database_byte_check, configpush_shard_check, + service_ep_flag_bd_check, ] conn_checks = [ diff --git a/admin@10.31.125.151 b/admin@10.31.125.151 new file mode 100644 index 0000000..7cec59d --- /dev/null +++ b/admin@10.31.125.151 @@ -0,0 +1,5880 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright 2021 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import division +from __future__ import print_function +from six import iteritems, text_type +from six.moves import input +from textwrap import TextWrapper +from getpass import getpass +from collections import defaultdict +from datetime import datetime +from argparse import ArgumentParser +from itertools import chain +import functools +import shutil +import warnings +import time +import pexpect +import logging +import subprocess +import json +import sys +import os +import re + +SCRIPT_VERSION = "v3.2.0" +# result constants +DONE = 'DONE' +PASS = 'PASS' +FAIL_O = 'FAIL - OUTAGE WARNING!!' +FAIL_UF = 'FAIL - UPGRADE FAILURE!!' +ERROR = 'ERROR !!' +MANUAL = 'MANUAL CHECK REQUIRED' +POST = 'POST UPGRADE CHECK REQUIRED' +NA = 'N/A' +# message constants +TVER_MISSING = "Target version not supplied. Skipping." +VER_NOT_AFFECTED = "Version not affected." 
+# regex constants +node_regex = r'topology/pod-(?P\d+)/node-(?P\d+)' +port_regex = node_regex + r'/sys/phys-\[(?P.+)\]' +path_regex = ( + r"topology/pod-(?P\d+)/" + r"(?:prot)?paths-(?P\d+|\d+-\d+)/" # direct or PC/vPC + r"(?:ext(?:prot)?paths-(?P\d+|\d+-\d+)/)?" # FEX (optional) + r"pathep-\[(?P.+)\]" # ethX/Y or PC/vPC IFPG name +) +dom_regex = r"uni/(?:vmmp-[^/]+/)?(?Pphys|l2dom|l3dom|dom)-(?P[^/]+)" + +tz = time.strftime('%z') +ts = datetime.now().strftime('%Y-%m-%dT%H-%M-%S') +BUNDLE_NAME = 'preupgrade_validator_%s%s.tgz' % (ts, tz) +DIR = 'preupgrade_validator_logs/' +JSON_DIR = DIR + 'json_results/' +META_FILE = DIR + 'meta.json' +RESULT_FILE = DIR + 'preupgrade_validator_%s%s.txt' % (ts, tz) +SUMMARY_FILE = DIR + 'summary.json' +LOG_FILE = DIR + 'preupgrade_validator_debug.log' +warnings.simplefilter(action='ignore', category=FutureWarning) + +log = logging.getLogger() + + +class OldVerClassNotFound(Exception): + """ Later versions of ACI can have class properties not found in older versions """ + pass + + +class OldVerPropNotFound(Exception): + """ Later versions of ACI can have class properties not found in older versions """ + pass + + +class Connection(object): + """ + Object built primarily for executing commands on Cisco IOS/NXOS devices. 
The following + methods and variables are available for use in this class: + + username (opt) username credential (default 'admin') + password (opt) password credential (default 'cisco') + protocol (opt) telnet/ssh option (default 'ssh') + port (opt) port to connect on (if different from telnet/ssh default) + timeout (opt) wait in seconds between each command (default 30) + prompt (opt) prompt to expect after each command (default for IOS/NXOS) + log (opt) logfile (default None) + verify (opt) verify/enforce strictHostKey values for SSL (disabled by default) + searchwindowsize (opt) maximum amount of data used in matching expressions + extremely important to set to a low value for large outputs + pexpect default = None, setting this class default=256 + force_wait (opt) some OS ignore searchwindowsize and therefore still experience high + CPU and long wait time for commands with large outputs to complete. + A workaround is to sleep the script instead of running regex checking + for prompt character. + This should only be used in those unique scenarios... + Default is 0 seconds (disabled). 
If needed, set to 8 (seconds) + + functions: + connect() (opt) connect to device with provided protocol/port/hostname + login() (opt) log into device with provided credentials + close() (opt) close current connection + cmd() execute a command on the device (provide matches and timeout) + + Example using all defaults + c = Connection("10.122.140.89") + c.cmd("terminal length 0") + c.cmd("show version") + print "version of code: %s" % c.output + + @author agossett@cisco.com + @version 07/28/2014 + """ + + def __init__(self, hostname): + self.hostname = hostname + self.log = None + self.username = 'admin' + self.password = 'cisco' + self.protocol = "ssh" + self.port = None + self.timeout = 30 + self.prompt = r"#\s.*$" + self.verify = False + self.searchwindowsize = 256 + self.force_wait = 0 + self.child = None + self.output = "" # output from last command + self._term_len = 0 # terminal length for cisco devices + self._login = False # set to true at first successful login + self._log = None # private variable for tracking logfile state + + def __connected(self): + # determine if a connection is already open + connected = (self.child is not None and self.child.isatty()) + log.debug("check for valid connection: %r" % connected) + return connected + + @property + def term_len(self): + return self._term_len + + @term_len.setter + def term_len(self, term_len): + self._term_len = int(term_len) + if (not self.__connected()) or (not self._login): + # login function will set the terminal length + self.login() + else: + # user changing terminal length during operation, need to explicitly + self.cmd("terminal length %s" % self._term_len) + + def start_log(self): + """ start or restart sending output to logfile """ + if self.log is not None and self._log is None: + # if self.log is a string, then attempt to open file pointer (do not catch exception, we want it + # to die if there's an error opening the logfile) + if isinstance(self.log, str) or isinstance(self.log, text_type): + 
self._log = open(self.log, "ab") + else: + self._log = self.log + log.debug("setting logfile to %s" % self._log.name) + if self.child is not None: + self.child.logfile = self._log + + def stop_log(self): + """ stop sending output to logfile """ + self.child.logfile = None + self._log = None + return + + def connect(self): + # close any currently open connections + self.close() + + # determine port if not explicitly set + if self.port is None: + if self.protocol == "ssh": + self.port = 22 + if self.protocol == "telnet": + self.port = 23 + # spawn new thread + if self.protocol.lower() == "ssh": + log.debug( + "spawning new pexpect connection: ssh %s@%s -p %d" % (self.username, self.hostname, self.port)) + no_verify = " -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null" + if self.verify: no_verify = "" + self.child = pexpect.spawn("ssh %s %s@%s -p %d" % (no_verify, self.username, self.hostname, self.port), + searchwindowsize=self.searchwindowsize) + elif self.protocol.lower() == "telnet": + log.info("spawning new pexpect connection: telnet %s %d" % (self.hostname, self.port)) + self.child = pexpect.spawn("telnet %s %d" % (self.hostname, self.port), + searchwindowsize=self.searchwindowsize) + else: + log.error("unknown protocol %s" % self.protocol) + raise Exception("Unsupported protocol: %s" % self.protocol) + + # start logging + self.start_log() + + def close(self): + # try to gracefully close the connection if opened + if self.__connected(): + log.info("closing current connection") + self.child.close() + self.child = None + self._login = False + + def __expect(self, matches, timeout=None): + """ + receives a dictionary 'matches' and returns the name of the matched item + instead of relying on the index into a list of matches. 
Automatically + adds following options if not already present + "eof" : pexpect.EOF + "timeout" : pexpect.TIMEOUT + """ + + if "eof" not in matches: + matches["eof"] = pexpect.EOF + if "timeout" not in matches: + matches["timeout"] = pexpect.TIMEOUT + + if timeout is None: timeout = self.timeout + indexed = [] + mapping = [] + for i in matches: + indexed.append(matches[i]) + mapping.append(i) + result = self.child.expect(indexed, timeout) + log.debug("timeout: %d, matched: '%s'\npexpect output: '%s%s'" % ( + timeout, self.child.after, self.child.before, self.child.after)) + if result <= len(mapping) and result >= 0: + log.debug("expect matched result[%d] = %s" % (result, mapping[result])) + return mapping[result] + ds = '' + log.error("unexpected pexpect return index: %s" % result) + for i in range(0, len(mapping)): + ds += '[%d] %s\n' % (i, mapping[i]) + log.debug("mapping:\n%s" % ds) + raise Exception("Unexpected pexpect return index: %s" % result) + + def login(self, max_attempts=7, timeout=17): + """ + returns true on successful login, else returns false + """ + + log.debug("Logging into host") + + # successfully logged in at a different time + if not self.__connected(): self.connect() + # check for user provided 'prompt' which indicates successful login + # else provide approriate username/password + matches = { + "console": "(?i)press return to get started", + "refuse": "(?i)connection refused", + "yes/no": "(?i)yes/no", + "username": "(?i)(user(name)*|login)[ as]*[ \t]*:[ \t]*$", + "password": "(?i)password[ \t]*:[ \t]*$", + "prompt": self.prompt + } + + while max_attempts > 0: + max_attempts -= 1 + match = self.__expect(matches, timeout) + if match == "console": # press return to get started + log.debug("matched console, send enter") + self.child.sendline("\r\n") + elif match == "refuse": # connection refused + log.error("connection refused by host") + return False + elif match == "yes/no": # yes/no for SSH key acceptance + log.debug("received yes/no 
prompt, send yes") + self.child.sendline("yes") + elif match == "username": # username/login prompt + log.debug("received username prompt, send username") + self.child.sendline(self.username) + elif match == "password": + # don't log passwords to the logfile + self.stop_log() + log.debug("matched password prompt, send password") + self.child.sendline(self.password) + # restart logging + self.start_log() + elif match == "prompt": + log.debug("successful login") + self._login = True + # force terminal length at login + self.term_len = self._term_len + return True + elif match == "timeout": + log.debug("timeout received but connection still opened, send enter") + self.child.sendline("\r\n") + # did not find prompt within max attempts, failed login + log.error("failed to login after multiple attempts") + return False + + def cmd(self, command, **kargs): + """ + execute a command on a device and wait for one of the provided matches to return. + Required argument string command + Optional arguments: + timeout - seconds to wait for command to completed (default to self.timeout) + sendline - boolean flag to use send or sendline fuction (default to true) + matches - dictionary of key/regex to match against. Key corresponding to matched + regex will be returned. By default, the following three keys/regex are applied: + 'eof' : pexpect.EOF + 'timeout' : pexpect.TIMEOUT + 'prompt' : self.prompt + echo_cmd - boolean flag to echo commands sent (default to false) + note most terminals (i.e., Cisco devices) will echo back all typed characters + by default. Therefore, enabling echo_cmd may cause duplicate cmd characters + Return: + returns the key from the matched regex. For most scenarios, this will be 'prompt'. 
The output + from the command can be collected from self.output variable + """ + + sendline = True + timeout = self.timeout + matches = {} + echo_cmd = False + if "timeout" in kargs: + timeout = kargs["timeout"] + if "matches" in kargs: + matches = kargs["matches"] + if "sendline" in kargs: + sendline = kargs["sendline"] + if "echo_cmd" in kargs: + echo_cmd = kargs["echo_cmd"] + + # ensure prompt is in the matches list + if "prompt" not in matches: + matches["prompt"] = self.prompt + + self.output = "" + # check if we've ever logged into device or currently connected + if (not self.__connected()) or (not self._login): + log.debug("no active connection, attempt to login") + if not self.login(): + raise Exception("failed to login to host") + + # if echo_cmd is disabled, then need to disable logging before + # executing commands + if not echo_cmd: self.stop_log() + + # execute command + log.debug("cmd command: %s" % command) + if sendline: + self.child.sendline(command) + else: + self.child.send(command) + + # remember to re-enable logging + if not echo_cmd: self.start_log() + + # force wait option + if self.force_wait != 0: + time.sleep(self.force_wait) + + result = self.__expect(matches, timeout) + self.output = "%s%s" % (self.child.before.decode("utf-8"), self.child.after.decode("utf-8")) + if result == "eof" or result == "timeout": + log.warning("unexpected %s occurred" % result) + return result + + +class IPAddress: + """Custom IP handling class since old APICs do not have `ipaddress` module. 
+ """ + @classmethod + def ip_to_binary(cls, ip): + if ':' in ip: + return cls.ipv6_to_binary(ip) + else: + return cls.ipv4_to_binary(ip) + + @staticmethod + def ipv4_to_binary(ipv4): + octets = ipv4.split(".") + octets_bin = [format(int(octet), "08b") for octet in octets] + return "".join(octets_bin) + + @staticmethod + def ipv6_to_binary(ipv6): + HEXTET_COUNT = 8 + _hextets = ipv6.split(":") + dbl_colon_index = None + if '' in _hextets: + # leading/trailing '::' results in additional '' at the beginning/end. + if _hextets[0] == '': + _hextets = _hextets[1:] + if _hextets[-1] == '': + _hextets = _hextets[:-1] + # Uncompress all zero hextets represented by '::' + dbl_colon_index = _hextets.index('') + skipped_hextets = HEXTET_COUNT - len(_hextets) + 1 + hextets = _hextets[:dbl_colon_index] + hextets += ['0'] * skipped_hextets + hextets += _hextets[dbl_colon_index+1:] + else: + hextets = _hextets + hextets_bin = [format(int(hextet, 16), "016b") for hextet in hextets] + return "".join(hextets_bin) + + @classmethod + def get_network_binary(cls, ip, pfxlen): + maxlen = 128 if ':' in ip else 32 + ip_bin = cls.ip_to_binary(ip) + return ip_bin[0:maxlen-(maxlen-int(pfxlen))] + + @classmethod + def ip_in_subnet(cls, ip, subnet): + if "/" in ip: + raise ValueError( + "IP address {} should not have a subnet mask".format(ip) + ) + if "/" not in subnet: + return False + subnet_ip, subnet_pfxlen = subnet.split("/") + subnet_network = cls.get_network_binary(subnet_ip, subnet_pfxlen) + ip_network = cls.get_network_binary(ip, subnet_pfxlen) + return ip_network == subnet_network + + +class AciVersion(): + """ + ACI Version parser class. Parses the version string and provides methods to compare versions. 
+ Supported version formats: + - APIC: `5.2(7f)`, `5.2.7f`, `5.2(7.123a)`, `5.2.7.123a`, `5.2(7.123)`, `5.2.7.123`, `aci-apic-dk9.5.2.7f.iso/bin` + - Switch: `15.2(7f)`, `15.2.7f`, `15.2(7.123a)`, `15.2.7.123a`, `15.2(7.123)`, `15.2.7.123`, `aci-n9000-dk9.15.2.7f.bin` + """ + v_regex = r'(?:dk9\.)?[1]?(?P\d)\.(?P\d)(?:\.|\()(?P\d+)(?P\.?)(?P(?:[a-z]|\d+))(?P[a-z]?)\)?' + + def __init__(self, version): + self.original = version + v = re.search(self.v_regex, version) + if not v: + raise ValueError("Parsing failure of ACI version `%s`" % version) + self.version = "{major1}.{major2}({maint}{QAdot}{patch1}{patch2})".format(**v.groupdict()) + self.dot_version = "{major1}.{major2}.{maint}{QAdot}{patch1}{patch2}".format(**v.groupdict()) + self.simple_version = "{major1}.{major2}({maint})".format(**v.groupdict()) + self.major_version = "{major1}.{major2}".format(**v.groupdict()) + self.major1 = v.group("major1") + self.major2 = v.group("major2") + self.maint = v.group("maint") + self.patch1 = v.group("patch1") + self.patch2 = v.group("patch2") + self.regex = v + + def __str__(self): + return self.version + + def older_than(self, version): + v2 = version if isinstance(version, AciVersion) else AciVersion(version) + for key in ["major1", "major2", "maint"]: + if int(self.regex.group(key)) > int(v2.regex.group(key)): return False + elif int(self.regex.group(key)) < int(v2.regex.group(key)): return True + # Patch1 can be alphabet or number + if self.patch1.isalpha() and v2.patch1.isdigit(): + return True # e.g., 5.2(7f) is older than 5.2(7.123) + elif self.patch1.isdigit() and v2.patch1.isalpha(): + return False + elif self.patch1.isalpha() and v2.patch1.isalpha(): + if self.patch1 > v2.patch1: return False + elif self.patch1 < v2.patch1: return True + elif self.patch1.isdigit() and v2.patch1.isdigit(): + if int(self.patch1) > int(v2.patch1): return False + elif int(self.patch1) < int(v2.patch1): return True + # Patch2 (alphabet) is optional. 
+ if not self.patch2 and v2.patch2: + return True # one without Patch2 is older. + elif self.patch2 and not v2.patch2: + return False + elif self.patch2 and v2.patch2: + if self.patch2 > v2.patch2: return False + elif self.patch2 < v2.patch2: return True + return False + + def newer_than(self, version): + return not self.older_than(version) and not self.same_as(version) + + def same_as(self, version): + v2 = version if isinstance(version, AciVersion) else AciVersion(version) + return self.version == v2.version + + +class AciObjectCrawler(object): + """ + Args: + mos (list of dict): MOs in the form of output from the function `icurl()` with + the filter `query-target` that returns a flat list. + """ + + def __init__(self, mos): + self.mos = mos + self.mos_per_class = defaultdict(list) + + self.init_mos_per_class() + + def init_mos_per_class(self): + """ + Create `self.mos_per_class` (dict) which stores lists of MOs per class. + """ + for mo in self.mos: + classname = list(mo.keys())[0] + _mo = {"classname": classname} + _mo.update(mo[classname]["attributes"]) + self.mos_per_class[classname].append(_mo) + + def get_mos(self, classname): + return self.mos_per_class.get(classname, []) + + def get_children(self, parent_dn, children_class): + """ + Args: + parent_dn (str): DN of the parent MO. + children_class (str): Class name of the (grand) children under parent_dn. + Returns: + list of dict: The MOs of children_class under parent_dn. + """ + mos = self.get_mos(children_class) + return [mo for mo in mos if mo["dn"].startswith(parent_dn + "/")] + + def get_parent(self, child_dn, parent_class): + """ + Args: + child_dn (str): DN of the child MO. + parent_class (str): Class name of the (grand) parent of child_dn. + Returns: + dict: The parent MO of child_dn. 
+ """ + mos = self.get_mos(parent_class) + for mo in mos: + if child_dn.startswith(mo["dn"] + "/"): + return mo + return {} + + def get_rel_targets(self, src_dn, rel_class): + """ + Args: + src_dn (str): DN of the source object. + rel_class (str): Relation class with tDn/tCl. Children of src_dn + Returns: + list of dict: MOs that are pointed by tDn from src_dn + """ + targets = [] + rel_mos = self.get_children(src_dn, rel_class) + for rel_mo in rel_mos: + mos = self.get_mos(rel_mo["tCl"]) + for mo in mos: + if mo["dn"] == rel_mo["tDn"]: + targets.append(mo) + break + else: + # The target objects may not be in our self.mos_per_class. + # In that case, just return the DN and class. + targets.append({"dn": rel_mo["tDn"], "classname": rel_mo["tCl"]}) + return targets + + def get_src_from_tDn(self, tDn, rs_class, src_class): + """ + Args: + tDn (str): Target DN. Get all MOs with this DN as the target via rs_class. + rs_class (str): Relation class. + src_class (str): Class name of source MOs that may have tDn as the target + via rs_class. + Returns: + list of dict: MOs that point to tDn via rs_class. + """ + src_mos = [] + rs_mos = self.get_mos(rs_class) + for rs_mo in rs_mos: + if rs_mo["tDn"] == tDn: + src_mo = self.get_parent(rs_mo["dn"], src_class) + if src_mo: + src_mos.append(src_mo) + return src_mos + + +class AciAccessPolicyParser(AciObjectCrawler): + """ + port_data: + key: port_path in the format shown below: + `/eth/` + `//eth/` + `/` + `//` + value: { + "ifpg": Name of IFPG + "override_ifpg": Name of override IFPG. Skipped if not override + "pc_type": none|pc|vpc. From the IFPG + "aep": Name of AEP + "domain_dns": List of domain DNs associated to the AEP + "vlan_scope": global or portlocal. From the IFPG + "node": Node ID + "fex": Fex ID or 0 + "port": ethX/Y, ethX/Y/Z, IFPG name + } + vpool_per_dom: + key: domain DN + value: { + "name": Name of VLAN Pool + "vlan_ids": List of VLAN IDs. 
ex) [1,2,3,100,101] + "dom_name": Name of domain + "dom_type": Type of domain (phys, l3dom, vmm) + } + """ + # VLAN Pool + VLANPool = "fvnsVlanInstP" + VLANBlk = "fvnsEncapBlk" + # AEP + AEP = "infraAttEntityP" + # Leaf Interface Profile etc. + IFP = "infraAccPortP" + IFSel = "infraHPortS" + PortBlk = "infraPortBlk" + SubPortBlk = "infraSubPortBlk" # breakout + IFPath = "infraHPathS" # override + # Leaf Switch Profile etc. + SWP = "infraNodeP" + SWSel = "infraLeafS" + NodeBlk = "infraNodeBlk" + # FEX + FEXP = "infraFexP" + FEXPG = "infraFexBndlGrp" + + # Leaf Interface Policy Group etc. + IFPG = "infraAccPortGrp" + IFPG_PC = "infraAccBndlGrp" + IFPG_PC_O = "infraAccBndlPolGrp" # override (PC/VPC PG) + + # Leaf Interface Policy + IFPol_L2 = "l2IfPol" + + # Relation objects (_to_) + VLAN_to_Dom = "fvnsRtVlanNs" + AEP_to_Dom = "infraRsDomP" + IFPG_to_AEP = "infraRsAttEntP" + IFSel_to_IFPG = "infraRsAccBaseGrp" + IFPath_to_IFPG = "infraRsPathToAccBaseGrp" # override + IFPath_to_Path = "infraRsHPathAtt" # override + SWP_to_IFP = "infraRsAccPortP" + IFPol_L2_to_IFPG = "l2RtL2IfPol" + + def __init__(self, mos): + super(AciAccessPolicyParser, self).__init__(mos) + self.nodes_per_ifp = defaultdict(list) + self.port_data = defaultdict(dict) + self.vpool_per_dom = defaultdict(dict) + + self.create_port_data() + self.create_vlanpool_per_domain() + + @classmethod + def get_classes(cls): + """Get all ACI object classes used in this class""" + classes = [] + for key, val in iteritems(AciAccessPolicyParser.__dict__): + if key.startswith("__") or not isinstance(val, str): + continue + classes.append(val) + return classes + + def get_node_ids_from_ifp(self, ifp_dn): + if ifp_dn in self.nodes_per_ifp: + return self.nodes_per_ifp[ifp_dn] + node_ids = [] + swps = self.get_src_from_tDn(ifp_dn, self.SWP_to_IFP, self.SWP) + for swp in swps: + swsels = self.get_children(swp["dn"], self.SWSel) + for swsel in swsels: + node_blks = self.get_children(swsel["dn"], self.NodeBlk) + for node_blk 
in node_blks: + _from = int(node_blk["from_"]) + _to = int(node_blk["to_"]) + node_ids += range(_from, _to + 1) + self.nodes_per_ifp[ifp_dn] = node_ids + return node_ids + + def get_node_ids_from_ifsel(self, ifsel_dn): + ifp = self.get_parent(ifsel_dn, self.IFP) + if not ifp: + log.warning("No I/F Profile for Selector (%s)", ifsel_dn) + return [] + node_ids = self.get_node_ids_from_ifp(ifp["dn"]) + return node_ids + + def get_fex_id_from_ifsel(self, ifsel_dn): + """Get FEX ID if ifsel is FEX NIF""" + fex_id = 0 + rs_ifpgs = self.get_children(ifsel_dn, self.IFSel_to_IFPG) + if rs_ifpgs and rs_ifpgs[0]["tCl"] == "infraFexBndlGrp": + fex_id = int(rs_ifpgs[0]["fexId"]) + return fex_id + + def get_fexnif_ifsels_from_fexhif(self, hif_ifsel_dn): + """ + Get FEX NIF I/F selectors from a FEX HIF I/F Selector + """ + # 1. Get FEXPG from FEX HIF IFSel via the parent (FEXP). + # FEXP -+- IFSel (FEX HIF) + # +- FEXPG + fexp = self.get_parent(hif_ifsel_dn, self.FEXP) + if not fexp: + return [] + fexpgs = self.get_children(fexp["dn"], self.FEXPG) + if not fexpgs: + return [] + # There should be only one FEXPG for each FEXP + fexpg = fexpgs[0] + # 2. Get FEX NIF IFSels from FEXPG via the relation. 
+ # IFSel (FEX NIF) <--[IFSel_to_IFPG]-- FEXPG + fexnif_ifsels = self.get_src_from_tDn( + fexpg["dn"], self.IFSel_to_IFPG, self.IFSel + ) + return fexnif_ifsels + + def get_ports_from_ifsel(self, ifsel_dn): + ports = [] + port_blks = self.get_children(ifsel_dn, self.PortBlk) + subport_blks = self.get_children(ifsel_dn, self.SubPortBlk) + for port_blk in port_blks + subport_blks: + from_card = int(port_blk["fromCard"]) + from_port = int(port_blk["fromPort"]) + from_subport = int(port_blk["fromSubPort"]) if port_blk["classname"] == self.SubPortBlk else 0 + to_card = int(port_blk["toCard"]) + to_port = int(port_blk["toPort"]) + to_subport = int(port_blk["toSubPort"]) if port_blk["classname"] == self.SubPortBlk else 0 + for card in range(from_card, to_card + 1): + for port in range(from_port, to_port + 1): + for subport in range(from_subport, to_subport + 1): + if subport: + ports.append("eth{}/{}/{}".format(card, port, subport)) + else: + ports.append("eth{}/{}".format(card, port)) + return ports + + def create_port_data(self): + ifsels = self.get_mos(self.IFSel) + for ifsel in ifsels: + # GET Node IDs and FEX IDs + node2fexid = {} + if ifsel["dn"].startswith("uni/infra/fexprof-"): + # When ifsel is of FEX HIF, get node IDs and FEX IDs from FEX NIFs. + # ACI supports only single-homed FEXes with or without vPC. + # One FEX HIF can be tied to 2 nodes, one FEX for each, at maximum. + nifs = self.get_fexnif_ifsels_from_fexhif(ifsel["dn"]) + for nif in nifs: + _node_ids = self.get_node_ids_from_ifsel(nif["dn"]) + fex_id = self.get_fex_id_from_ifsel(nif["dn"]) + for _node_id in _node_ids: + node2fexid[_node_id] = fex_id + node_ids = node2fexid.keys() + if len(node_ids) > 2: + log.error( + "FEX HIF handling failed as it shows more than 2 nodes." 
+ ) + break + else: + node_ids = self.get_node_ids_from_ifsel(ifsel["dn"]) + if not node_ids: + continue + + # Get IFPG + ifpgs = self.get_rel_targets(ifsel["dn"], self.IFSel_to_IFPG) + if not ifpgs: + continue + ifpg = ifpgs[0] + + # Get ports or use IFPG Name for PC/VPC + if ifpg.get("classname") == self.IFPG_PC and ifpg.get("name"): + ports = [ifpg["name"]] + else: + ports = self.get_ports_from_ifsel(ifsel["dn"]) + if not ports: + continue + + # Get settings from IFPG + pc_type = self.get_pc_type(ifpg) + + l2if = self.get_ifpol_l2if_from_ifpg(ifpg["dn"]) + vlan_scope = l2if.get("vlanScope", "unknown") + + # Get AEP from IFPG + aeps = self.get_rel_targets(ifpg.get("dn", ""), self.IFPG_to_AEP) + aep = aeps[0] if aeps else {} + # Get Domains from AEP + doms = self.get_rel_targets(aep.get("dn", ""), self.AEP_to_Dom) + + for node_id in node_ids: + fex_id = node2fexid.get(node_id, 0) + for port in ports: + if fex_id: + path = "/".join([str(node_id), str(fex_id), port]) + else: + path = "/".join([str(node_id), port]) + self.port_data[path] = { + "node": str(node_id), + "fex": str(fex_id), + "port": port, + "ifpg_name": ifpg.get("name", ""), + "pc_type": pc_type, + "vlan_scope": vlan_scope, + "aep_name": aep.get("name", ""), + "domain_dns": [dom["dn"] for dom in doms], + } + + # Override + ifpaths = self.get_mos(self.IFPath) + for ifpath in ifpaths: + # Get Node/FEX/Port ID + override_paths = self.get_children(ifpath["dn"], self.IFPath_to_Path) + if not override_paths: + continue + override_path = override_paths[0] + p = re.search(path_regex, override_path["tDn"]) + nodes = p.group("nodes").split("-") + fexes = p.group("fex").split("-") if p.group("fex") else [] + port = p.group("port") + + # Get IFPG + ifpgs = self.get_rel_targets(ifpath["dn"], self.IFPath_to_IFPG) + if not ifpgs: + continue + ifpg = ifpgs[0] + + # Get settings from IFPG + l2if = self.get_ifpol_l2if_from_ifpg(ifpg["dn"]) + vlan_scope = l2if.get("vlanScope", "unknown") + + # Get AEP from IFPG + aeps = 
self.get_rel_targets(ifpg.get("dn", ""), self.IFPG_to_AEP) + aep = aeps[0] if aeps else {} + # Get Domains from AEP + doms = self.get_rel_targets(aep.get("dn", ""), self.AEP_to_Dom) + + for idx, node in enumerate(nodes): + fex = "0" + if fexes: + fex = fexes[0] if len(fexes) == 1 else fexes[idx] + path = "/".join([node, fex, port]) + else: + path = "/".join([node, port]) + self.port_data[path].update({ + "node": node, + "fex": fex, + "port": port, + "override_ifpg_name": ifpg.get("name", ""), + "vlan_scope": vlan_scope, + "aep_name": aep.get("name", ""), + "domain_dns": [dom["dn"] for dom in doms], + }) + + def create_vlanpool_per_domain(self): + vlan_pools = self.get_mos(self.VLANPool) + for vlan_pool in vlan_pools: + vlan_ids = [] + vlan_blks = self.get_children(vlan_pool["dn"], self.VLANBlk) + for vlan_blk in vlan_blks: + vlan_ids += range( + int(vlan_blk["from"].split("-")[1]), + int(vlan_blk["to"].split("-")[1]) + 1, + ) + rs_domains = self.get_children(vlan_pool["dn"], self.VLAN_to_Dom) + for rs_domain in rs_domains: + dom_match = re.search(dom_regex, rs_domain["tDn"]) + dom_name = "..." if not dom_match else dom_match.group("dom") + dom_type = "..." if not dom_match else dom_match.group("type") + # No need to worry about overwrite because there can be + # only one VLAN pool per domain. 
# NOTE(review): this file appears to be an accidentally committed scp artifact
# ("admin@10.31.125.151") duplicating the main script; consider removing it from
# the patch entirely instead of maintaining this copy.
                self.vpool_per_dom[rs_domain["tDn"]] = {
                    "name": vlan_pool["name"],
                    "vlan_ids": vlan_ids,
                    "dom_name": dom_name,
                    # VMM domain DNs resolve with type token "dom"; normalize to "vmm"
                    "dom_type": "vmm" if dom_type == "dom" else dom_type,
                }
        return self.vpool_per_dom

    def get_pc_type(self, ifpg):
        # Map the IFPG lagT attribute to a simple label:
        # "node" -> vpc, "link"/"fc-link" -> pc, anything else -> "none".
        pc_type = "none"
        if ifpg.get("lagT") == "node":
            pc_type = "vpc"
        elif ifpg.get("lagT") in ["link", "fc-link"]:
            pc_type = "pc"
        return pc_type

    def get_ifpol_l2if_from_ifpg(self, ifpg_dn):
        # Return the L2 interface policy object pointed at by the given IFPG DN,
        # or {} when none is attached.
        ifpol_l2s = self.get_src_from_tDn(ifpg_dn, self.IFPol_L2_to_IFPG, self.IFPol_L2)
        return ifpol_l2s[0] if ifpol_l2s else {}


def is_firstver_gt_secondver(first_ver, second_ver):
    """ Used for CIMC version comparison """
    # Compares fixed character positions (0, 2, 4, 5) of the two version
    # strings, i.e. major / minor / patch digit / patch letter of a CIMC
    # version such as "4.1(2b)".
    # NOTE(review): positional indexing assumes every field is a single
    # character - TODO confirm against all CIMC version formats in use.
    result = False
    if first_ver[0] > second_ver[0]:
        return True
    elif first_ver[0] == second_ver[0]:
        if first_ver[2] > second_ver[2]:
            return True
        elif first_ver[2] == second_ver[2]:
            if first_ver[4] > second_ver[4]:
                return True
            elif first_ver[4] == second_ver[4]:
                # ">=" on the last position makes equal versions return True.
                if first_ver[5] >= second_ver[5]:
                    result = True
    return result


class AciResult:
    """
    APIC uses an object called `syntheticMaintPValidate` to store the results of
    each rule/check in the pre-upgrade validation which runs during the upgrade
    workflow in the APIC GUI. When this script is invoked during the workflow, it
    is expected to write the results of each rule/check to a JSON file (one per rule)
    in a specific format compliant with `syntheticMaintPValidate`.
+ """ + # Expected keys in the JSON file + __slots__ = ( + "ruleId", "name", "description", "reason", "sub_reason", "recommended_action", + "docUrl", "severity", "ruleStatus", "showValidation", "failureDetails", + ) + + # ruleStatus + IN_PROGRESS = "in-progress" + PASS = "passed" + FAIL = "failed" + + def __init__(self, func_name, name, description): + self.ruleId = func_name + self.name = name + self.description = description + self.reason = "" + self.sub_reason = "" + self.recommended_action = "" + self.docUrl = "" + self.severity = "informational" + self.ruleStatus = AciResult.IN_PROGRESS + self.showValidation = True + self.failureDetails = { + "failType": "", + "data": [], + "unformatted_data": [], + } + + @property + def filename(self): + return re.sub(r'[^a-zA-Z0-9_]+|\s+', '_', self.ruleId) + '.json' + + @staticmethod + def craftData(column, rows): + if not (isinstance(rows, list) and isinstance(column, list)): + raise TypeError("Rows and column must be lists.") + data = [] + c_len = len(column) + for row_entry in range(len(rows)): + r_len = len(rows[row_entry]) + if r_len != c_len: + raise ValueError("Row length ({}), data: {} does not match column length ({}).".format(r_len, rows[row_entry], c_len)) + entry = {} + for col_pos in range(c_len): + entry[column[col_pos]] = str(rows[row_entry][col_pos]) + data.append(entry) + return data + + def updateWithResults(self, result, recommended_action, msg, doc_url, headers, data, unformatted_headers, unformatted_data): + self.reason = msg + self.recommended_action = recommended_action + self.docUrl = doc_url + + # Show validation + if result in [NA, POST]: + self.showValidation = False + + # Severity + if result in [FAIL_O, FAIL_UF]: + self.severity = "critical" + elif result in [ERROR]: + self.severity = "major" + elif result in [MANUAL]: + self.severity = "warning" + + self.ruleStatus = AciResult.PASS + if result not in [NA, PASS]: + self.ruleStatus = AciResult.FAIL + if not self.reason: + self.reason = "See 
Failure Details" + self.failureDetails["failType"] = result + self.failureDetails["header"] = headers + self.failureDetails["data"] = self.craftData(headers, data) + if unformatted_headers and unformatted_data: + self.failureDetails["unformatted_data"] = self.craftData(unformatted_headers, unformatted_data) + if self.reason: + self.reason += "\n" + self.reason += ( + "Parse failure occurred, the provided data may not be complete. " + "Please contact Cisco TAC to identify the missing data." + ) + + def buildResult(self): + return {slot: getattr(self, slot) for slot in self.__slots__} + + def writeResult(self, path=JSON_DIR): + if not os.path.isdir(path): + os.mkdir(path) + with open(os.path.join(path, self.filename), "w") as f: + json.dump(self.buildResult(), f, indent=2) + return "{}/{}".format(path, self.filename) + + +class Result: + """Class to hold the result of a check.""" + __slots__ = ("result", "msg", "headers", "data", "unformatted_headers", "unformatted_data", "recommended_action", "doc_url", "adjust_title") + + def __init__(self, result=PASS, msg="", headers=None, data=None, unformatted_headers=None, unformatted_data=None, recommended_action="", doc_url="", adjust_title=False): + self.result = result + self.msg = msg + self.headers = headers if headers is not None else [] + self.data = data if data is not None else [] + self.unformatted_headers = unformatted_headers if unformatted_headers is not None else [] + self.unformatted_data = unformatted_data if unformatted_data is not None else [] + self.recommended_action = recommended_action + self.doc_url = doc_url + self.adjust_title = adjust_title + + def as_dict(self): + return {slot: getattr(self, slot) for slot in self.__slots__} + + def as_dict_for_json_result(self): + return {slot: getattr(self, slot) for slot in self.__slots__ if slot != "adjust_title"} + + +def check_wrapper(check_title): + """ + Decorator to wrap a check function to handle the printing of title and results, + and to write the 
results in a file in a JSON format. + """ + def decorator(check_func): + @functools.wraps(check_func) + def wrapper(index, total_checks, *args, **kwargs): + # When init is True, we just initialize the result file and return + if kwargs.get("init") is True: + synth = AciResult(wrapper.__name__, check_title, "") + synth.writeResult() + return None + + try: + # Print `[Check 1/81] ...` + print_title(check_title, index, total_checks) + + # Run check, expecting it to return a `Result` object + r = check_func(*args, **kwargs) + + # Print `[Check 1/81] <title>... <msg> <result>\n<failure details>` + print_result(title=check_title, **r.as_dict()) + except Exception as e: + log.exception(e) + r = Result(result=ERROR, msg='Unexpected Error: {}'.format(e)) + print_result(title=check_title, **r.as_dict()) + finally: + # Write results in JSON + # Using `wrapper.__name__` instead of `check_func.__name` because + # both show the original check func name and `wrapper.__name__` can + # be dynamically changed inside each check func if needed. (mainly + # for test or debugging) + synth = AciResult(wrapper.__name__, check_title, "") + synth.updateWithResults(**r.as_dict_for_json_result()) + synth.writeResult() + return r.result + return wrapper + return decorator + + +def format_table(headers, data, + min_width=5, left_padding=2, hdr_sp='-', col_sp=' '): + """ get string results in table format + Args: + header (list): list of column headers (optional) + each header can either be a string representing the name or a + dictionary with following attributes: + { + name (str): column name + width (int or str): integer width of column. can also be a string 'auto' + which is based on the longest string in column + max_width (int): integer value of max width when combined with + } + data (list): list of rows, where each row is a list of values + corresponding to the appropriate header. If length of row + exceeds length of headers, it is is ignored. 
+ min_width (int, optional): minimum width enforced on any auto-calculated column. Defaults to 5. + left_padding (int, optional): number of spaces to 'pad' left most column. Defaults to 2. + hdr_sp (str, optional): print a separator string between hdr and data row. Defaults to '-'. + col_sp (str, optional): print a separator string between data columns. Defaults to ' '. + Returns: + str: table with columns aligned with spacing + """ + if type(data) is not list or len(data) == 0: + return "" + cl = 800 + col_widths = [] + rows = [] + + def update_col_widths(idx, new_width): + if len(col_widths) < idx + 1: + col_widths.append(new_width) + elif col_widths[idx] < new_width: + col_widths[idx] = new_width + + for row in data: + if type(row) is not list: + return "" + for idx, col in enumerate(row): + update_col_widths(idx, len(str(col))) + rows.append([str(col) for col in row]) + h_cols = [] + for idx, col in enumerate(headers): + if isinstance(col, str): + update_col_widths(idx, len(col)) + h_cols.append({'name': col, 'width': 'auto'}) + elif isinstance(col, dict): + name = col.get('name', '') + width = col.get('width', '') + max_w = col.get('max_width', 0) + update_col_widths(idx, len(name)) + if width == 'auto' and max_w: + try: + if int(max_w) < col_widths[idx]: + col_widths[idx] = int(max_w) + except ValueError: + max_w = 0 + else: + try: + col_widths[idx] = int(width) + except ValueError: + width = 'auto' + h_cols.append({'name': name, 'width': width}) + + # Adjust column width to fit the table with + recovery_width = 3 * min_width + total_width = sum(col_widths) + len(col_sp) * len(col_widths) + left_padding + for idx, h in enumerate(h_cols): + if total_width <= cl: break + if h['width'] == 'auto' and col_widths[idx] > recovery_width: + total_width -= col_widths[idx] - recovery_width + col_widths[idx] = recovery_width + + pad = ' ' * left_padding + output = [] + if headers: + output.append( + get_row(col_widths, [c['name'] for c in h_cols], col_sp, pad) + ) + if 
isinstance(hdr_sp, str): + if len(hdr_sp) > 0: + hsp_sp = hdr_sp[0] # only single char for hdr_sp + values = [hsp_sp * len(c['name']) for c in h_cols] + output.append( + get_row(col_widths, values, col_sp, pad) + ) + for row in rows: + output.append(get_row(col_widths, row, col_sp, pad)) + return '\n'.join(output) + + +def get_row(widths, values, spad=" ", lpad=""): + cols = [] + row_maxnum = 0 + for i, value in enumerate(values): + w = widths[i] if widths[i] > 0 else 1 + tw = TextWrapper(width=w) + lines = [] + for v in value.split('\n'): + lines += tw.wrap(v) + cols.append({'width': w, 'lines': lines}) + if row_maxnum < len(lines): row_maxnum = len(lines) + spad2 = ' ' * len(spad) # space separators except for the 1st line + output = [] + for i in range(row_maxnum): + row = [] + for c in cols: + if len(c['lines']) > i: + row.append('{:{}}'.format(c['lines'][i], c['width'])) + else: + row.append('{:{}}'.format('', c['width'])) + if not output: + output.append("%s%s" % (lpad, spad.join(row).rstrip())) + else: + output.append("%s%s" % (lpad, spad2.join(row).rstrip())) + return ('\n'.join(output).rstrip()) + + +def prints(objects, sep=' ', end='\n'): + with open(RESULT_FILE, 'a') as f: + print(objects, sep=sep, end=end, file=sys.stdout) + print(objects, sep=sep, end=end, file=f) + sys.stdout.flush() + f.flush() + + +def print_title(title, index=None, total=None): + if index and total: + prints('[Check{:3}/{}] {}... '.format(index, total, title), end='') + else: + prints('{:14}{}... '.format('', title), end='') + + +def print_result(title, result, msg='', + headers=None, data=None, + unformatted_headers=None, unformatted_data=None, + recommended_action='', + doc_url='', + adjust_title=False): + FULL_LEN = 138 # length of `[Check XX/YY] <title>... <msg> --padding-- <RESULT>` + CHECK_LEN = 18 # length of `[Check XX/YY] ... ` + padding = FULL_LEN - CHECK_LEN - len(title) - len(msg) + if adjust_title: + # adjust padding when the result is on the second line. 
+ # 1st: `[Check XX/YY] <title>... ` + # 2nd: ` <msg> --padding-- <RESULT>` + padding += len(title) + CHECK_LEN + if padding < len(result): + # when `msg` is too long (ex. unknown exception), `padding` may get shorter + # than what it's padding (`result`), or worse, may get negative. + # In such a case, keep one whitespace padding even if the full length gets longer. + padding = len(result) + 1 + output = '{}{:>{}}'.format(msg, result, padding) + if data: + data.sort() + output += '\n' + format_table(headers, data) + if unformatted_data: + unformatted_data.sort() + output += '\n\n' + format_table(unformatted_headers, unformatted_data) + if data or unformatted_data: + output += '\n' + if recommended_action: + output += '\n Recommended Action: %s' % recommended_action + if doc_url: + output += '\n Reference Document: %s' % doc_url + output += '\n' * 2 + prints(output) + + +def _icurl_error_handler(imdata): + if imdata and "error" in imdata[0]: + if "not found in class" in imdata[0]['error']['attributes']['text']: + raise OldVerPropNotFound('Your current ACI version does not have requested property') + elif "unresolved class for" in imdata[0]['error']['attributes']['text']: + raise OldVerClassNotFound('Your current ACI version does not have requested class') + elif "not found" in imdata[0]['error']['attributes']['text']: + raise OldVerClassNotFound('Your current ACI version does not have requested class') + else: + raise Exception('API call failed! Check debug log') + + +def _icurl(apitype, query, page=0, page_size=100000): + if apitype not in ['class', 'mo']: + print('invalid API type - %s' % apitype) + return [] + pre = '&' if '?' in query else '?' 
+ query += '{}page={}&page-size={}'.format(pre, page, page_size) + uri = 'http://127.0.0.1:7777/api/{}/{}'.format(apitype, query) + cmd = ['icurl', '-gs', uri] + log.info('cmd = ' + ' '.join(cmd)) + response = subprocess.check_output(cmd) + log.debug('response: ' + str(response)) + data = json.loads(response) + _icurl_error_handler(data['imdata']) + return data + + +def icurl(apitype, query, page_size=100000): + total_imdata = [] + total_cnt = 999999 + page = 0 + while total_cnt > len(total_imdata): + data = _icurl(apitype, query, page, page_size) + if not data['imdata']: + break + total_imdata += data['imdata'] + total_cnt = int(data['totalCount']) + page += 1 + return total_imdata + + +def run_cmd(cmd, splitlines=True): + """ + Run a shell command. + :param cmd: Command to run, can be a string or a list. + :param splitlines: If True, splits the output into a list of lines. + If False, returns the raw text output as a single string. + Returns the output of the command. + """ + if isinstance(cmd, list): + cmd = ' '.join(cmd) + try: + log.info('run_cmd = ' + cmd) + response = subprocess.check_output(cmd, shell=True).decode('utf-8') + log.debug('response: ' + str(response)) + if splitlines: + return response.splitlines() + return response + except subprocess.CalledProcessError as e: + log.error("Command '%s' failed with error: %s", cmd, str(e)) + raise e + + +def get_credentials(): + prints('To use a non-default Login Domain, enter apic#DOMAIN\\\\USERNAME') + while True: + usr = input('Enter username for APIC login : ') + if usr: break + while True: + pwd = getpass('Enter password for corresponding User : ') + if pwd: break + print('') + return usr, pwd + + +def get_current_version(arg_cversion): + """ Returns: AciVersion instance """ + if arg_cversion: + prints("Current APIC version is overridden to %s" % arg_cversion) + try: + current_version = AciVersion(arg_cversion) + except ValueError as e: + prints(e) + sys.exit(1) + return current_version + prints("Checking 
current APIC version...", end='') + firmwares = icurl('class', 'firmwareCtrlrRunning.json') + for firmware in firmwares: + if 'node-1' in firmware['firmwareCtrlrRunning']['attributes']['dn']: + apic1_version = firmware['firmwareCtrlrRunning']['attributes']['version'] + break + current_version = AciVersion(apic1_version) + prints('%s\n' % current_version) + return current_version + + +def get_target_version(arg_tversion): + """ Returns: AciVersion instance """ + if arg_tversion: + prints("Target APIC version is overridden to %s" % arg_tversion) + try: + target_version = AciVersion(arg_tversion) + except ValueError as e: + prints(e) + sys.exit(1) + return target_version + prints("Gathering APIC Versions from Firmware Repository...\n") + repo_list = [] + response_json = icurl('class', + 'firmwareFirmware.json?query-target-filter=and(wcard(firmwareFirmware.isoname,"aci-apic"),eq(firmwareFirmware.type,"controller"))') + if response_json: + for version in response_json: + repo_list.append(version['firmwareFirmware']['attributes']['isoname']) + repo_list.sort() + # Display version info to User + for i, value in enumerate(repo_list): + prints("[%s]: %s" % (i + 1, value)) + prints('') + + version_choice = None + while version_choice is None: + version_choice = input("What is the Target Version? : ") + try: + version_choice = int(version_choice) + if version_choice < 1 or version_choice > len(repo_list): raise ValueError("") + except ValueError: + prints("Please select a value between 1 and %s" % len(repo_list)) + version_choice = None + + version = repo_list[version_choice - 1] + target_version = AciVersion(version) + prints('\nYou have chosen version "%s"\n' % target_version) + return target_version + else: + prints("No Firmware Detected! Please Upload APIC Firmware and re-run the script.\n") + return None + + +def get_vpc_nodes(): + """ Returns list of VPC Node IDs; ['101', '102', etc...] 
""" + prints("Collecting VPC Node IDs...", end='') + vpc_nodes = [] + prot_pols = icurl('class', 'fabricNodePEp.json') + for vpc_node in prot_pols: + vpc_nodes.append(vpc_node['fabricNodePEp']['attributes']['id']) + vpc_nodes.sort() + # Display up to 4 node IDs + max_display = 4 + if len(vpc_nodes) <= max_display: + prints('%s\n' % ", ".join(vpc_nodes)) + else: + omitted_count = len(vpc_nodes) - max_display + prints('%s, ... (and %d more)\n' % (", ".join(vpc_nodes[:max_display]), omitted_count)) + return vpc_nodes + + +def get_switch_version(): + """ Returns lowest switch version as AciVersion instance """ + prints("Gathering Lowest Switch Version from Firmware Repository...", end='') + firmwares = icurl('class', 'firmwareRunning.json') + versions = set() + + for firmware in firmwares: + versions.add(firmware['firmwareRunning']['attributes']['peVer']) + + if versions: + lowest_sw_ver = AciVersion(versions.pop()) + for version in versions: + version = AciVersion(version) + if lowest_sw_ver.newer_than(str(version)): + lowest_sw_ver = version + prints('%s\n' % lowest_sw_ver) + return lowest_sw_ver + else: + prints("No Switches Detected! 
Join switches to the fabric then re-run this script.\n")
        return None


@check_wrapper(check_title="APIC Cluster Status")
def apic_cluster_health_check(cversion, **kwargs):
    # Fails when any APIC's cluster view (infraWiNode) reports a health state
    # other than "fully-fit"; errors out when no infraWiNode objects exist.
    result = FAIL_UF
    msg = ''
    headers = ['APIC-ID\n(Seen By)', 'APIC-ID\n(Affected)', 'Admin State', 'Operational State', 'Health State']
    unformatted_headers = ['Affected DN', 'Admin State', 'Operational State', 'Health State']
    data = []
    unformatted_data = []
    doc_url = 'http://cs.co/9003ybZ1d'  # ACI Troubleshooting Guide 2nd Edition
    if cversion.older_than("4.2(1a)"):
        recommended_action = 'Follow "Initial Fabric Setup" in ACI Troubleshooting Guide 2nd Edition'
    else:
        recommended_action = 'Troubleshoot by running "acidiag cluster" on APIC CLI'
    # Each infraWiNode DN encodes both the observing APIC (node) and the
    # observed APIC (winode).
    dn_regex = node_regex + r'/av/node-(?P<winode>\d)'
    infraWiNodes = icurl('class', 'infraWiNode.json')
    for av in infraWiNodes:
        av_attr = av['infraWiNode']['attributes']
        if av_attr['health'] == 'fully-fit':
            continue
        dn = re.search(dn_regex, av_attr['dn'])
        if dn:
            data.append([dn.group('node'), dn.group('winode'),
                         av_attr['adminSt'], av_attr['operSt'], av_attr['health']])
        else:
            # DN did not match the expected pattern; report it raw.
            unformatted_data.append([av_attr['dn'], av_attr['adminSt'],
                                     av_attr['operSt'], av_attr['health']])
    if not infraWiNodes:
        result = ERROR
        msg = 'infraWiNode (Appliance Vector) not found!'
    elif not data and not unformatted_data:
        result = PASS
    return Result(result=result, msg=msg, headers=headers, data=data, unformatted_headers=unformatted_headers, unformatted_data=unformatted_data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="Switch Fabric Membership Status")
def switch_status_check(**kwargs):
    # Flags every non-controller fabricNode whose fabricSt is not "active".
    result = FAIL_UF
    msg = ''
    headers = ['Pod-ID', 'Node-ID', 'State']
    data = []
    recommended_action = 'Bring this node back to "active"'
    # fabricNode.fabricSt shows `disabled` for both Decommissioned and Maintenance (GIR).
+ # fabricRsDecommissionNode.debug==yes is required to show `disabled (Maintenance)`. + fabricNodes = icurl('class', 'fabricNode.json?&query-target-filter=ne(fabricNode.role,"controller")') + girNodes = icurl('class', + 'fabricRsDecommissionNode.json?&query-target-filter=eq(fabricRsDecommissionNode.debug,"yes")') + for fabricNode in fabricNodes: + state = fabricNode['fabricNode']['attributes']['fabricSt'] + if state == 'active': + continue + dn = re.search(node_regex, fabricNode['fabricNode']['attributes']['dn']) + pod_id = dn.group("pod") + node_id = dn.group("node") + for gir in girNodes: + if node_id == gir['fabricRsDecommissionNode']['attributes']['targetId']: + state = state + ' (Maintenance)' + data.append([pod_id, node_id, state]) + if not fabricNodes: + result = MANUAL + msg = 'Switch fabricNode not found!' + elif not data: + result = PASS + return Result(result=result, msg=msg, headers=headers, data=data, recommended_action=recommended_action) + + +@check_wrapper(check_title="Firmware/Maintenance Groups when crossing 4.0 Release") +def maintp_grp_crossing_4_0_check(cversion, tversion, **kwargs): + result = PASS + msg = '' + headers = ["Group Name", "Group Type"] + data = [] + recommended_action = 'Remove the group prior to APIC upgrade. Create a new switch group once APICs are upgraded to post-4.0.' 
# NOTE(review): continuation of maintp_grp_crossing_4_0_check (header is on the
# previous line of this patch).
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#firmwaremaintenance-groups-when-crossing-40-release"
    if (int(cversion.major1) >= 4) or (tversion and (int(tversion.major1) <= 3)):
        # Already on 4.x+, or target stays below 4.0 - not crossing the 4.0 boundary.
        result = NA
        msg = VER_NOT_AFFECTED
    elif (int(cversion.major1) < 4) and not tversion:
        # Pre-4.0 with no target version supplied - cannot evaluate automatically.
        result = MANUAL
        msg = TVER_MISSING
    else:
        # Crossing 4.0: any existing maintenance/firmware group is a finding.
        groups = icurl('mo', '/uni/fabric.json?query-target=children&target-subtree-class=maintMaintP,firmwareFwP')
        for g in groups:
            result = FAIL_O
            if g.get('maintMaintP'):
                data.append([g['maintMaintP']['attributes']['name'], 'Maintenance Group'])
            else:
                data.append([g['firmwareFwP']['attributes']['name'], 'Firmware Group'])
    return Result(result=result, msg=msg, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="NTP Status")
def ntp_status_check(**kargs):
    # NOTE(review): `**kargs` is a typo for `**kwargs` used by every other
    # check; harmless because the wrapper passes keyword arguments through,
    # but worth normalizing for consistency.
    result = FAIL_UF
    headers = ["Pod-ID", "Node-ID"]
    data = []
    recommended_action = 'Not Synchronized. Check NTP config and NTP server reachability.'
+ doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#ntp-status" + fabricNodes = icurl('class', 'fabricNode.json') + nodes = [fn['fabricNode']['attributes']['id'] for fn in fabricNodes] + apicNTPs = icurl('class', 'datetimeNtpq.json') + switchNTPs = icurl('class', 'datetimeClkPol.json') + for apicNTP in apicNTPs: + if '*' == apicNTP['datetimeNtpq']['attributes']['tally']: + dn = re.search(node_regex, apicNTP['datetimeNtpq']['attributes']['dn']) + if dn and dn.group('node') in nodes: + nodes.remove(dn.group('node')) + for switchNTP in switchNTPs: + if 'synced' in switchNTP['datetimeClkPol']['attributes']['srvStatus']: + dn = re.search(node_regex, switchNTP['datetimeClkPol']['attributes']['dn']) + if dn and dn.group('node') in nodes: + nodes.remove(dn.group('node')) + for fn in fabricNodes: + if fn['fabricNode']['attributes']['id'] in nodes: + dn = re.search(node_regex, fn['fabricNode']['attributes']['dn']) + data.append([dn.group('pod'), dn.group('node')]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Features that need to be Disabled prior to Upgrade") +def features_to_disable_check(cversion, tversion, **kwargs): + result = FAIL_O + headers = ["Feature", "Name", "Status", "Recommended Action"] + data = [] + recommended_action = 'Disable the feature prior to upgrade' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#features-that-need-to-be-disabled-prior-to-upgrade" + + apPlugins = icurl('class', 'apPlugin.json?&query-target-filter=ne(apPlugin.pluginSt,"inactive")') + infraMOs = icurl('mo', 'uni/infra.json?query-target=subtree&target-subtree-class=infrazoneZone,epControlP') + default_apps = ['IntersightDC', 'NIALite', 'NIBASE', 'ApicVision'] + default_appDNs = ['pluginContr/plugin-Cisco_' + app for app in default_apps] + if apPlugins: + for apPlugin in 
apPlugins: + if apPlugin['apPlugin']['attributes']['dn'] not in default_appDNs: + name = apPlugin['apPlugin']['attributes']['name'] + pluginSt = apPlugin['apPlugin']['attributes']['pluginSt'] + data.append(['App Center', name, pluginSt, 'Disable the app']) + for mo in infraMOs: + if mo.get('infrazoneZone') and mo['infrazoneZone']['attributes']['deplMode'] == 'disabled': + name = mo['infrazoneZone']['attributes']['name'] + data.append(['Config Zone', name, 'Locked', + 'Change the status to "Open" or remove the zone']) + elif mo.get('epControlP') and mo['epControlP']['attributes']['adminSt'] == 'enabled': + ra = '' + if not tversion: + ra = 'Disable Rogue EP during the upgrade if your current version is 4.1 or your target version is 4.1' + else: + cv_is_4_1 = cversion.major1 == '4' and cversion.major2 == '1' + tv_is_4_1 = tversion.major1 == '4' and tversion.major2 == '1' + if cv_is_4_1 and not tv_is_4_1: + ra = 'Disable Rogue EP during the upgrade because your current version is 4.1' + elif not cv_is_4_1 and tv_is_4_1: + ra = 'Disable Rogue EP during the upgrade because your target version is 4.1' + if ra: + name = mo['epControlP']['attributes']['name'] + data.append(['Rogue Endpoint', name, 'Enabled', ra]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Switch Upgrade Group Guidelines") +def switch_group_guideline_check(**kwargs): + result = FAIL_O + headers = ['Group Name', 'Pod-ID', 'Node-IDs', 'Failure Reason'] + data = [] + recommended_action = 'Upgrade nodes in each line above separately in another group.' 
+ doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#switch-upgrade-group-guidelines' + + maints = icurl('class', 'maintMaintGrp.json?rsp-subtree=children') + if not maints: + return Result(result=MANUAL, msg='No upgrade groups found!', doc_url=doc_url) + + spine_type = ['', 'RR ', 'IPN/ISN '] + f_spines = [defaultdict(list) for t in spine_type] + reason = 'All {}spine nodes in this pod are in the same group.' + reasons = [reason.format(t) for t in spine_type] + reason_apicleaf = 'All leaf nodes connected to APIC {} are in the same group.' + reason_vpc = 'Both leaf nodes in the same vPC pair are in the same group.' + + nodes = {} + fabricNodes = icurl('class', 'fabricNode.json') + for fn in fabricNodes: + attr = fn['fabricNode']['attributes'] + nodes[attr['dn']] = {'role': attr['role'], 'nodeType': attr['nodeType']} + + for key in nodes: + if nodes[key]['role'] == 'spine': + dn = re.search(node_regex, key) + if not dn: + log.error('Failed to parse - %s', key) + continue + f_spines[0][dn.group('pod')].append(int(dn.group('node'))) + + bgpRRs = icurl('class', 'bgpRRNodePEp.json') + for bgpRR in bgpRRs: + pod = bgpRR['bgpRRNodePEp']['attributes']['podId'] + node = bgpRR['bgpRRNodePEp']['attributes']['id'] + f_spines[1][pod].append(int(node)) + + infraL3Outs = icurl('class', + 'l3extRsNodeL3OutAtt.json?query-target-filter=wcard(l3extRsNodeL3OutAtt.dn,"tn-infra/")') + for infraL3Out in infraL3Outs: + tDn = infraL3Out['l3extRsNodeL3OutAtt']['attributes']['tDn'] + if nodes.get(tDn, {}).get('role') == 'spine': + dn = re.search(node_regex, tDn) + if not dn: + log.error('Failed to parse - %s', tDn) + continue + f_spines[2][dn.group('pod')].append(int(dn.group('node'))) + + apic_leafs = defaultdict(set) + lldps = icurl('class', 'lldpCtrlrAdjEp.json') + for lldp in lldps: + dn = re.search(node_regex, lldp['lldpCtrlrAdjEp']['attributes']['dn']) + if not dn: + log.error('Failed to parse - %s', lldp['lldpCtrlrAdjEp']['attributes']['dn']) + 
continue + apic_id_pod = '-'.join([lldp['lldpCtrlrAdjEp']['attributes']['id'], dn.group('pod')]) + apic_leafs[apic_id_pod].add(int(dn.group('node'))) + + vpcs = icurl('class', 'fabricExplicitGEp.json?rsp-subtree=children&rsp-subtree-class=fabricNodePEp') + + for m in maints: + m_nodes = [] + m_name = '' + for mc in m['maintMaintGrp']['children']: + if mc.get('maintRsMgrpp'): + m_name = mc['maintRsMgrpp']['attributes']['tnMaintMaintPName'] + elif mc.get('fabricNodeBlk'): + m_nodes += range(int(mc['fabricNodeBlk']['attributes']['from_']), + int(mc['fabricNodeBlk']['attributes']['to_']) + 1) + + m_spines = [defaultdict(list) for t in spine_type] + for m_node in m_nodes: + for idx, fabric in enumerate(f_spines): + for pod in fabric: + if m_node in fabric[pod]: + m_spines[idx][pod].append(m_node) + break + for m, f, r in zip(m_spines, f_spines, reasons): + for pod in m: + if len(m[pod]) == len(f[pod]): + data.append([m_name, pod, ','.join(str(x) for x in m[pod]), r]) + + for apic_id_pod in apic_leafs: + if apic_leafs[apic_id_pod] == apic_leafs[apic_id_pod].intersection(m_nodes): + pod = apic_id_pod.split('-')[1] + apic_id = apic_id_pod.split('-')[0] + data.append([m_name, pod, ','.join(str(x) for x in apic_leafs[apic_id_pod]), + reason_apicleaf.format(apic_id)]) + + for vpc in vpcs: + m_vpc_peers = [] + for vpc_peer in vpc['fabricExplicitGEp']['children']: + if int(vpc_peer['fabricNodePEp']['attributes']['id']) in m_nodes: + m_vpc_peers.append({ + 'node': vpc_peer['fabricNodePEp']['attributes']['id'], + 'pod': vpc_peer['fabricNodePEp']['attributes']['podId'] + }) + if len(m_vpc_peers) > 1: + data.append([m_name, m_vpc_peers[0]['pod'], + ','.join(x['node'] for x in m_vpc_peers), + reason_vpc]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Switch Node /bootflash usage") +def switch_bootflash_usage_check(tversion, **kwargs): + result = 
FAIL_UF + msg = '' + headers = ["Pod-ID", "Node-ID", "Utilization"] + data = [] + recommended_action = "Over 50% usage! Contact Cisco TAC for Support" + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#switch-node-bootflash-usage" + + partitions_api = 'eqptcapacityFSPartition.json' + partitions_api += '?query-target-filter=eq(eqptcapacityFSPartition.path,"/bootflash")' + + download_sts_api = 'maintUpgJob.json' + download_sts_api += '?query-target-filter=and(eq(maintUpgJob.dnldStatus,"downloaded")' + download_sts_api += ',eq(maintUpgJob.desiredVersion,"n9000-1{}"))'.format(tversion) + + partitions = icurl('class', partitions_api) + if not partitions: + return Result(result=ERROR, msg='bootflash objects not found', doc_url=doc_url) + + predownloaded_nodes = [] + try: + download_sts = icurl('class', download_sts_api) + except OldVerPropNotFound: + # Older versions don't have 'dnldStatus' param + download_sts = [] + + for maintUpgJob in download_sts: + dn = re.search(node_regex, maintUpgJob['maintUpgJob']['attributes']['dn']) + node = dn.group("node") + predownloaded_nodes.append(node) + + for eqptcapacityFSPartition in partitions: + dn = re.search(node_regex, eqptcapacityFSPartition['eqptcapacityFSPartition']['attributes']['dn']) + pod = dn.group("pod") + node = dn.group("node") + avail = int(eqptcapacityFSPartition['eqptcapacityFSPartition']['attributes']['avail']) + used = int(eqptcapacityFSPartition['eqptcapacityFSPartition']['attributes']['used']) + + usage = (used / (avail + used)) * 100 + if (usage >= 50) and (node not in predownloaded_nodes): + data.append([pod, node, usage]) + + if not data: + result = PASS + msg = 'All below 50% or pre-downloaded' + return Result(result=result, msg=msg, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="L3Out MTU") +def l3out_mtu_check(**kwargs): + result = MANUAL + msg = "" + headers = ["Tenant", "L3Out", "Node Profile", 
"Interface Profile", + "Pod", "Node", "Interface", "Type", "VLAN", "IP Address", "MTU"] + data = [] + unformatted_headers = ['L3 DN', "Type", "IP Address", "MTU"] + unformatted_data = [] + recommended_action = 'Verify that these MTUs match with connected devices' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l3out-mtu" + + fabricMtu = None + regex_prefix = r'tn-(?P<tenant>[^/]+)/out-(?P<l3out>[^/]+)/lnodep-(?P<lnodep>[^/]+)/lifp-(?P<lifp>[^/]+)' + path_dn_regex = regex_prefix + r'/rspathL3OutAtt-\[topology/pod-(?P<pod>[^/]+)/.*paths-(?P<node>\d{3,4}|\d{3,4}-\d{3,4})/pathep-\[(?P<int>.+)\]\]' + vlif_dn_regex = regex_prefix + r'/vlifp-\[topology/pod-(?P<pod>[^/]+)/node-(?P<node>\d{3,4})\]-\[vlan-(\d{1,4})\]' + l3extPaths = icurl('class', 'l3extRsPathL3OutAtt.json') # Regular L3Out + try: + l3extVLIfPs = icurl('class', 'l3extVirtualLIfP.json') # Floating L3Out + except OldVerClassNotFound: + l3extVLIfPs = [] # Pre 4.2 did not have this class + for mo in chain(l3extPaths, l3extVLIfPs): + if fabricMtu is None: + l2Pols = icurl('mo', 'uni/fabric/l2pol-default.json') + fabricMtu = l2Pols[0]['l2InstPol']['attributes']['fabricMtu'] + + is_floating = True if mo.get('l3extVirtualLIfP') else False + + mo_class = 'l3extVirtualLIfP' if is_floating else 'l3extRsPathL3OutAtt' + mtu = mo[mo_class]['attributes']['mtu'] + addr = mo[mo_class]['attributes']['addr'] + vlan = mo[mo_class]['attributes']['encap'] + iftype = mo[mo_class]['attributes']['ifInstT'] + # Differentiate between regular and floating SVI. Both use ext-svi in the object. 
+ if is_floating: + iftype = "floating svi" + + if mtu == 'inherit': + mtu += " (%s)" % fabricMtu + + dn_regex = vlif_dn_regex if is_floating else path_dn_regex + dn = re.search(dn_regex, mo[mo_class]['attributes']['dn']) + if dn: + data.append([ + dn.group("tenant"), + dn.group("l3out"), + dn.group("lnodep"), + dn.group("lifp"), + dn.group("pod"), + dn.group("node"), + dn.group("int") if not is_floating else '---', + iftype, + vlan, + addr, + mtu, + ]) + else: + unformatted_data.append([mo[mo_class]['attributes']['dn'], iftype, addr, mtu]) + + if not data and not unformatted_data: + result = NA + msg = 'No L3Out Interfaces found' + return Result( + result=result, + msg=msg, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + + +@check_wrapper(check_title="L3 Port Config (F0467 port-configured-as-l2)") +def port_configured_as_l2_check(**kwargs): + result = FAIL_O + headers = ['Fault', 'Tenant', 'L3Out', 'Node', 'Path'] + data = [] + unformatted_headers = ['Fault', 'Fault DN'] + unformatted_data = [] + recommended_action = 'Resolve the conflict by removing this config or other configs using this port as L2' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l2l3-port-config" + + l2dn_regex = r'uni/tn-(?P<tenant>[^/]+)/out-(?P<l3out>[^/]+)/fd-\[.+rtdOutDef-.+/node-(?P<node>\d{3,4})/(?P<path>.+)/nwissues' + l2response_json = icurl('class', + 'faultDelegate.json?&query-target-filter=wcard(faultInst.changeSet,"port-configured-as-l2")') + for faultDelegate in l2response_json: + fc = faultDelegate['faultDelegate']['attributes']['code'] + dn = re.search(l2dn_regex, faultDelegate['faultDelegate']['attributes']['dn']) + if dn: + data.append([fc, dn.group('tenant'), dn.group('l3out'), dn.group('node'), dn.group('path')]) + else: + unformatted_data.append([fc, faultDelegate['faultDelegate']['attributes']['dn']]) + 
@check_wrapper(check_title="L2 Port Config (F0467 port-configured-as-l3)")
def port_configured_as_l3_check(**kwargs):
    """Report F0467 `port-configured-as-l3` faults: an EPG (L2) deployment was
    rejected because the same port is already in use as L3."""
    headers = ['Fault', 'Pod', 'Node', 'Tenant', 'AP', 'EPG', 'Port']
    unformatted_headers = ['Fault', 'Fault DN']
    data = []
    unformatted_data = []
    recommended_action = 'Resolve the conflict by removing this config or other configs using this port as L3'
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l2l3-port-config"

    pattern = re.compile(
        r'topology/(?P<pod>[^/]+)/(?P<node>[^/]+)/.+uni/tn-(?P<tenant>[^/]+)/ap-(?P<ap>[^/]+)/epg-(?P<epg>\w+).+(?P<port>eth\d+/\d+)'
    )
    faults = icurl('class',
                   'faultDelegate.json?&query-target-filter=wcard(faultInst.changeSet,"port-configured-as-l3")')
    for fault in faults:
        attrs = fault['faultDelegate']['attributes']
        match = pattern.search(attrs['dn'])
        if match is None:
            # DN shape unknown; surface it without parsing.
            unformatted_data.append([attrs['code'], attrs['dn']])
            continue
        row = [attrs['code']]
        row += [match.group(g) for g in ('pod', 'node', 'tenant', 'ap', 'epg', 'port')]
        data.append(row)

    result = PASS if not data and not unformatted_data else FAIL_O
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )
["VRF Name", "Prefix", "L3Out EPGs without F0467", "L3Out EPGs with F0467"] + headers_old = ["Fault", "Failed L3Out EPG"] + data = [] + unformatted_headers = ['Fault', 'Fault Description', 'Fault DN'] + unformatted_data = [] + recommended_action = 'Resolve the conflict by removing the overlapping prefix from the faulted L3Out EPG.' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l3out-subnets" + + # Old versions (pre-CSCvq93592) do not show VRF VNID and prefix in use (2nd line) + desc_regex = r'Configuration failed for (?P<failedEpg>.+) due to Prefix Entry Already Used in Another EPG' + desc_regex += r'(.+Prefix entry sys/ctx-\[vxlan-(?P<vrfvnid>\d+)\]/pfx-\[(?P<prefixInUse>.+)\] is in use)?' + + filter = '?query-target-filter=and(wcard(faultInst.changeSet,"prefix-entry-already-in-use"),wcard(faultInst.dn,"uni/epp/rtd"))' + faultInsts = icurl("class", "faultInst.json" + filter) + if not faultInsts: + return Result(result=PASS) + + vnid2vrf = {} + fvCtxs = icurl("class", "fvCtx.json") + for fvCtx in fvCtxs: + vrf_vnid = fvCtx["fvCtx"]["attributes"]["scope"] + vrf_dn = fvCtx["fvCtx"]["attributes"]["dn"] + vnid2vrf[vrf_vnid] = vrf_dn + + conflicts = defaultdict(dict) # vrf -> prefix -> extepgs, faulted_extepgs + for faultInst in faultInsts: + code = faultInst["faultInst"]["attributes"]["code"] + desc = re.search(desc_regex, faultInst["faultInst"]["attributes"]["descr"]) + if not desc: + unformatted_data.append([ + code, + faultInst["faultInst"]["attributes"]["descr"], + faultInst["faultInst"]["attributes"]["dn"], + ]) + continue + + extepg_dn = desc.group("failedEpg") + vrf_vnid = desc.group("vrfvnid") if desc.group("vrfvnid") else "_" + vrf_dn = vnid2vrf.get(vrf_vnid, "_") + prefix = desc.group("prefixInUse") if desc.group("prefixInUse") else "_" + + # When the L3Out is deployed on multiple switches, the same fault + # is raised more than once. Skip dup. 
+ # Old ver: `vrf_dn`, `prefix` are always "_" -> keep one extepg, all in (_, _) + # New ver: `vrf_dn`, `prefix` are real values -> keep one extepg per (vrf, prefix) + if prefix not in conflicts[vrf_dn]: + # Should be only one extepg without a fault per prefix. + # But use `set()` just in case. + conflicts[vrf_dn][prefix] = {"extepgs": set(), "faulted_extepgs": set()} + conflicts[vrf_dn][prefix]["faulted_extepgs"].add(extepg_dn) + + # Old ver: print only the L3Out EPGs with faults + if conflicts.get("_", {}).get("_", {}).get("faulted_extepgs"): + data = [["F0467", epg] for epg in conflicts["_"]["_"]["faulted_extepgs"]] + if not data and not unformatted_data: + result = PASS + return Result( + result=result, + headers=headers_old, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + + # Proceed further only for new versions with VRF/prefix data in faults + # Get L3Out DNs in the VRFs mentioned by the faults + l3out2vrf = {} + l3extRsEctxes = icurl("class", "l3extRsEctx.json") + for l3extRsEctx in l3extRsEctxes: + vrf_dn = l3extRsEctx["l3extRsEctx"]["attributes"]["tDn"] + if vrf_dn in conflicts: + # l3extRsEctx.dn is always L3Out DN + "/rsectx" + l3out_dn = l3extRsEctx["l3extRsEctx"]["attributes"]["dn"].split("/rsectx")[0] + l3out2vrf[l3out_dn] = vrf_dn + + # Get conflicting l3extSubnets + l3extSubnets = icurl("class", "l3extSubnet.json") + for l3extSubnet in l3extSubnets: + l3extSubnet_attr = l3extSubnet["l3extSubnet"]["attributes"] + l3out_dn = l3extSubnet_attr["dn"].split("/instP-")[0] + vrf_dn = l3out2vrf.get(l3out_dn) + if not vrf_dn: + continue + # F0467 is only for import-security + if "import-security" not in l3extSubnet_attr["scope"]: + continue + prefix = l3extSubnet_attr["ip"] + if prefix not in conflicts[vrf_dn]: + continue + extepg_dn = l3extSubnet_attr["dn"].split("/extsubnet-")[0] + if extepg_dn not in conflicts[vrf_dn][prefix]["faulted_extepgs"]: 
@check_wrapper(check_title="Encap Already In Use (F0467 encap-already-in-use)")
def encap_already_in_use_check(**kwargs):
    """Fail when F0467 `encap-already-in-use` faults exist, i.e. an EPG or
    L3Out could not deploy a VLAN encap because another EPG/L3Out already
    owns it on the same node/port.

    When the fault description does not carry the VLAN (older versions),
    the overlapping encaps are derived from fvIfConn objects instead.
    """
    result = FAIL_O
    headers = ["Faulted EPG/L3Out", "Node", "Port", "In Use Encap(s)", "In Use by EPG/L3Out"]
    data = []
    unformatted_headers = ['Fault Description']
    unformatted_data = []
    recommended_action = 'Resolve the overlapping encap configuration prior to upgrade'
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#encap-already-in-use"

    # <port> can be `ethX/X` or the name of I/F policy group
    # <vlan> is not there for older versions
    desc_regex = r'Configuration failed for (?P<failed>.+) node (?P<node>\d+) (?P<port>.+) due to .* Encap (\(vlan-(?P<vlan>\d+)\) )?is already in use by (?P<inuse>.+);'

    faultInsts = icurl('class',
                       'faultInst.json?query-target-filter=wcard(faultInst.descr,"encap-already-in-use")')
    fvIfConns = []  # fetched lazily, only if some fault lacks the VLAN ID
    for faultInst in faultInsts:
        desc = re.search(desc_regex, faultInst['faultInst']['attributes']['descr'])
        if desc:
            failed_dn = desc.group("failed")
            node_id = desc.group("node")
            port_id = desc.group("port")
            vlan_id = desc.group("vlan")
            # `inuse` is colon-separated: 3 tokens for an EPG (tn:ap:epg),
            # 4 tokens for an L3Out (tn:..:l3out:..).
            inuse_list = desc.group("inuse").split(":")
            if len(inuse_list) == 3:
                inuse_dn = "uni/tn-{0}/ap-{1}/epg-{2}".format(*inuse_list)
            elif len(inuse_list) == 4:
                inuse_dn = "uni/tn-{0}/out-{2}".format(*inuse_list)
            else:
                # Fix: an unexpected token count previously left `inuse_dn`
                # unbound and raised UnboundLocalError below. Fall back to
                # the raw text so the fault is still reported.
                inuse_dn = desc.group("inuse")

            # Get already-in-use encap(s) from fvIfConn when a fault doesn't include encap
            if vlan_id is None:
                faulted_epg_encaps = []
                in_use_epg_encaps = []
                if not fvIfConns:
                    fvIfConns = icurl('class', 'fvIfConn.json')
                for fvIfConn in fvIfConns:
                    dn = fvIfConn['fvIfConn']['attributes']['dn']
                    encap = fvIfConn['fvIfConn']['attributes']['encap']
                    if (failed_dn in dn) and ("node-" + node_id in dn):
                        if encap not in faulted_epg_encaps:
                            faulted_epg_encaps.append(encap)

                    if (inuse_dn in dn) and ("node-" + node_id in dn):
                        if encap not in in_use_epg_encaps:
                            in_use_epg_encaps.append(encap)

                # Encaps present on both sides are the actual conflict.
                overlapping_encaps = [x for x in in_use_epg_encaps if x in faulted_epg_encaps]
                vlan_id = ",".join(overlapping_encaps)

            data.append([failed_dn, node_id, port_id, vlan_id, inuse_dn])
        else:
            unformatted_data.append([faultInst['faultInst']['attributes']['descr']])

    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )
@check_wrapper(check_title="BD Subnets (F1425 subnet-overlap)")
def bd_subnet_overlap_check(**kwargs):
    """Fail when F1425 `subnet-overlap` faults exist, i.e. BD subnets
    overlap within a VRF on a node."""
    result = FAIL_O
    headers = ["Fault", "Pod", "Node", "VRF", "Interface", "Address"]
    data = []
    unformatted_headers = ['Fault', 'Fault DN']
    unformatted_data = []
    recommended_action = 'Resolve the conflict by removing BD subnets causing the overlap'
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#bd-subnets"

    dn_regex = node_regex + r'/.+dom-(?P<vrf>[^/]+)/if-(?P<int>[^/]+)/addr-\[(?P<addr>[^/]+/\d{2})'
    faultInsts = icurl('class', 'faultInst.json?query-target-filter=wcard(faultInst.changeSet,"subnet-overlap")')
    if faultInsts:
        for faultInst in faultInsts:
            fc = faultInst['faultInst']['attributes']['code']
            # The wcard filter can match other codes; only F1425 is relevant.
            if fc == "F1425":
                dn_array = re.search(dn_regex, faultInst['faultInst']['attributes']['dn'])
                if dn_array:
                    data.append([fc, dn_array.group("pod"), dn_array.group("node"), dn_array.group("vrf"),
                                 dn_array.group("int"), dn_array.group("addr")])
                else:
                    # Unparsable fault DN: report raw.
                    unformatted_data.append([fc, faultInst['faultInst']['attributes']['dn']])
    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )
@check_wrapper(check_title="BD Subnets (F0469 duplicate-subnets-within-ctx)")
def bd_duplicate_subnet_check(**kwargs):
    """Fail when F0469 `duplicate-subnets-within-ctx` faults exist, i.e. two
    BDs carry the same subnet in one VRF; the two BD names are parsed out
    of the fault description."""
    result = FAIL_O
    headers = ["Fault", "Pod", "Node", "Bridge Domain 1", "Bridge Domain 2"]
    data = []
    unformatted_headers = ['Fault', 'Fault DN', 'Fault Description']
    unformatted_data = []
    recommended_action = 'Resolve the conflict by removing BD subnets causing the duplicate'
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#bd-subnets"

    descr_regex = r'duplicate-subnets-within-ctx: (?P<bd1>.+)\s,(?P<bd2>.+)'
    faultInsts = icurl('class',
                       'faultInst.json?query-target-filter=wcard(faultInst.changeSet,"duplicate-subnets-within-ctx")')
    for faultInst in faultInsts:
        fc = faultInst['faultInst']['attributes']['code']
        dn = re.search(node_regex, faultInst['faultInst']['attributes']['dn'])
        descr = re.search(descr_regex, faultInst['faultInst']['attributes']['descr'])
        # Need both the node location (from DN) and the two BDs (from descr).
        if dn and descr:
            data.append([fc, dn.group("pod"), dn.group("node"), descr.group("bd1"), descr.group("bd2")])
        else:
            unformatted_data.append([fc, faultInst['faultInst']['attributes']['dn'], faultInst['faultInst']['attributes']['descr']])
    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )
@check_wrapper(check_title="HW Programming Failure (F3544 L3Out Prefixes, F3545 Contracts, actrl-resource-unavailable)")
def hw_program_fail_check(cversion, **kwargs):
    """Check for hardware programming failures (TCAM/LPM exhaustion).

    On 4.1(1a)+ this looks for faults F3544/F3545. Older versions do not
    raise those faults, so the check degrades to a MANUAL verification of
    the actrlRule/actrlPfxEntry object classes.

    :param cversion: current APIC version (AciVersion-like object).
    """
    result = FAIL_O
    headers = ["Fault", "Pod", "Node", "Fault Description", "Recommended Action"]
    data = []
    unformatted_headers = ['Fault', 'Fault DN', 'Fault Description', 'Recommended Action']
    unformatted_data = []
    recommended_action = {
        'actrlRule': 'Check that "operSt" are set to "enabled". F3545 does not exist on this version.',
        'actrlPfxEntry': 'Check that "operSt" are set to "enabled". F3544 does not exist on this version.',
        'F3544': 'Ensure that LPM and host routes usage are below the capacity and resolve the fault',
        'F3545': 'Ensure that Policy CAM usage is below the capacity and resolve the fault'
    }
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#hw-programming-failure"

    # Faults F3544 and F3545 don't exist until 4.1(1a)+
    if cversion.older_than("4.1(1a)"):
        headers = ["Object Class", "Recommended Action"]
        classes = ["actrlRule", "actrlPfxEntry"]
        result = MANUAL

        for entry in classes:
            data.append([entry, recommended_action.get(entry, "")])
    else:
        faultInsts = icurl('class',
                           'faultInst.json?query-target-filter=or(eq(faultInst.code,"F3544"),eq(faultInst.code,"F3545"))')
        for faultInst in faultInsts:
            fc = faultInst['faultInst']['attributes']['code']
            dn = re.search(node_regex, faultInst['faultInst']['attributes']['dn'])
            if dn:
                data.append([fc, dn.group('pod'), dn.group('node'),
                             faultInst['faultInst']['attributes']['descr'],
                             recommended_action.get(fc, 'Resolve the fault')])
            else:
                unformatted_data.append([
                    fc, faultInst['faultInst']['attributes']['dn'],
                    faultInst['faultInst']['attributes']['descr'],
                    recommended_action.get(fc, 'Resolve the fault')])
    # Only the fault path can leave both lists empty (MANUAL path always adds rows).
    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        doc_url=doc_url,
    )
@check_wrapper(check_title="Switch SSD Health (F3073, F3074 equipment-flash-warning)")
def switch_ssd_check(**kwargs):
    """Fail when switch SSD lifetime faults exist: F3073 (90% endurance
    crossed — replace) or F3074 (80% crossed — monitor only)."""
    result = FAIL_O
    headers = ["Fault", "Pod", "Node", "SSD Model", "% Threshold Crossed", "Recommended Action"]
    data = []
    unformatted_headers = ["Fault", "Fault DN", "% Threshold Crossed", "Recommended Action"]
    unformatted_data = []
    thresh = {'F3073': '90%', 'F3074': '80%'}
    recommended_action = {
        'F3073': 'Contact Cisco TAC for replacement procedure',
        'F3074': 'Monitor (no impact to upgrades)'
    }
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#switch-ssd-health"

    # SSD model is embedded in the fault changeSet.
    cs_regex = r'model \(New: (?P<model>\w+)\),'
    faultInsts = icurl('class',
                       'faultInst.json?query-target-filter=or(eq(faultInst.code,"F3073"),eq(faultInst.code,"F3074"))')
    for faultInst in faultInsts:
        fc = faultInst['faultInst']['attributes']['code']
        dn_array = re.search(node_regex, faultInst['faultInst']['attributes']['dn'])
        cs_array = re.search(cs_regex, faultInst['faultInst']['attributes']['changeSet'])
        if dn_array and cs_array:
            data.append([fc, dn_array.group("pod"), dn_array.group("node"),
                         cs_array.group("model"),
                         thresh.get(fc, ''),
                         recommended_action.get(fc, 'Resolve the fault')])
        else:
            # Could not parse node or model; report raw.
            unformatted_data.append([fc, faultInst['faultInst']['attributes']['dn'],
                                     thresh.get(fc, ''),
                                     recommended_action.get(fc, 'Resolve the fault')])
    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        doc_url=doc_url,
    )
"Fault DN", "% lifetime remaining", "Recommended Action"] + unformatted_data = [] + recommended_action = "Contact TAC for replacement" + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-ssd-health" + + has_error = False + dn_regex = node_regex + r'/.+p-\[(?P<storage>.+)\]-f' + faultInsts = icurl('class', 'faultInst.json?query-target-filter=eq(faultInst.code,"F2731")') + adjust_title = False + if len(faultInsts) == 0 and (cversion.older_than("4.2(7f)") or cversion.older_than("5.2(1g)")): + controller = icurl('class', 'topSystem.json?query-target-filter=eq(topSystem.role,"controller")') + if not controller: + return Result(result=ERROR, msg="topSystem response empty. Is the cluster healthy?", doc_url=doc_url) + + print('') + adjust_title = True + report_other = False + checked_apics = {} + for apic in controller: + attr = apic['topSystem']['attributes'] + if attr['address'] in checked_apics: continue + checked_apics[attr['address']] = 1 + pod_id = attr['podId'] + node_id = attr['id'] + node_title = 'Checking %s...' 
% attr['name'] + print_title(node_title) + try: + c = Connection(attr['address']) + c.username = username + c.password = password + c.log = LOG_FILE + c.connect() + except Exception as e: + data.append([attr['id'], attr['name'], '-', '-', str(e)]) + print_result(node_title, ERROR) + has_error = True + continue + try: + c.cmd( + 'grep -oE "SSD Wearout Indicator is [0-9]+" /var/log/dme/log/svc_ifc_ae.bin.log | tail -1') + except Exception as e: + data.append([attr['id'], attr['name'], '-', '-', str(e)]) + print_result(node_title, ERROR) + has_error = True + continue + + wearout_ind = re.search(r'SSD Wearout Indicator is (?P<wearout>[0-9]+)', c.output) + if wearout_ind is not None: + wearout = wearout_ind.group('wearout') + if int(wearout) < 5: + data.append([pod_id, node_id, "Solid State Disk", wearout, recommended_action]) + report_other = True + print_result(node_title, DONE) + continue + if report_other: + data.append([pod_id, node_id, "Solid State Disk", wearout, "No Action Required"]) + print_result(node_title, DONE) + else: + headers = ["Fault", "Pod", "Node", "Storage Unit", "% lifetime remaining", "Recommended Action"] + for faultInst in faultInsts: + dn_array = re.search(dn_regex, faultInst['faultInst']['attributes']['dn']) + lifetime_remaining = "<5%" + if dn_array: + data.append(['F2731', dn_array.group("pod"), dn_array.group("node"), dn_array.group("storage"), + lifetime_remaining, recommended_action]) + else: + unformatted_data.append( + ['F2731', faultInst['faultInst']['attributes']['dn'], lifetime_remaining, recommended_action]) + if has_error: + result = ERROR + elif not data and not unformatted_data: + result = PASS + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + doc_url=doc_url, + adjust_title=adjust_title, + ) + + +@check_wrapper(check_title="Config On APIC Connected Port (F0467 port-configured-for-apic)") +def 
@check_wrapper(check_title="Config On APIC Connected Port (F0467 port-configured-for-apic)")
def port_configured_for_apic_check(**kwargs):
    """Fail when F0467 `port-configured-for-apic` faults exist, i.e. an EPG
    is configured on a leaf port that connects to an APIC."""
    result = FAIL_UF
    headers = ["Fault", "Pod", "Node", "Port", "EPG"]
    data = []
    unformatted_headers = ['Fault', 'Fault DN']
    unformatted_data = []
    recommended_action = 'Remove config overlapping with APIC Connected Interfaces'
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#config-on-apic-connected-port"

    dn_regex = node_regex + r'/.+fv-\[(?P<epg>.+)\]/node-\d{3,4}/.+\[(?P<port>eth\d{1,2}/\d{1,2}).+/nwissues'
    faultInsts = icurl('class',
                       'faultInst.json?&query-target-filter=wcard(faultInst.changeSet,"port-configured-for-apic")')
    for faultInst in faultInsts:
        fc = faultInst['faultInst']['attributes']['code']
        dn = re.search(dn_regex, faultInst['faultInst']['attributes']['dn'])
        if dn:
            data.append([fc, dn.group("pod"), dn.group("node"), dn.group("port"), dn.group("epg")])
        else:
            # Unparsable fault DN: report raw.
            unformatted_data.append([fc, faultInst['faultInst']['attributes']['dn']])
    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )
@check_wrapper(check_title="Overlapping VLAN Pools")
def overlapping_vlan_pools_check(**kwargs):
    """Detect EPG ports on the same node/VLAN ID that resolve to different
    VLAN pools (overlapping pools).

    FAIL_O when a vPC port is involved (traffic outage risk on upgrade),
    MANUAL when only non-vPC ports are involved (flood-scope impact only).
    Skipped entirely when `Enforce EPG VLAN Validation` is enabled fabric-wide.
    """
    result = PASS
    headers = ['Tenant', 'AP', 'EPG', 'Node', 'Port', 'VLAN Scope', 'VLAN ID', 'VLAN Pools (Domains)', 'Impact']
    data = []
    recommended_action = """
    Each node must have only one VLAN pool per VLAN ID across all the ports or across the ports with VLAN scope `portlocal` in the same EPG.'
    When `Impact` shows `Outage`, you must resolve the overlapping VLAN pools.
    When `Impact` shows `Flood Scope`, you should check whether it is ok that STP BPDUs, or any BUM traffic when using Flood-in-Encap, may not be flooded within the same VLAN ID across all the nodes/ports.
    Note that only the nodes causing the overlap are shown above."""
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#overlapping-vlan-pool'

    infraSetPols = icurl('mo', 'uni/infra/settings.json')
    if infraSetPols[0]['infraSetPol']['attributes'].get('validateOverlappingVlans') in ['true', 'yes']:
        return Result(result=PASS, msg="`Enforce EPG VLAN Validation` is enabled. No need to check overlapping VLANs")

    # Get VLAN pools and ports from access policy
    mo_classes = AciAccessPolicyParser.get_classes()
    filter = '?query-target=subtree&target-subtree-class=' + ','.join(mo_classes)
    infra_mos = icurl('class', 'infraInfra.json' + filter)
    mos = AciAccessPolicyParser(infra_mos)

    # Get EPG port deployments
    epg_regex = r'uni/tn-(?P<tenant>[^/]+)/ap-(?P<ap>[^/]+)/epg-(?P<epg>[^/]+)'
    conn_regex = (
        r"uni/epp/fv-\[" + epg_regex + r"]/"
        r"node-(?P<node>\d+)/"
        r"(?:"
        # NOTE(review): `(:?` below looks like a typo for `(?:` — it still
        # matches the same DNs here (optional literal colon never present),
        # only adding an unused capture group. Confirm before changing.
        r"(?:ext)?stpathatt-\[(?P<stport>[^\]]+)\](:?-extchid-(?P<stfex>\d+))?|"  # static port binding
        r"dyatt-\[.+(?:ext(?:prot)?paths-(?P<dyfex>\d+)/)?pathep-\[(?P<dyport>[^\]]+)\]\]|"  # dynamic port binding
        r"attEntitypathatt-\[(?P<aep>.+)\]"  # AEP binding
        r")/"
        r".*\[vlan-(?P<vlan>\d+)"
    )
    # uni/epp/fv-[{epgPKey}]/node-{id}/stpathatt-[{pathName}]/conndef/conn-[{encap}]-[{addr}]
    # uni/epp/fv-[{epgPKey}]/node-{id}/extstpathatt-[{pathName}]-extchid-{extChId}/conndef/conn-[{encap}]-[{addr}]
    # uni/epp/fv-[{epgPKey}]/node-{id}/dyatt-[{targetDn}]/conndef/conn-[{encap}]-[{addr}]
    # uni/epp/fv-[{epgPKey}]/node-{id}/attEntitypathatt-[{pathName}]/conndef/conn-[{encap}]-[{addr}]
    ports_per_epg = defaultdict(list)
    fvIfConns = icurl('class', 'fvIfConn.json')
    for fvIfConn in fvIfConns:
        dn = re.search(conn_regex, fvIfConn['fvIfConn']['attributes']['dn'])
        if not dn:
            continue
        epg_key = ':'.join([dn.group('tenant'), dn.group('ap'), dn.group('epg')])
        port_keys = []
        if not dn.group('aep'):
            # Static or dynamic binding: the port key is node[/fex]/port.
            fex = dn.group('stfex') if dn.group('stfex') else dn.group('dyfex')
            port = dn.group('stport') if dn.group('stport') else dn.group('dyport')
            if fex:
                port_keys.append('/'.join([dn.group('node'), fex, port]))
            else:
                port_keys.append('/'.join([dn.group('node'), port]))
        else:
            # AEP binding: expand to all access-policy ports using that AEP on this node.
            for port_key, port_data in iteritems(mos.port_data):
                if port_data.get('aep_name') == dn.group('aep') and port_data.get('node') == dn.group('node'):
                    port_keys.append(port_key)
        for port_key in port_keys:
            port_data = mos.port_data.get(port_key)
            if not port_data:
                continue
            ports_per_epg[epg_key].append({
                'tenant': str(dn.group('tenant')),
                'ap': str(dn.group('ap')),
                'epg': str(dn.group('epg')),
                'node': str(port_data.get('node', '')),
                'fex': str(port_data.get('fex', '')),
                'port': str(port_data.get('port', '')),
                'vlan': str(dn.group('vlan')),
                'aep': str(port_data.get('aep_name', '')),
                'domain_dns': port_data.get('domain_dns', []),
                'pc_type': str(port_data.get('pc_type', '')),
                'vlan_scope': str(port_data.get('vlan_scope', '')),
            })

    # Check overlapping VLAN pools per EPG
    epg_filter = '?rsp-subtree-include=required&rsp-subtree=children&rsp-subtree-class=fvRsDomAtt'
    fvAEPgs_with_domains = icurl('class', 'fvAEPg.json' + epg_filter)
    for fvAEPg in fvAEPgs_with_domains:
        # `rsp-subtree-include=required` ensures that fvRsDomAtt are the only children
        rsDoms = fvAEPg['fvAEPg']['children']
        rsDom_dns = [rsDom['fvRsDomAtt']['attributes']['tDn'] for rsDom in rsDoms]

        # VLAN IDs covered by two different pools attached to this EPG's domains.
        overlap_vlan_ids = set()
        for i in range(len(rsDoms)):
            for j in range(i + 1, len(rsDoms)):
                i_dn = rsDoms[i]['fvRsDomAtt']['attributes']['tDn']
                j_dn = rsDoms[j]['fvRsDomAtt']['attributes']['tDn']
                i_vpool = mos.vpool_per_dom.get(i_dn)
                j_vpool = mos.vpool_per_dom.get(j_dn)
                # domains that do not have VLAN pools attached
                if not i_vpool or not j_vpool:
                    continue
                if i_vpool['name'] != j_vpool['name']:
                    overlap_vlan_ids.update(
                        set(i_vpool['vlan_ids']).intersection(j_vpool['vlan_ids'])
                    )

        if not overlap_vlan_ids:
            continue

        ports_per_node = defaultdict(dict)
        epg_dn = re.search(epg_regex, fvAEPg['fvAEPg']['attributes']['dn'])
        epg_key = ':'.join([epg_dn.group('tenant'), epg_dn.group('ap'), epg_dn.group('epg')])
        epg_ports = ports_per_epg.get(epg_key, [])
        for port in epg_ports:
            vlan_id = int(port['vlan'])
            if vlan_id not in overlap_vlan_ids:
                continue

            # Get domains that are attached to the port and the EPG
            common_domain_dns = set(port['domain_dns']).intersection(rsDom_dns)
            # Get VLAN pools for the VLAN ID of the port
            # Also store domains for each VLAN pool for the final output
            inuse_vpools = defaultdict(list)
            for dom_dn in common_domain_dns:
                vpool = mos.vpool_per_dom.get(dom_dn, {})
                if vlan_id not in vpool.get('vlan_ids', []):
                    continue
                inuse_vpools[vpool['name']].append(vpool['dom_name'])
            if not inuse_vpools:
                continue

            # len(inuse_vpools) == 1 at this point means that there is no
            # overlapping VLAN pool issue with this port alone.
            # But do not skip such a port yet because there may be another port
            # on the same node with the same VLAN ID with a different VLAN pool.
            port['inuse_vpools'] = inuse_vpools
            vlan_scope = port.get('vlan_scope', 'global')
            # handle all non-portlocal scope as global
            if vlan_scope not in ['global', 'portlocal']:
                vlan_scope = 'global'
            if vlan_id not in ports_per_node[port['node']]:
                ports_per_node[port['node']][vlan_id] = {}
            if vlan_scope not in ports_per_node[port['node']][vlan_id]:
                ports_per_node[port['node']][vlan_id][vlan_scope] = []
            ports_per_node[port['node']][vlan_id][vlan_scope].append(port)

        for ports_per_vlanid in ports_per_node.values():
            for ports_per_scope in ports_per_vlanid.values():
                for ports in ports_per_scope.values():
                    inuse_vpools_across_ports = set()
                    has_vpc = False
                    for port in ports:
                        inuse_vpools_across_ports.update(
                            port.get('inuse_vpools', {}).keys()
                        )
                        if port.get('pc_type') == 'vpc':
                            has_vpc = True

                    # All ports on the node with the same VLAN ID use the same VLAN pool
                    if len(inuse_vpools_across_ports) < 2:
                        continue

                    if has_vpc:
                        result = FAIL_O
                    elif result == PASS:
                        result = MANUAL
                    impact = 'Outage' if has_vpc else 'Flood Scope'
                    for port in ports:
                        node = port['node']
                        if port.get('fex') != "0":
                            node += '(FEX {})'.format(port['fex'])
                        vpool_domains = []
                        for v_name, d_names in iteritems(port.get('inuse_vpools', {})):
                            vpool_domains.append(
                                '{}({})'.format(v_name, ','.join(sorted(d_names)))
                            )
                        data.append([
                            port['tenant'],
                            port['ap'],
                            port['epg'],
                            node,
                            port['port'],
                            port['vlan_scope'],
                            port['vlan'],
                            ', '.join(vpool_domains),
                            impact,
                        ])
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
@check_wrapper(check_title="Scalability (faults related to Capacity Dashboard)")
def scalability_faults_check(**kwargs):
    """Fail when any fault is raised under eqptcapacityEntity, i.e. a
    Capacity-Dashboard scalability limit is being approached or exceeded."""
    headers = ["Fault", "Pod", "Node", "Description"]
    unformatted_headers = ["Fault", "Fault DN", "Description"]
    data = []
    unformatted_data = []
    recommended_action = 'Review config and reduce the usage'
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#scalability-faults-related-to-capacity-dashboard"

    responses = icurl('class', 'eqptcapacityEntity.json?rsp-subtree-include=faults,no-scoped')
    fault_attrs = [
        mo['faultInst']['attributes'] for mo in responses if mo.get('faultInst')
    ]
    for attrs in fault_attrs:
        located = re.search(node_regex, attrs['dn'])
        if located:
            data.append([attrs['code'], located.group('pod'), located.group('node'), attrs['descr']])
        else:
            # Node could not be parsed out of the DN; report raw.
            unformatted_data.append([attrs['code'], attrs['dn'], attrs['descr']])

    result = FAIL_O if (data or unformatted_data) else PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )
@check_wrapper(check_title="APIC Disk Space Usage (F1527, F1528, F1529 equipment-full)")
def apic_disk_space_faults_check(cversion, **kwargs):
    """Fail when APIC disk-usage faults exist (F1527/F1528/F1529), with a
    mount-point-specific recommended action.

    :param cversion: current APIC version; versions affected by CSCvn13119
                     get an extra hint appended to the default action.
    """
    result = FAIL_UF
    headers = ['Fault', 'Pod', 'Node', 'Mount Point', 'Current Usage %', 'Recommended Action']
    data = []
    unformatted_headers = ['Fault', 'Fault DN', 'Recommended Action']
    unformatted_data = []
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-disk-space-usage"
    recommended_action = {
        '/firmware': 'Remove unneeded images',
        '/techsupport': 'Remove unneeded techsupports/cores'
    }
    default_action = 'Contact Cisco TAC.'
    if cversion.same_as('4.0(1h)') or cversion.older_than('3.2(6i)'):
        default_action += ' A typical issue is CSCvn13119.'

    dn_regex = node_regex + r'/.+p-\[(?P<mountpoint>.+)\]-f'
    desc_regex = r'is (?P<usage>\d{2}%) full'

    faultInsts = icurl('class',
                       'faultInst.json?query-target-filter=or(eq(faultInst.code,"F1527"),eq(faultInst.code,"F1528"),eq(faultInst.code,"F1529"))')
    for faultInst in faultInsts:
        fc = faultInst['faultInst']['attributes']['code']
        dn = re.search(dn_regex, faultInst['faultInst']['attributes']['dn'])
        desc = re.search(desc_regex, faultInst['faultInst']['attributes']['descr'])
        if dn and desc:
            data.append([fc, dn.group('pod'), dn.group('node'), dn.group('mountpoint'),
                         desc.group('usage'),
                         recommended_action.get(dn.group('mountpoint'), default_action)])
        else:
            # Could not parse mount point or usage; report raw.
            unformatted_data.append([fc, faultInst['faultInst']['attributes']['dn'], default_action])
    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        doc_url=doc_url,
    )
+ + dn_regex = node_regex + r'/.+p-\[(?P<mountpoint>.+)\]-f' + desc_regex = r'is (?P<usage>\d{2}%) full' + + faultInsts = icurl('class', + 'faultInst.json?query-target-filter=or(eq(faultInst.code,"F1527"),eq(faultInst.code,"F1528"),eq(faultInst.code,"F1529"))') + for faultInst in faultInsts: + fc = faultInst['faultInst']['attributes']['code'] + dn = re.search(dn_regex, faultInst['faultInst']['attributes']['dn']) + desc = re.search(desc_regex, faultInst['faultInst']['attributes']['descr']) + if dn and desc: + data.append([fc, dn.group('pod'), dn.group('node'), dn.group('mountpoint'), + desc.group('usage'), + recommended_action.get(dn.group('mountpoint'), default_action)]) + else: + unformatted_data.append([fc, faultInst['faultInst']['attributes']['dn'], default_action]) + if not data and not unformatted_data: + result = PASS + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + doc_url=doc_url, + ) + + +@check_wrapper(check_title="L3Out Route Map import/export direction") +def l3out_route_map_direction_check(**kwargs): + """ Implementation change due to CSCvm75395 - 4.1(1) """ + result = FAIL_O + headers = ["Tenant", "L3Out", "External EPG", "Subnet", "Subnet Scope", + "Route Map", "Direction", "Recommended Action", ] + data = [] + recommended_action = 'The subnet scope must have {}-rtctrl' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l3out-route-map-importexport-direction" + + dn_regex = r'uni/tn-(?P<tenant>[^/]+)/out-(?P<l3out>[^/]+)/instP-(?P<epg>[^/]+)/extsubnet-\[(?P<subnet>[^\]]+)\]' + l3extSubnets = icurl('class', + 'l3extSubnet.json?rsp-subtree=children&rsp-subtree-class=l3extRsSubnetToProfile&rsp-subtree-include=required') + for l3extSubnet in l3extSubnets: + dn = re.search(dn_regex, l3extSubnet['l3extSubnet']['attributes']['dn']) + subnet_scope = l3extSubnet['l3extSubnet']['attributes']['scope'] + basic = 
[dn.group('tenant'), dn.group('l3out'), dn.group('epg'), dn.group('subnet'), subnet_scope] + for child in l3extSubnet['l3extSubnet']['children']: + dir = child['l3extRsSubnetToProfile']['attributes']['direction'] + rmap = child['l3extRsSubnetToProfile']['attributes']['tnRtctrlProfileName'] + if ((dir == 'export' and 'export-rtctrl' not in subnet_scope) or + (dir == 'import' and 'import-rtctrl' not in subnet_scope)): + data.append(basic + [rmap, dir, recommended_action.format(dir)]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, doc_url=doc_url) + + +@check_wrapper(check_title="L3Out Route Map Match Rule with missing-target") +def l3out_route_map_missing_target_check(cversion, tversion, **kwargs): + """ Implementation change due to CSCwc11570 - 5.2.8/6.0.2 """ + result = FAIL_O + headers = ['Tenant', 'L3Out', 'Route Map', 'Context', 'Action', 'Match Rule'] + data = [] + recommended_action = 'The configured match rules do not exist. Update the route maps with existing match rules.' 
+ doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l3out-route-map-match-rule-with-missing-target' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + def is_old(v): + return True if v.older_than("5.2(8a)") or v.simple_version == "6.0(1)" else False + + c_is_old = is_old(cversion) + t_is_old = is_old(tversion) + if (c_is_old and t_is_old) or (not c_is_old and not t_is_old): + return Result(result=NA, msg=VER_NOT_AFFECTED) + + dn_regex = r'uni/tn-(?P<tenant>[^/]+)/out-(?P<l3out>[^/]+)/' + # Get a missing-target match rule in a route map with type `combinable` + api = 'rtctrlProfile.json' + api += '?query-target-filter=eq(rtctrlProfile.type,"combinable")' + api += '&rsp-subtree=full&rsp-subtree-filter=eq(rtctrlRsCtxPToSubjP.state,"missing-target")' + profiles = icurl('class', api) + for profile in profiles: + dn = re.search(dn_regex, profile['rtctrlProfile']['attributes']['dn']) + for ctxP in profile['rtctrlProfile'].get('children', []): + if not ctxP.get('rtctrlCtxP'): + continue + for rsCtxPToSubjP in ctxP['rtctrlCtxP'].get('children', []): + if ( + rsCtxPToSubjP.get('rtctrlRsCtxPToSubjP') + and rsCtxPToSubjP['rtctrlRsCtxPToSubjP']['attributes']['state'] == 'missing-target' + ): + data.append([ + dn.group('tenant'), + dn.group('l3out'), + profile['rtctrlProfile']['attributes']['name'], + ctxP['rtctrlCtxP']['attributes']['name'], + ctxP['rtctrlCtxP']['attributes']['action'], + rsCtxPToSubjP['rtctrlRsCtxPToSubjP']['attributes']['tnRtctrlSubjPName'], + ]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="L3Out Loopback IP Overlap With L3Out Interfaces") +def l3out_overlapping_loopback_check(**kwargs): + result = FAIL_O + headers = ['Tenant:VRF', 'Node ID', 'Loopback IP (Tenant:L3Out:NodeP)', 'Interface IP (Tenant:L3Out:NodeP:IFP)'] + data = [] + recommended_action = 'Change either 
the loopback or L3Out interface IP subnet to avoid overlap.' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l3out-loopback-ip-overlap-with-l3out-interfaces' + + tn_regex = r'uni/tn-(?P<tenant>[^/]+)/' + path_regex = r'topology/pod-(?P<pod>\d+)/(?:prot)?paths-(?P<node1>\d+)(?:-(?P<node2>\d+))?' + + vrfs = defaultdict(dict) + api = 'l3extOut.json' + api += '?rsp-subtree=full' + api += '&rsp-subtree-class=l3extRsEctx,l3extRsNodeL3OutAtt,l3extLoopBackIfP,l3extRsPathL3OutAtt,l3extMember' + l3outs = icurl('class', api) + for l3out in l3outs: + vrf = "" + loopback_ips = defaultdict(list) + interface_ips = defaultdict(list) + for child in l3out['l3extOut'].get('children', []): + dn = re.search(tn_regex, l3out['l3extOut']['attributes']['dn']) + tenant_name = dn.group('tenant') if dn else "" + l3out_name = l3out['l3extOut']['attributes']['name'] + # Get VRF + if child.get('l3extRsEctx'): + vrf_tdn = re.search(tn_regex, child['l3extRsEctx']['attributes']['tDn']) + if vrf_tdn: + vrf = ':'.join([vrf_tdn.group('tenant'), child['l3extRsEctx']['attributes']['tnFvCtxName']]) + else: + vrf = child['l3extRsEctx']['attributes']['tDn'] + # Get loopback and interface IPs + elif child.get('l3extLNodeP'): + nodep_name = child['l3extLNodeP']['attributes']['name'] + for np_child in child['l3extLNodeP'].get('children', []): + # Get the loopback IP for each node + if np_child.get('l3extRsNodeL3OutAtt'): + node = np_child['l3extRsNodeL3OutAtt'] + m = re.search(node_regex, node['attributes']['tDn']) + if not m: + log.error('Failed to parse tDn - %s', node['attributes']['tDn']) + continue + node_id = m.group('node') + + config = ':'.join([tenant_name, l3out_name, nodep_name]) + if node['attributes']['rtrIdLoopBack'] == 'yes': + loopback_ips[node_id].append({ + 'addr': node['attributes']['rtrId'], + 'config': config, + }) + else: + for lb in node.get('children', []): + # One l3extLoopBackIfP per node for each IPv4/v6 + if not lb.get('l3extLoopBackIfP'): + 
continue + loopback_ip = lb['l3extLoopBackIfP']['attributes']['addr'] + # Strip the subnet mask (/32, /128) if any + lo_addr = loopback_ip.split("/")[0] + loopback_ips[node_id].append({ + 'addr': lo_addr, + 'config': config, + }) + # Get interface IPs for each node + elif np_child.get('l3extLIfP'): + ifp_name = np_child['l3extLIfP']['attributes']['name'] + for ifp_child in np_child['l3extLIfP'].get('children', []): + if not ifp_child.get('l3extRsPathL3OutAtt'): + continue + port = ifp_child['l3extRsPathL3OutAtt'] + m = re.search(path_regex, port['attributes']['tDn']) + if not m: + log.error('Failed to parse tDn - %s', port['attributes']['tDn']) + continue + node1_id = m.group('node1') + node2_id = m.group('node2') + config = ':'.join([tenant_name, l3out_name, nodep_name, ifp_name]) + # non-vPC port + if not node2_id: + interface_ips[node1_id].append({ + 'addr': port['attributes']['addr'], + 'config': config, + }) + # vPC port + else: + for member in port.get('children', []): + if not member.get('l3extMember'): + continue + node_id = node1_id + if member['l3extMember']['attributes']['side'] == 'B': + node_id = node2_id + interface_ips[node_id].append({ + 'addr': member['l3extMember']['attributes']['addr'], + 'config': config, + }) + for node in loopback_ips: + if not vrfs[vrf].get(node): + vrfs[vrf][node] = {} + vrfs[vrf][node]['loopbacks'] = vrfs[vrf][node].get('loopbacks', []) + loopback_ips[node] + for node in interface_ips: + if not vrfs[vrf].get(node): + vrfs[vrf][node] = {} + vrfs[vrf][node]['interfaces'] = vrfs[vrf][node].get('interfaces', []) + interface_ips[node] + + # Check overlaps + for vrf in vrfs: + for node in vrfs[vrf]: + loopbacks = vrfs[vrf][node].get('loopbacks') + interfaces = vrfs[vrf][node].get('interfaces') + if not loopbacks or not interfaces: + continue + for interface in interfaces: + for loopback in loopbacks: + if IPAddress.ip_in_subnet(loopback['addr'], interface['addr']): + data.append([ + vrf, + node, + '{} 
({})'.format(loopback['addr'], loopback['config']), + '{} ({})'.format(interface['addr'], interface['config']), + ]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="BGP Peer Profile at node level without Loopback") +def bgp_peer_loopback_check(**kwargs): + """ Implementation change due to CSCvm28482 - 4.1(2) """ + result = FAIL_O + headers = ["Tenant", "L3Out", "Node Profile", "Pod", "Node"] + data = [] + recommended_action = 'Configure a loopback or configure bgpPeerP under interfaces instead of nodes' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#bgp-peer-profile-at-node-level-without-loopback" + + name_regex = r'uni/tn-(?P<tenant>[^/]+)/out-(?P<l3out>[^/]+)/lnodep-(?P<nodep>[^/]+)' + l3extLNodePs = icurl('class', + 'l3extLNodeP.json?rsp-subtree=full&rsp-subtree-class=bgpPeerP,l3extRsNodeL3OutAtt,l3extLoopBackIfP') + for l3extLNodeP in l3extLNodePs: + if not l3extLNodeP['l3extLNodeP'].get('children'): + continue + # if the node profile has no bgpPeerP, no need to check loopbacks + bgpPeerPs = [x for x in l3extLNodeP['l3extLNodeP']['children'] if x.get('bgpPeerP')] + if not bgpPeerPs: + continue + for l3extLNodeP_child in l3extLNodeP['l3extLNodeP']['children']: + if not l3extLNodeP_child.get('l3extRsNodeL3OutAtt'): + continue + if l3extLNodeP_child['l3extRsNodeL3OutAtt']['attributes']['rtrIdLoopBack'] == 'yes': + continue + if l3extLNodeP_child['l3extRsNodeL3OutAtt'].get('children'): + for rsnode_child in l3extLNodeP_child['l3extRsNodeL3OutAtt']['children']: + if rsnode_child.get('l3extLoopBackIfP'): + break + else: + # No loopbacks are configured for this node even though it has bgpPeerP + name = re.search(name_regex, l3extLNodeP['l3extLNodeP']['attributes']['dn']) + dn = re.search(node_regex, l3extLNodeP_child['l3extRsNodeL3OutAtt']['attributes']['tDn']) + data.append([ + 
name.group('tenant'), name.group('l3out'), name.group('nodep'), + dn.group('pod'), dn.group('node')]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Different infra VLAN via LLDP (F0454 infra-vlan-mismatch)") +def lldp_with_infra_vlan_mismatch_check(**kwargs): + result = FAIL_O + headers = ["Fault", "Pod", "Node", "Port"] + data = [] + unformatted_headers = ["Fault", "Fault DN", "Failure Reason"] + unformatted_data = [] + recommended_action = 'Disable LLDP on this port if it is expected to receive LLDP with a mismatched infra VLAN' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#different-infra-vlan-via-lldp" + + dn_regex = node_regex + r'/sys/lldp/inst/if-\[(?P<port>eth\d{1,2}/\d{1,2})\]/fault-F0454' + faultInsts = icurl('class', + 'faultInst.json?query-target-filter=and(eq(faultInst.code,"F0454"),wcard(faultInst.changeSet,"infra-vlan-mismatch"))') + for faultInst in faultInsts: + fc = faultInst['faultInst']['attributes']['code'] + dn = re.search(dn_regex, faultInst['faultInst']['attributes']['dn']) + if dn: + data.append([fc, dn.group("pod"), dn.group("node"), dn.group("port")]) + else: + unformatted_data.append([fc, faultInst['faultInst']['attributes']['dn']]) + if not data and not unformatted_data: + result = PASS + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + + +# Connection Based Check +@check_wrapper(check_title="APIC Target version image and MD5 hash") +def apic_version_md5_check(tversion, username, password, **kwargs): + result = FAIL_UF + headers = ['APIC', 'Firmware', 'md5sum', 'Failure'] + data = [] + recommended_action = 'Delete the firmware from APIC and re-download' + doc_url = 
"https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-target-version-image-and-md5-hash" + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + image_validaton = True + mo = icurl('mo', 'fwrepo/fw-aci-apic-dk9.%s.json' % tversion.dot_version) + for fm_mo in mo: + if fm_mo.get("firmwareFirmware"): + desc = fm_mo["firmwareFirmware"]['attributes']["description"] + md5 = fm_mo["firmwareFirmware"]['attributes']["checksum"] + if "Image signing verification failed" in desc: + data.append(["All", str(tversion), md5, 'Target image is corrupted']) + image_validaton = False + + if not image_validaton: + return Result(result=FAIL_UF, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + md5s = [] + md5_names = [] + + has_error = False + prints('') + nodes_response_json = icurl('class', 'topSystem.json') + for node in nodes_response_json: + if node['topSystem']['attributes']['role'] != "controller": + continue + apic_name = node['topSystem']['attributes']['name'] + node_title = 'Checking %s...' 
% apic_name + print_title(node_title) + try: + c = Connection(node['topSystem']['attributes']['address']) + c.username = username + c.password = password + c.log = LOG_FILE + c.connect() + except Exception as e: + data.append([apic_name, '-', '-', str(e)]) + print_result(node_title, ERROR) + has_error = True + continue + + try: + c.cmd("ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.%s.bin" % + tversion.dot_version) + except Exception as e: + data.append([apic_name, '-', '-', + 'ls command via ssh failed due to:{}'.format(str(e))]) + print_result(node_title, ERROR) + has_error = True + continue + if "No such file or directory" in c.output: + data.append([apic_name, str(tversion), '-', 'image not found']) + print_result(node_title, FAIL_UF) + continue + + try: + c.cmd("cat /firmware/fwrepos/fwrepo/md5sum/aci-apic-dk9.%s.bin" % + tversion.dot_version) + except Exception as e: + data.append([apic_name, str(tversion), '-', + 'failed to check md5sum via ssh due to:{}'.format(str(e))]) + print_result(node_title, ERROR) + has_error = True + continue + if "No such file or directory" in c.output: + data.append([apic_name, str(tversion), '-', 'md5sum file not found']) + print_result(node_title, FAIL_UF) + continue + for line in c.output.split("\n"): + words = line.split() + if ( + len(words) == 2 and + words[1].startswith("/var/run/mgmt/fwrepos/fwrepo/aci-apic") + ): + md5s.append(words[0]) + md5_names.append(apic_name) + break + else: + data.append([apic_name, str(tversion), '-', 'unexpected output when checking md5sum file']) + print_result(node_title, ERROR) + has_error = True + continue + + print_result(node_title, DONE) + if len(set(md5s)) > 1: + for name, md5 in zip(md5_names, md5s): + data.append([name, str(tversion), md5, 'md5sum do not match on all APICs']) + if has_error: + result = ERROR + elif not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url, adjust_title=True) + + +# 
Connection Based Check +@check_wrapper(check_title="Standby APIC Disk Space Usage") +def standby_apic_disk_space_check(**kwargs): + result = FAIL_UF + msg = '' + headers = ['SN', 'OOB', 'Mount Point', 'Current Usage %', 'Details'] + data = [] + recommended_action = 'Contact Cisco TAC' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#standby-apic-disk-space-usage" + threshold = 75 # usage (%) + + has_error = False + checked_stby = [] + infraSnNodes = icurl('class', 'infraSnNode.json?query-target-filter=eq(infraSnNode.cntrlSbstState,"approved")') + for stby_apic in infraSnNodes: + stb = stby_apic['infraSnNode']['attributes'] + if stb['addr'] in checked_stby: + continue + checked_stby.append(stb['addr']) + try: + c = Connection(stb['addr']) + c.username = "rescue-user" + c.log = LOG_FILE + c.connect() + except Exception as e: + data.append([stb['mbSn'], stb['oobIpAddr'], '-', '-', str(e)]) + has_error = True + continue + + try: + c.cmd("df -h") + except Exception as e: + data.append([stb['mbSn'], stb['oobIpAddr'], '-', '-', str(e)]) + has_error = True + continue + + for line in c.output.split("\n"): + if "Filesystem" not in line and "df" not in line: + fs_regex = r'([^\s]+) +([^\s]+) +([^\s]+) +([^\s]+) +([^\s]+)%' + fs = re.search(fs_regex, line) + if fs is not None: + directory = fs.group(1) + usage = fs.group(5) + if int(usage) >= threshold: + data.append([stb['mbSn'], stb['oobIpAddr'], directory, usage, '-']) + if not infraSnNodes: + result = NA + msg = 'No standby APIC found' + elif has_error: + result = ERROR + elif not data: + result = PASS + msg = 'all below {}%'.format(threshold) + return Result(result=result, msg=msg, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Remote Leaf Compatibility") +def r_leaf_compatibility_check(tversion, **kwargs): + result = PASS + headers = ['Target Version', 'Remote Leaf', 'Direct Traffic Forwarding'] + data = [] + 
recommended_action_4_2_2 = 'Upgrade remote leaf nodes before spine nodes or\ndisable Direct Traffic Forwarding (CSCvs16767)' + recommended_action_5a = 'Direct Traffic Forwarding is required on 5.0 or later. Enable the feature before the upgrade' + recommended_action_5b = ('Direct Traffic Forwarding is required on 5.0 or later.\n' + 'Upgrade to 4.1(2)-4.2(x) first to enable the feature before upgrading to 5.0 or later.') + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#compatibility-remote-leaf-switch" + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + remote_leafs = icurl('class', 'fabricNode.json?&query-target-filter=eq(fabricNode.nodeType,"remote-leaf-wan")') + if not remote_leafs: + return Result(result=NA, msg="No Remote Leaf Found") + + infraSetPols = icurl('mo', 'uni/infra/settings.json') + direct = infraSetPols[0]['infraSetPol']['attributes'].get('enableRemoteLeafDirect') + direct_enabled = 'Not Supported' + if direct: + direct_enabled = direct == 'yes' + + ra = '' + if tversion.simple_version == "4.2(2)" and direct_enabled is True: + ra = recommended_action_4_2_2 + elif int(tversion.major1) >= 5 and direct_enabled is False: + ra = recommended_action_5a + elif int(tversion.major1) >= 5 and direct_enabled == 'Not Supported': + ra = recommended_action_5b + if ra: + result = FAIL_O + data.append([str(tversion), "Present", direct_enabled]) + return Result(result=result, headers=headers, data=data, recommended_action=ra, doc_url=doc_url) + + +@check_wrapper(check_title="EP Announce Compatibility") +def ep_announce_check(cversion, tversion, **kwargs): + result = PASS + headers = ['Susceptible Defect', 'Recommended Action'] + data = [] + recommended_action = ('For fabrics running a pre-12.2(4p) ACI switch release, ' + 'upgrade to 12.2(4r) and then upgrade to the desired destination release.\n' + 'For fabrics running a 12.3(1) ACI switch release, ' + 'upgrade to 13.1(2v) and then upgrade to the desired 
destination release.') + + fixed_versions = ["2.2(4p)", "2.2(4q)", "2.2(4r)"] + current_version_affected = False + target_version_affected = False + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if cversion.version not in fixed_versions and int(cversion.major1) < 3: + current_version_affected = True + + if tversion.major1 == "3": + if int(tversion.major2) >= 2 and int(tversion.maint) >= 2: + target_version_affected = True + elif int(tversion.major1) >= 4: + target_version_affected = True + + if current_version_affected and target_version_affected: + result = FAIL_O + data.append(['CSCvi76161', recommended_action]) + return Result(result=result, headers=headers, data=data) + + +@check_wrapper(check_title="VMM Domain Controller Status") +def vmm_controller_status_check(**kwargs): + result = PASS + headers = ['VMM Domain', 'vCenter IP or Hostname', 'Current State'] + data = [] + recommended_action = 'Check network connectivity to the vCenter.' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#vmm-domain-controller-status" + + vmmDoms = icurl('class', 'compCtrlr.json') + if not vmmDoms: + return Result(result=NA, msg='No VMM Domains Found') + for dom in vmmDoms: + if dom['compCtrlr']['attributes']['operSt'] == "offline": + domName = dom['compCtrlr']['attributes']['domName'] + hostOrIp = dom['compCtrlr']['attributes']['hostOrIp'] + result = FAIL_O + data.append([domName, hostOrIp, "offline"]) + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="VMM Domain LLDP/CDP Adjacency Status") +def vmm_controller_adj_check(**kwargs): + result = PASS + msg = '' + headers = ['VMM Domain', 'Host IP or Hostname'] + data = [] + unformatted_headers = ['Fault', 'Fault DN'] + unformatted_data = [] + recommended_action = 'Ensure consistent use of expected Discovery Protocol from Hypervisor to ACI Leaf.' 
+ doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#vmm-domain-lldpcdp-adjacency-status" + + adjFaults = icurl('class', 'faultInst.json?query-target-filter=eq(faultInst.code,"F606391")') + adj_regex = r'adapters on the host: (?P<host>[^\(]+)' + dom_reg = r'comp\/prov-VMware\/ctrlr-\[(?P<dom>.+)\]' + if not adjFaults: + msg = 'No LLDP/CDP Adjacency Failed Faults Found' + else: + for adj in adjFaults: + if adj['faultInst']['attributes']['severity'] != "cleared": + if "prov-VMware" in adj['faultInst']['attributes']['dn']: + r1 = re.search(adj_regex, adj['faultInst']['attributes']['descr']) + r2 = re.search(dom_reg, adj['faultInst']['attributes']['dn']) + result = FAIL_O + if r1 and r2: + host = r1.group("host") + dom = r2.group("dom") + data.append([dom, host]) + else: + unformatted_data.append([adj['faultInst']['attributes']['code'], adj['faultInst']['attributes']['dn']]) + return Result( + result=result, + msg=msg, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + + +@check_wrapper(check_title="VPC-paired Leaf switches") +def vpc_paired_switches_check(vpc_node_ids, **kwargs): + result = PASS + headers = ["Node ID", "Node Name"] + data = [] + recommended_action = 'Determine if dataplane redundancy is available if these nodes go down.' 
+ doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#vpc-paired-leaf-switches' + + top_system = icurl('class', 'topSystem.json') + for node in top_system: + node_id = node['topSystem']['attributes']['id'] + role = node['topSystem']['attributes']['role'] + if role == 'leaf' and (node_id not in vpc_node_ids): + result = MANUAL + name = node['topSystem']['attributes']['name'] + data.append([node_id, name]) + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="APIC CIMC Compatibility") +def cimc_compatibilty_check(tversion, **kwargs): + result = FAIL_UF + headers = ["Node ID", "Model", "Current CIMC version", "Catalog Recommended CIMC Version", "Warning"] + data = [] + recommended_action = 'Check Release note of APIC Model/version for latest recommendations.' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#compatibility-cimc-version' + + apic_obj = icurl('class', 'eqptCh.json?query-target-filter=wcard(eqptCh.descr,"APIC")') + if apic_obj and tversion: + try: + for eqptCh in apic_obj: + if eqptCh['eqptCh']['attributes']['cimcVersion']: + apic_model = eqptCh['eqptCh']['attributes']['descr'] + model = "apic" + apic_model.split('-')[2].lower() + current_cimc = eqptCh['eqptCh']['attributes']['cimcVersion'] + compat_lookup_dn = "uni/fabric/compcat-default/ctlrfw-apic-" + tversion.simple_version + \ + "/rssuppHw-[uni/fabric/compcat-default/ctlrhw-" + model + "].json" + compatMo = icurl('mo', compat_lookup_dn) + if not compatMo: + msg = "No compatibility information found for {}/{}".format(model, tversion.simple_version) + return Result(result=MANUAL, msg=msg, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + recommended_cimc = compatMo[0]['compatRsSuppHw']['attributes']['cimcVersion'] + warning = "" + if compatMo and recommended_cimc: + if not 
is_firstver_gt_secondver(current_cimc, "3.0(3a)"): + warning = "Multi-step Upgrade may be required, check UCS CIMC Matrix." + if not is_firstver_gt_secondver(current_cimc, recommended_cimc): + nodeid = eqptCh['eqptCh']['attributes']['dn'].split('/')[2] + data.append([nodeid, apic_model, current_cimc, recommended_cimc, warning]) + + if not data: + result = PASS + + except KeyError: + return Result(result=MANUAL, msg="eqptCh does not have cimcVersion parameter on this version", headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + else: + return Result(result=MANUAL, msg=TVER_MISSING) + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +# Subprocess Check - icurl +@check_wrapper(check_title="Intersight Device Connector upgrade status") +def intersight_upgrade_status_check(**kwargs): + result = FAIL_UF + msg = '' + headers = ["Connector Status"] + data = [] + recommended_action = 'Wait a few minutes for the upgrade to complete' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#intersight-device-connector-upgrade-status' + + cmd = ['icurl', '-gks', 'https://127.0.0.1/connector/UpgradeStatus'] + + log.info('cmd = ' + ' '.join(cmd)) + response = subprocess.check_output(cmd) + try: + resp_json = json.loads(response) + + try: + if resp_json[0]['Status'] != 'Idle': + data.append([resp_json[0]['UpgradeNotification']]) + except KeyError: + if resp_json['code'] == 'InternalServerError': + msg = 'Connector reporting InternalServerError, Non-Upgrade issue' + + if not data: + result = PASS + + except ValueError: + result = NA + msg = 'Intersight Device Connector not responding' + + return Result(result=result, msg=msg, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="ISIS Redistribution metric for MPod/MSite") +def isis_redis_metric_mpod_msite_check(**kwargs): + result = 
FAIL_O + headers = ["ISIS Redistribution Metric", "MPod Deployment", "MSite Deployment"] + data = [] + recommended_action = "" + doc_url = 'http://cs.co/9001zNNr7' # "ISIS Redistribution Metric" from ACI Best Practices Quick Summary + + isis_mo = icurl('mo', 'uni/fabric/isisDomP-default.json') + redistribMetric = isis_mo[0]['isisDomPol']['attributes'].get('redistribMetric') + + msite = False + mpod = False + + if not redistribMetric: + recommended_action = 'Upgrade to 2.2(4f)+ or 3.0(1k)+ to support configurable ISIS Redistribution Metric' + else: + if int(redistribMetric) >= 63: + recommended_action = 'Change ISIS Redistribution Metric to less than 63' + + if recommended_action: + mpod_msite_mo = icurl('class', 'fvFabricExtConnP.json?query-target=children') + if mpod_msite_mo: + pods_list = [] + + for mo in mpod_msite_mo: + if mo.get('fvSiteConnP'): + msite = True + elif mo.get('fvPodConnP'): + podid = mo['fvPodConnP']['attributes'].get('id') + if podid and podid not in pods_list: + pods_list.append(podid) + mpod = (len(pods_list) > 1) + if mpod or msite: + data.append([redistribMetric, mpod, msite]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="BGP route target type for GOLF over L2EVPN") +def bgp_golf_route_target_type_check(cversion, tversion, **kwargs): + result = FAIL_O + headers = ["VRF DN", "Global Name", "Route Target"] + data = [] + recommended_action = "Reconfigure extended: RT with prefix route-target: " + doc_url = 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCvm23100' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if cversion.older_than("4.2(1a)") and tversion.newer_than("4.2(1a)"): + fvctx_mo = icurl('class', 'fvCtx.json?rsp-subtree=full&rsp-subtree-class=l3extGlobalCtxName,bgpRtTarget&rsp-subtree-include=required') + + if fvctx_mo: + for vrf in fvctx_mo: + globalname = '' + vrfdn = 
vrf['fvCtx']['attributes']['dn'] + for child in vrf['fvCtx']['children']: + if child.get('l3extGlobalCtxName'): + globalname = child['l3extGlobalCtxName']['attributes'].get('name') + if globalname != '': + for child in vrf['fvCtx']['children']: + if child.get('bgpRtTargetP'): + for bgprt in child['bgpRtTargetP']['children']: + if bgprt.get('bgpRtTarget') and not bgprt['bgpRtTarget']['attributes']['rt'].startswith('route-target:'): + data.append([vrfdn, globalname, bgprt['bgpRtTarget']['attributes']['rt']]) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="APIC Container Bridge IP Overlap with APIC TEP") +def docker0_subnet_overlap_check(**kwargs): + result = PASS + headers = ["Container Bridge IP", "APIC TEP"] + data = [] + recommended_action = 'Change the container bridge IP via "Apps > Settings" on the APIC GUI' + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-container-bridge-ip-overlap-with-apic-tep" + + containerPols = icurl('mo', 'pluginPolContr/ContainerPol.json') + if not containerPols: + bip = "172.17.0.1/16" + else: + bip = containerPols[0]["apContainerPol"]["attributes"]["containerBip"] + + teps = [] + infraWiNodes = icurl('class', 'infraWiNode.json') + for infraWiNode in infraWiNodes: + if infraWiNode["infraWiNode"]["attributes"]["addr"] not in teps: + teps.append(infraWiNode["infraWiNode"]["attributes"]["addr"]) + + for tep in teps: + if IPAddress.ip_in_subnet(tep, bip): + result = FAIL_UF + data.append([tep, bip]) + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Eventmgr DB size defect susceptibility") +def eventmgr_db_defect_check(cversion, **kwargs): + result = PASS + headers = ["Potential Defect", "Doc URL"] + data = [] + recommended_action = 'Contact Cisco TAC to check the DB 
size via root' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#eventmgr-db-size-defect-susceptibility' + + if cversion.older_than('3.2(5d)') or (cversion.major1 == '4' and cversion.older_than('4.1(1i)')): + data.append(['CSCvn20175', 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCvn20175']) + if cversion.older_than('4.2(4i)') or (cversion.major1 == '5' and cversion.older_than('5.0(1k)')): + data.append(['CSCvt07565', 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCvt07565']) + + if data: + result = FAIL_UF + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Target version compatibility") +def target_version_compatibility_check(cversion, tversion, **kwargs): + result = FAIL_UF + headers = ["Current version", "Target Version", "Warning"] + data = [] + recommended_action = '' + doc_url = 'APIC Upgrade/Downgrade Support Matrix - http://cs.co/9005ydMQP' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + if cversion.simple_version != tversion.simple_version: + compatRsUpgRelString = "uni/fabric/compcat-default/ctlrfw-apic-" + cversion.simple_version + \ + "/rsupgRel-[uni/fabric/compcat-default/ctlrfw-apic-" + tversion.simple_version + "].json" + compatRsUpgRel = icurl('mo', compatRsUpgRelString) + if not compatRsUpgRel: + compatRtUpgRelString = "uni/fabric/compcat-default/ctlrfw-apic-" + cversion.simple_version + \ + "/rtupgRel-[uni/fabric/compcat-default/ctlrfw-apic-" + tversion.simple_version + "].json" + compatRtUpgRel = icurl('mo', compatRtUpgRelString) + if not compatRtUpgRel: + data.append([str(cversion), str(tversion), 'Target version not a supported hop']) + + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Gen 1 switch compatibility") +def 
gen1_switch_compatibility_check(tversion, **kwargs): + result = FAIL_UF + headers = ["Target Version", "Node ID", "Model", "Warning"] + gen1_models = ["N9K-C9336PQ", "N9K-X9736PQ", "N9K-C9504-FM", "N9K-C9508-FM", "N9K-C9516-FM", "N9K-C9372PX-E", + "N9K-C9372TX-E", "N9K-C9332PQ", "N9K-C9372PX", "N9K-C9372TX", "N9K-C9396PX", "N9K-C9396TX", + "N9K-C93128TX"] + data = [] + recommended_action = 'Select supported target version or upgrade hardware' + doc_url = 'http://cs.co/9001ydKCV' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + if tversion.newer_than("5.0(1a)"): + fabric_node = icurl('class', 'fabricNode.json') + for node in fabric_node: + if node['fabricNode']['attributes']['model'] in gen1_models: + data.append([str(tversion), node['fabricNode']['attributes']['id'], + node['fabricNode']['attributes']['model'], 'Not supported on 5.x+']) + if not data: + result = PASS + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Contract Port 22 Defect") +def contract_22_defect_check(cversion, tversion, **kwargs): + result = PASS + headers = ["Potential Defect", "Reason"] + data = [] + recommended_action = 'Review Cisco Software Advisory Notices for CSCvz65560' + doc_url = 'http://cs.co/9007yh22H' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if cversion.older_than("5.0(1a)") and (tversion.newer_than("5.0(1a)") and + tversion.older_than("5.2(2g)")): + result = FAIL_O + data.append(["CSCvz65560", "Target Version susceptible to Defect"]) + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="Link Level Flow Control") +def llfc_susceptibility_check(cversion, tversion, vpc_node_ids, **kwargs): + result = PASS + headers = ["Pod", "NodeId", "Int", "Type", "BugId", "Warning"] + data = [] + sx_affected = t_affected = False + recommended_action = 
'Manually change Peer devices Transmit(send) Flow Control to off prior to switch Upgrade' + doc_url = 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCvo27498' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if not vpc_node_ids: + return Result(result=PASS, msg="No VPC Nodes found. Not susceptible.") + + # Check for Fiber 1000base-SX, CSCvv33100 + if cversion.older_than("4.2(6d)") and tversion.newer_than("4.2(6c)"): + sx_affected = True + + # Check for Copper 1000base-T, CSCvj67507 fixed by CSCwd37387 + if cversion.older_than("4.1(1i)") and tversion.newer_than("4.1(1h)") and tversion.older_than("5.2(7f)"): + t_affected = True + + if sx_affected or t_affected: + ethpmFcot = icurl('class', 'ethpmFcot.json?query-target-filter=and(eq(ethpmFcot.type,"sfp"),eq(ethpmFcot.state,"inserted"))') + + for fcot in ethpmFcot: + typeName = fcot['ethpmFcot']['attributes']['typeName'] + dn = fcot['ethpmFcot']['attributes']['dn'] + + m = re.match(r'topology/pod-(?P<podid>\d+)/node-(?P<nodeid>\d+)/.+/phys-\[(?P<int>eth\d/\d+)\]', dn) + podid = m.group('podid') + nodeid = m.group('nodeid') + int = m.group('int') + + if sx_affected and typeName == "1000base-SX": + data.append([podid, nodeid, int, typeName, 'CSCvv33100', 'Check Peer Device LLFC behavior']) + + if t_affected and typeName == "1000base-T": + data.append([podid, nodeid, int, typeName, 'CSCwd37387', 'Check Peer Device LLFC behavior']) + + if data: + result = MANUAL + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title="telemetryStatsServerP Object") +def telemetryStatsServerP_object_check(sw_cversion, tversion, **kwargs): + result = PASS + headers = ["Current version", "Target Version", "Warning"] + data = [] + recommended_action = 'Change telemetryStatsServerP.collectorLocation to "none" prior to upgrade' + doc_url = 
@check_wrapper(check_title="telemetryStatsServerP Object")
def telemetryStatsServerP_object_check(sw_cversion, tversion, **kwargs):
    """Flag telemetryStatsServerP MOs pointing at the APIC before an affected upgrade hop."""
    result = PASS
    headers = ["Current version", "Target Version", "Warning"]
    data = []
    recommended_action = 'Change telemetryStatsServerP.collectorLocation to "none" prior to upgrade'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#telemetrystatserverp-object'

    if not sw_cversion:
        return Result(result=MANUAL, msg="Current switch version not found. Check switch health.")

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    if sw_cversion.older_than("4.2(4d)") and tversion.newer_than("5.2(2d)"):
        for serverp in icurl('class', 'telemetryStatsServerP.json'):
            if serverp["telemetryStatsServerP"]["attributes"].get("collectorLocation") == "apic":
                result = FAIL_O
                data.append([str(sw_cversion), str(tversion), 'telemetryStatsServerP.collectorLocation = "apic" Found'])

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="Internal VLAN Pool")
def internal_vlanpool_check(tversion, **kwargs):
    """CSCvw33061: front-panel VLANs in blocks marked "internal" on non-AVE domains
    are removed from leaf front-panel ports after upgrading past 4.2(6a)."""
    result = PASS
    headers = ["VLAN Pool", "Internal VLAN Block(s)", "Non-AVE Domain", "Warning"]
    data = []
    recommended_action = 'Ensure Leaf Front-Panel VLAN Blocks are explicitly set to "external (on the wire)"'
    doc_url = 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCvw33061'

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    if tversion.newer_than("4.2(6a)"):
        warning = 'VLANs in this Block will be removed from switch Front-Panel if not corrected'
        fvnsVlanInstP_json = icurl('class', 'fvnsVlanInstP.json?rsp-subtree=children&rsp-subtree-class=fvnsRtVlanNs,fvnsEncapBlk&rsp-subtree-include=required')
        # pool name -> list of associated domains ({"dn": tDn, "tCl": target class})
        dom_rel = {}
        # pool names containing at least one fvnsEncapBlk with role "internal" (may repeat)
        encap_list = []
        # pool name -> list of internal encap block rns
        encap_blk_dict = {}
        for vlanInstP in fvnsVlanInstP_json:
            pool_name = vlanInstP['fvnsVlanInstP']["attributes"]["name"]
            internal_blks = []
            doms = []
            for vlan_child in vlanInstP['fvnsVlanInstP']['children']:
                if vlan_child.get('fvnsRtVlanNs'):
                    doms.append({"dn": vlan_child['fvnsRtVlanNs']['attributes']['tDn'],
                                 "tCl": vlan_child['fvnsRtVlanNs']['attributes']['tCl']})
                elif vlan_child.get('fvnsEncapBlk'):
                    if vlan_child['fvnsEncapBlk']['attributes']['role'] == "internal":
                        encap_list.append(pool_name)
                        internal_blks.append(vlan_child['fvnsEncapBlk']['attributes']['rn'])
            dom_rel[pool_name] = doms
            if internal_blks:
                encap_blk_dict[pool_name] = internal_blks

        if encap_list:
            # Domains associated to any pool that carries an internal encap block.
            assoc_doms = []
            for pool_name in encap_list:
                for dom in dom_rel[pool_name]:
                    if dom["tCl"] != "vmmDomP":
                        result = FAIL_O
                        # Deduplicate rows across multiple encap blocks / domains.
                        row = [pool_name, ', '.join(encap_blk_dict[pool_name]), dom["dn"], warning]
                        if row not in data:
                            data.append(row)
                    assoc_doms.append(dom["dn"])
            # VMM domains are only safe when AVE is enabled on them.
            for vmmDomP in icurl('class', 'vmmDomP.json'):
                vmm_dn = vmmDomP["vmmDomP"]["attributes"]["dn"]
                if vmm_dn in assoc_doms and vmmDomP["vmmDomP"]["attributes"]["enableAVE"] != "yes":
                    result = FAIL_O
                    for pool_name in encap_list:
                        for dom in dom_rel[pool_name]:
                            if vmm_dn == dom["dn"]:
                                row = [pool_name, ', '.join(encap_blk_dict[pool_name]), vmm_dn, warning]
                                if row not in data:
                                    data.append(row)

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
# Subprocess check - openssl
@check_wrapper(check_title="APIC CA Cert Validation")
def apic_ca_cert_validation(**kwargs):
    """Submit a throwaway CSR to the APIC CA (raca/certreq) to detect CSCvy35257.

    An error attribute in the certreq response indicates broken APIC CA certs.
    kwargs["certreq_out"] may pre-supply the response (used by tests).
    """
    headers = ["Certreq Response"]
    data = []
    recommended_action = "Contact Cisco TAC to fix APIC CA Certs"
    doc_url = 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCvy35257'

    certreq_out = kwargs.get("certreq_out")
    if not certreq_out:
        pki_fabric_ca_mo = icurl('class', 'pkiFabricSelfCAEp.json')
        if pki_fabric_ca_mo:
            # Prep csr
            passphrase = pki_fabric_ca_mo[0]['pkiFabricSelfCAEp']['attributes']['currCertReqPassphrase']
            cert_gen_filename = "preupgrade_gen.cnf"
            key_pem = 'preupgrade_temp.key.pem'
            csr_pem = 'preupgrade_temp.csr.pem'
            sign = 'preupgrade_temp.sign'
            cert_gen_cnf = '''
            [ req ]
            default_bits = 2048
            distinguished_name = req_distinguished_name
            string_mask = utf8only
            default_md = sha512
            prompt = no

            [ req_distinguished_name ]
            commonName = aci_pre_upgrade
            '''
            # Re-run cleanup for Issue #120
            for leftover in (cert_gen_filename, key_pem, csr_pem, sign):
                if os.path.exists(leftover):
                    os.remove(leftover)

            with open(cert_gen_filename, 'w') as f:
                f.write(cert_gen_cnf)

            # Generate csr for certreq
            cmd = 'openssl genrsa -out ' + key_pem + ' 2048'
            cmd = cmd + ' && openssl req -config ' + cert_gen_filename + ' -new -key ' + key_pem + ' -out ' + csr_pem
            cmd = cmd + ' && openssl dgst -sha256 -hmac ' + passphrase + ' -out ' + sign + ' ' + csr_pem
            log.debug('cmd = ' + ''.join(cmd))
            genrsa_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
            genrsa_proc.communicate()[0].strip()
            if genrsa_proc.returncode != 0:
                return Result(result=ERROR, msg="openssl cmd issue, send logs to TAC.")

            # Prep certreq
            with open(sign) as f:
                hmac = f.read().strip().split(' ')[-1]
            with open(csr_pem) as f:
                certreq = f.read().strip()

            # file cleanup
            subprocess.check_output(['rm', '-rf', sign, csr_pem, key_pem, cert_gen_filename])

            # Perform test certreq
            url = 'https://127.0.0.1/raca/certreq.json'
            payload = '{"aaaCertGenReq":{"attributes":{"type":"csvc","hmac":"%s", "certreq": "%s", ' \
                      '"podip": "None", "podmac": "None", "podname": "None"}}}' % (hmac, certreq)
            cmd = 'icurl -kX POST %s -d \' %s \'' % (url, payload)
            log.debug('cmd = ' + ''.join(cmd))
            certreq_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
            certreq_out = certreq_proc.communicate()[0].strip()

    log.debug(certreq_out)
    if '"error":{"attributes"' in str(certreq_out):
        # Spines can crash on 5.2(6e)+, but APIC CA Certs should be fixed regardless of tver
        data.append([certreq_out])

    result = FAIL_O if data else PASS
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="FabricDomain Name")
def fabricdomain_name_check(cversion, tversion, **kwargs):
    """CSCwf80352: 6.0(2h) cannot handle '#' or ';' in the fabricDomain name."""
    headers = ["FabricDomain", "Reason"]
    data = []
    recommended_action = "Do not upgrade to 6.0(2)"
    doc_url = 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCwf80352'

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    if tversion.same_as("6.0(2h)"):
        controller = icurl('class', 'topSystem.json?query-target-filter=eq(topSystem.role,"controller")')
        if not controller:
            return Result(result=ERROR, msg='topSystem response empty. Is the cluster healthy?')

        fabricDomain = controller[0]['topSystem']['attributes']['fabricDomain']
        if re.search(r'#|;', fabricDomain):
            data.append([fabricDomain, "Contains a special character"])

    result = FAIL_O if data else PASS
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
@check_wrapper(check_title="Spine SUP HW Revision")
def sup_hwrev_check(cversion, tversion, **kwargs):
    """FN74050: flag spine SUP part numbers with VRM and/or FPGA concerns."""
    result = FAIL_O
    headers = ["Pod", "Node", "Sup Slot", "Part Number", "VRM Concern", "FPGA Concern"]
    data = []
    recommended_action = "Review Field Notice FN74050 within Reference Document for all details."
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#spine-sup-hw-revision'
    vrm_concern = False
    fpga_concern = False

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    if cversion.older_than("5.2(8f)"):
        vrm_concern = True
        recommended_action += "\n\tFor VRM Concern: Consider vrm_update script within FN74050"

    # NOTE(review): `and` binds tighter than `or`, so the cversion range gates only the
    # first tversion clause — a 6.0 target older than 6.0(3d) raises the FPGA concern
    # regardless of cversion. Confirm against FN74050 whether that is intended.
    if (
        cversion.newer_than("5.2(1a)") and cversion.older_than("6.0(1a)")
        and tversion.older_than("5.2(8f)") or (tversion.major1 == "6" and tversion.older_than("6.0(3d)"))
    ):
        fpga_concern = True
        recommended_action += "\n\tFor FPGA Concern: Consider a target version with fix for CSCwb86706"

    if vrm_concern or fpga_concern:
        sup_re = r'/.+(?P<supslot>supslot-\d+)'
        sups = icurl('class', 'eqptSpCmnBlk.json?&query-target-filter=wcard(eqptSpromSupBlk.dn,"sup")')
        if not sups:
            return Result(result=ERROR, msg='No sups found. This is unlikely.')

        # Affected SUP part numbers per FN74050.
        affected_pns = ('73-18562-02', '73-18562-03', '73-18570-02', '73-18570-03')
        for sup in sups:
            prtNum = sup['eqptSpCmnBlk']['attributes']['prtNum']
            if prtNum in affected_pns:
                dn = re.search(node_regex + sup_re, sup['eqptSpCmnBlk']['attributes']['dn'])
                data.append([dn.group("pod"), dn.group("node"), dn.group("supslot"),
                             prtNum, vrm_concern, fpga_concern])

    if not data:
        result = PASS
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="Per-Leaf Fabric Uplink Limit")
def uplink_limit_check(cversion, tversion, **kwargs):
    """6.0(1a)+ enforces a 56-uplink-per-leaf limit on port-profile conversions."""
    result = PASS
    headers = ["Node", "Uplink Count"]
    data = []
    recommended_action = "Reduce Per-Leaf Port Profile Uplinks to supported scale; 56 or less."
    doc_url = 'http://cs.co/ACI_Access_Interfaces_Config_Guide'

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    if cversion.older_than("6.0(1a)") and tversion.newer_than("6.0(1a)"):
        port_profiles = icurl('class', 'eqptPortP.json?query-target-filter=eq(eqptPortP.ctrl,"uplink")')
        # Fewer than 57 uplink profiles fabric-wide cannot exceed the per-leaf limit.
        if len(port_profiles) > 56:
            per_node = {}
            for pp in port_profiles:
                node_id = re.search(node_regex, pp['eqptPortP']['attributes']['dn']).group("node")
                per_node[node_id] = per_node.get(node_id, 0) + 1

            for node, count in per_node.items():
                if count > 56:
                    data.append([node, count])

    if data:
        result = FAIL_O
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
with the above config." + "\n\tOtherwise, APIC access will be limited to the above subnets and the same subnet as APIC OoB after the upgrade." + ) + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#oob-mgmt-security" + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + affected_versions = ["4.2(7)", "5.2(1)", "5.2(2)"] + if cversion.simple_version not in affected_versions or ( + cversion.simple_version in affected_versions + and tversion.simple_version in affected_versions + ): + return Result(result=NA, msg=VER_NOT_AFFECTED) + + # ACI Node EPGs (providers) + mgmtOoBs = icurl("class", "mgmtOoB.json?rsp-subtree=children") + # External Instant Profiles (consumers) + mgmtInstPs = icurl("class", "mgmtInstP.json?rsp-subtree=children") + + contract_to_providers = defaultdict(list) + for mgmtOoB in mgmtOoBs: + for child in mgmtOoB["mgmtOoB"].get("children", []): + if child.get("mgmtRsOoBProv"): + epg_name = mgmtOoB["mgmtOoB"]["attributes"]["name"] + contract_name = child["mgmtRsOoBProv"]["attributes"]["tnVzOOBBrCPName"] + contract_to_providers[contract_name].append(epg_name) + + for mgmtInstP in mgmtInstPs: + consumer = mgmtInstP["mgmtInstP"]["attributes"]["name"] + providers = defaultdict(list) + subnets = [] + for child in mgmtInstP["mgmtInstP"].get("children", []): + if child.get("mgmtRsOoBCons"): + contract = child["mgmtRsOoBCons"]["attributes"]["tnVzOOBBrCPName"] + for prov in contract_to_providers.get(contract, []): + providers[prov].append(contract) + elif child.get("mgmtSubnet"): + subnets.append(child["mgmtSubnet"]["attributes"]["ip"]) + + if not subnets or not providers: + continue + + for provider, contracts in providers.items(): + data.append([ + provider, + "{} ({})".format(consumer, ", ".join(subnets)), + ", ".join(contracts) + ]) + + if data: + result = MANUAL + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + 
@check_wrapper(check_title="Mini ACI Upgrade to 6.0(2)+")
def mini_aci_6_0_2_check(cversion, tversion, **kwargs):
    """Virtual APICs in a Mini ACI cluster must be removed before upgrading to 6.0(2)+."""
    headers = ["Pod ID", "Node ID", "APIC Type", "Failure Reason"]
    data = []
    recommended_action = "All virtual APICs must be removed from the cluster prior to upgrading to 6.0(2)+."
    doc_url = 'Upgrading Mini ACI - http://cs.co/9009bBTQB'

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    if cversion.older_than("6.0(2a)") and tversion.newer_than("6.0(2a)"):
        topSystem = icurl('class', 'topSystem.json?query-target-filter=wcard(topSystem.role,"controller")')
        if not topSystem:
            return Result(result=ERROR, msg='topSystem response empty. Is the cluster healthy?')
        for controller in topSystem:
            attrs = controller['topSystem']['attributes']
            if attrs['nodeType'] == "virtual":
                data.append([attrs["podId"], attrs['id'], "virtual",
                             "Virtual APIC must be removed prior to upgrade to 6.0(2)+"])

    result = PASS if not data else FAIL_UF
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="SUP-A/A+ High Memory Usage")
def sup_a_high_memory_check(tversion, **kwargs):
    """SUP-A/A+ modules run high on memory on 6.0(3)/(4)/(5) targets."""
    result = PASS
    headers = ["Pod ID", "Node ID", "SUP Model", "Active/Standby"]
    data = []
    recommended_action = "Change the target version to the one with memory optimization in a near-future 6.0 release."
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#sup-aa-high-memory-usage"

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    affected_versions = ["6.0(3)", "6.0(4)", "6.0(5)"]
    if tversion.simple_version in affected_versions:
        for eqptSupC in icurl("class", "eqptSupC.json"):
            attrs = eqptSupC["eqptSupC"]["attributes"]
            if attrs["model"] in ("N9K-SUP-A", "N9K-SUP-A+"):
                dn = re.search(node_regex, attrs["dn"])
                data.append([dn.group("pod"), dn.group("node"), attrs["model"], attrs["rdSt"]])

    if data:
        result = FAIL_O
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
+ doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#access-untagged-port-config' + + faultInsts = icurl('class', 'faultInst.json?&query-target-filter=wcard(faultInst.changeSet,"native-or-untagged-encap-failure")') + fault_dn_regex = r"topology/pod-(?P<podid>\d+)/node-(?P<nodeid>[^/]+)/[^/]+/[^/]+/uni/epp/fv-\[uni/tn-(?P<tenant>[^/]+)/ap-(?P<app_profile>[^/]+)/epg-(?P<epg_name>[^/]+)\]/[^/]+/stpathatt-\[(?P<port>.+)\]/nwissues/fault-F0467" + + if faultInsts: + fc = faultInsts[0]['faultInst']['attributes']['code'] + for faultInst in faultInsts: + m = re.search(fault_dn_regex, faultInst['faultInst']['attributes']['dn']) + if m: + podid = m.group('podid') + nodeid = m.group('nodeid') + port = m.group('port') + tenant = m.group('tenant') + app_profile = m.group('app_profile') + epg_name = m.group('epg_name') + data.append([fc, podid, nodeid, port, tenant, app_profile, epg_name, recommended_action]) + else: + unformatted_data.append(fc, faultInst['faultInst']['attributes']['descr'], recommended_action) + + if not data and not unformatted_data: + result = PASS + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + + +@check_wrapper(check_title="Post Upgrade Callback Integrity") +def post_upgrade_cb_check(cversion, tversion, **kwargs): + result = PASS + headers = ["Missed Objects", "Impact"] + data = [] + recommended_action = 'Contact Cisco TAC with Output' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#post-upgrade-callback-integrity' + + new_mo_dict = { + "infraImplicitSetPol": { + "CreatedBy": "", + "SinceVersion": ["3.2(10e)"], + "Impact": "Infra implicit settings will not be deployed", + }, + "infraRsToImplicitSetPol": { + "CreatedBy": "infraImplicitSetPol", + "SinceVersion": ["3.2(10e)"], + "Impact": "Infra implicit settings will not be 
@check_wrapper(check_title="Post Upgrade Callback Integrity")
def post_upgrade_cb_check(cversion, tversion, **kwargs):
    """Verify objects that postUpgradeCb should have created after a previous upgrade exist.

    Meant to be run after the APICs are already on the target version; returns
    POST otherwise. Compares MO counts against their creating/parent class.
    """
    result = PASS
    headers = ["Missed Objects", "Impact"]
    data = []
    recommended_action = 'Contact Cisco TAC with Output'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#post-upgrade-callback-integrity'

    # Class -> {CreatedBy: trigger class ("" if standalone),
    #           SinceVersion: first release(s) shipping the class per train,
    #           Impact: user-visible effect when missing}
    new_mo_dict = {
        "infraImplicitSetPol": {
            "CreatedBy": "",
            "SinceVersion": ["3.2(10e)"],
            "Impact": "Infra implicit settings will not be deployed",
        },
        "infraRsToImplicitSetPol": {
            "CreatedBy": "infraImplicitSetPol",
            "SinceVersion": ["3.2(10e)"],
            "Impact": "Infra implicit settings will not be deployed",
        },
        "fvSlaDef": {
            "CreatedBy": "fvIPSLAMonitoringPol",
            "SinceVersion": ["4.1(1i)"],
            "Impact": "IPSLA monitor policy will not be deployed",
        },
        "infraRsConnectivityProfileOpt": {
            "CreatedBy": "infraRsConnectivityProfile",
            "SinceVersion": ["5.2(4d)"],
            "Impact": "VPC for missing Mo will not be deployed to leaf",
        },
        "infraAssocEncapInstDef": {
            "CreatedBy": "infraRsToEncapInstDef",
            "SinceVersion": ["5.2(4d)"],
            "Impact": "VLAN for missing Mo will not be deployed to leaf",
        },
        "infraRsToInterfacePolProfileOpt": {
            "CreatedBy": "infraRsToInterfacePolProfile",
            "SinceVersion": ["5.2(8d)", "6.0(3d)"],
            "Impact": "VLAN for missing Mo will not be deployed to leaf",
        },
        "compatSwitchHw": {
            "CreatedBy": "",  # suppBit attribute is available from 6.0(2h)
            "SinceVersion": ["6.0(2h)"],
            "Impact": "Unexpected 64/32 bit image can deploy to switches",
        },
    }
    if not tversion or cversion.older_than(str(tversion)):
        return Result(result=POST, msg="Re-run script after APICs are upgraded and back to Fully-Fit")

    for new_mo, info in new_mo_dict.items():
        # Skip classes that predate the current version entirely.
        if cversion.older_than(info['SinceVersion'][0]):
            continue
        # Skip when the class is newer than cversion within its own release train.
        skip_current_mo = False
        for version in info['SinceVersion']:
            if version[0] == str(cversion)[0] and AciVersion(version).newer_than(str(cversion)):
                skip_current_mo = True
        if skip_current_mo:
            continue

        created_by_mo = info['CreatedBy']
        api = "{}.json?rsp-subtree-include=count"
        if new_mo == "compatSwitchHw":
            # Expected to see suppBit in 32 or 64. Zero 32 means a failed postUpgradeCb.
            api += '&query-target-filter=eq(compatSwitchHw.suppBit,"32")'

        new_mo_count = int(icurl("class", api.format(new_mo))[0]['moCount']['attributes']['count'])
        if created_by_mo == "":
            if new_mo_count == 0:
                data.append([new_mo, info["Impact"]])
        else:
            created_by_mo_count = int(icurl('class', api.format(created_by_mo))[0]['moCount']['attributes']['count'])
            if created_by_mo_count != new_mo_count:
                data.append([new_mo, info["Impact"]])

    if data:
        result = FAIL_O
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="EECDH SSL Cipher")
def eecdh_cipher_check(cversion, **kwargs):
    """A disabled EECDH cipher can leave the APIC GUI unreachable after upgrade."""
    headers = ["DN", "Cipher", "State", "Failure Reason"]
    data = []
    recommended_action = "Re-enable EECDH key exchange prior to APIC upgrade."
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#eecdh-ssl-cipher'

    if cversion.newer_than("4.2(1a)"):
        for cipher in icurl('class', 'commCipher.json'):
            attrs = cipher['commCipher']['attributes']
            if attrs['id'] == "EECDH" and attrs['state'] == "disabled":
                data.append([attrs['dn'], "EECDH", "disabled",
                             "Secure key exchange is disabled which may cause APIC GUI to be down after upgrade."])

    result = FAIL_UF if data else PASS
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
@check_wrapper(check_title="fvUplinkOrderCont with blank active uplinks definition")
def vmm_active_uplinks_check(**kwargs):
    """Flag EPG VMM domain associations whose uplink-order container has no active uplinks."""
    result = PASS
    headers = ["Tenant", "Application Profile", "Application EPG", "VMM Domain"]
    data = []
    recommended_action = 'Identify Active Uplinks and apply this to the VMM domain association of each EPG'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#vmm-uplink-container-with-empty-actives'

    uplink_api = 'fvUplinkOrderCont.json'
    uplink_api += '?query-target-filter=eq(fvUplinkOrderCont.active,"")'
    vmm_epg_regex = r"uni/tn-(?P<tenant>[^/]+)/ap-(?P<ap>[^/]+)/epg-(?P<epg>[^/]+)/rsdomAtt-\[uni/vmmp-.+/dom-(?P<dom>.+)\]"

    try:
        affected_uplinks = icurl('class', uplink_api)
    except OldVerClassNotFound:
        # Pre 4.x did not have this class
        return Result(result=NA, msg="cversion does not have class fvUplinkOrderCont")

    if affected_uplinks:
        result = FAIL_O
        for uplink in affected_uplinks:
            dn = re.search(vmm_epg_regex, uplink['fvUplinkOrderCont']['attributes']['dn'])
            data.append([dn.group("tenant"), dn.group("ap"), dn.group("epg"), dn.group("dom")])

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)


@check_wrapper(check_title="Fabric Port Status (F1394 ethpm-if-port-down-fabric)")
def fabric_port_down_check(**kwargs):
    """Report fabric ports that are down (fault F1394, rule ethpm-if-port-down-fabric)."""
    result = FAIL_O
    headers = ["Pod", "Node", "Int", "Reason", "Lifecycle"]
    unformatted_headers = ['dn', 'Fault Description', 'Lifecycle']
    unformatted_data = []
    data = []
    recommended_action = 'Identify if these ports are needed for redundancy and reason for being down'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#fabric-port-status'

    fault_api = 'faultInst.json'
    fault_api += '?&query-target-filter=and(eq(faultInst.code,"F1394")'
    fault_api += ',eq(faultInst.rule,"ethpm-if-port-down-fabric"))'

    faultInsts = icurl('class', fault_api)
    dn_re = node_regex + r'/.+/phys-\[(?P<int>eth\d/\d+)\]'

    for faultInst in faultInsts:
        attrs = faultInst['faultInst']['attributes']
        m = re.search(dn_re, attrs['dn'])
        if m:
            # BUGFIX: descr.split("reason:")[1] raised IndexError when a fault
            # description carried no "reason:" token. partition() returns an
            # empty separator in that case; fall back to the full description.
            _, sep, reason = attrs['descr'].partition("reason:")
            if not sep:
                reason = attrs['descr']
            data.append([m.group('pod'), m.group('node'), m.group('int'), reason, attrs['lc']])
        else:
            unformatted_data.append([attrs['dn'], attrs['descr'], attrs['lc']])

    if not data and not unformatted_data:
        result = PASS
    return Result(
        result=result,
        headers=headers,
        data=data,
        unformatted_headers=unformatted_headers,
        unformatted_data=unformatted_data,
        recommended_action=recommended_action,
        doc_url=doc_url,
    )


@check_wrapper(check_title='CoS 3 with Dynamic Packet Prioritization')
def fabric_dpp_check(tversion, **kwargs):
    """CSCwf05073: dynamic packet prioritization (lbpPol.pri=on) with affected targets."""
    result = PASS
    headers = ["Potential Defect", "Reason"]
    data = []
    recommended_action = 'Change the target version to the fixed version of CSCwf05073'
    doc_url = 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCwf05073'

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    lbpol_api = 'lbpPol.json'
    lbpol_api += '?query-target-filter=eq(lbpPol.pri,"on")'

    lbpPol = icurl('class', lbpol_api)
    if lbpPol:
        target_affected = (
            (tversion.newer_than("5.1(1h)") and tversion.older_than("5.2(8e)"))
            or (tversion.major1 == "6" and tversion.older_than("6.0(3d)"))
        )
        if target_affected:
            result = FAIL_O
            data.append(["CSCwf05073", "Target Version susceptible to Defect"])

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
== "6" and tversion.older_than("6.0(4a)")) + ): + api = 'fabricNode.json' + api += '?query-target-filter=or(' + api += 'eq(fabricNode.model,"N9K-C93108TC-FX3P"),' + api += 'eq(fabricNode.model,"N9K-C93108TC-FX3H"))' + nodes = icurl('class', api) + for node in nodes: + nodeid = node["fabricNode"]["attributes"]["id"] + name = node["fabricNode"]["attributes"]["name"] + pid = node["fabricNode"]["attributes"]["model"] + data.append([nodeid, name, pid]) + + if data: + result = FAIL_O + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='BD and EPG Subnet Scope Consistency') +def subnet_scope_check(cversion, **kwargs): + result = PASS + headers = ["BD DN", "BD Scope", "EPG DN", "EPG Scope"] + data = [] + recommended_action = 'Configure the same Scope for the identified subnet pairings' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#bd-and-epg-subnet-scope-consistency' + + if cversion.older_than("4.2(6d)") or (cversion.major1 == "5" and cversion.older_than("5.1(1h)")): + epg_api = 'fvAEPg.json?' + epg_api += 'rsp-subtree=children&rsp-subtree-class=fvSubnet&rsp-subtree-include=required' + + fvAEPg = icurl('class', epg_api) + if not fvAEPg: + return Result(result=NA, msg="No EPG Subnets found. 
Skipping.") + + bd_api = 'fvBD.json' + bd_api += '?rsp-subtree=children&rsp-subtree-class=fvSubnet&rsp-subtree-include=required' + + fvBD = icurl('class', bd_api) + fvRsBd = icurl('class', 'fvRsBd.json') + + epg_to_subnets = {} + # EPG subnets *tend* to be fewer, build out lookup dict by EPG first + # {"epg_dn": {subnet1: scope, subnet2: scope},...} + for epg in fvAEPg: + subnet_scopes = {} + for subnet in epg['fvAEPg']['children']: + subnet_scopes[subnet["fvSubnet"]["attributes"]["ip"]] = subnet["fvSubnet"]["attributes"]["scope"] + epg_to_subnets[epg['fvAEPg']['attributes']['dn']] = subnet_scopes + + bd_to_epg = {} + # Build out BD to epg lookup, if EPG has a subnet (entry in epg_to_subnets) + # {bd_tdn: [epg1, epg2, epg3...]} + for reln in fvRsBd: + epg_dn = reln["fvRsBd"]["attributes"]["dn"].replace('/rsbd', '') + bd_tdn = reln["fvRsBd"]["attributes"]["tDn"] + if epg_to_subnets.get(epg_dn): + bd_to_epg.setdefault(bd_tdn, []).append(epg_dn) + + # walk through BDs and lookup EPG subnets to check scope + for bd in fvBD: + bd_dn = bd["fvBD"]["attributes"]["dn"] + epgs_to_check = bd_to_epg.get(bd_dn) + if epgs_to_check: + for fvSubnet in bd['fvBD']['children']: + bd_subnet = fvSubnet["fvSubnet"]["attributes"]["ip"] + bd_scope = fvSubnet["fvSubnet"]["attributes"]["scope"] + for epg_dn in epgs_to_check: + epg_scope = epg_to_subnets[epg_dn].get(bd_subnet) + if bd_scope != epg_scope: + data.append([bd_dn, bd_scope, epg_dn, epg_scope]) + + if data: + result = FAIL_O + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='Route-map Community Match Defect') +def rtmap_comm_match_defect_check(tversion, **kwargs): + result = PASS + headers = ["Route-map DN", "Route-map Match DN", "Failure Reason"] + data = [] + recommended_action = 'Add a prefix list match to each route-map prior to upgrading.' 
@check_wrapper(check_title='Invalid fabricPathEp Targets')
def fabricPathEp_target_check(**kwargs):
    """Validate that infraRsHPathAtt/fabricRsOosPath targets are well-formed fabricPathEp DNs.

    Checks FEX IDs embedded in extpaths (must be 101+) and the eth module/port
    syntax of the pathep, including breakout sub-port ranges.
    """
    result = PASS
    headers = ["Invalid DN", "Reason"]
    data = []
    recommended_action = 'Contact TAC for cleanup procedure'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#invalid-fex-fabricpathep-dn-references'
    fabricPathEp_regex = r"topology/pod-\d+/(?:\w+)?paths-\d+(?:-\d+)?(?:/ext(?:\w+)?paths-(?P<fexA>\d+)(?:-(?P<fexB>\d+))?)?/pathep-\[(?P<path>.+)\]"
    eth_regex = r'eth(?P<first>\d+)/(?P<second>\d+)(?:/(?P<third>\d+))?'

    infraRsHPathAtt = icurl('class', 'infraRsHPathAtt.json')
    fabricRsOosPath = icurl('class', 'fabricRsOosPath.json')

    for obj in infraRsHPathAtt + fabricRsOosPath:
        # Each object is one class or the other; fall through to the second
        src = obj.get('infraRsHPathAtt') or obj.get('fabricRsOosPath') or {}
        attributes = src.get('attributes', {})
        dn = attributes.get('dn', '')
        tDn = attributes.get('tDn', '')

        # CHECK: tDn must look like a valid fabricPathEp
        pathep_match = re.search(fabricPathEp_regex, tDn)
        if not pathep_match:
            data.append([dn, "target is not a valid fabricPathEp DN"])
            continue

        fex_a = pathep_match.group("fexA")
        fex_b = pathep_match.group("fexB")
        path = pathep_match.group("path")

        # CHECK: FEX ID(s) of extpath(s) must be 101 or greater
        if fex_a and int(fex_a) < 101:
            data.append([dn, "FEX ID A {} is invalid (101+ expected)".format(fex_a)])
        if fex_b and int(fex_b) < 101:
            data.append([dn, "FEX ID B {} is invalid (101+ expected)".format(fex_b)])

        # The path group is mandatory in the regex, so it is always present here
        if 'eth' in path.lower():
            # CHECK: path must use proper ethx/y or ethx/y/z formatting
            eth_match = re.search(eth_regex, path)
            if not eth_match:
                data.append([dn, "PathEp 'eth' syntax is invalid"])
                continue
            first = eth_match.group("first")
            second = eth_match.group("second")
            third = eth_match.group("third")

            # CHECK: a module number above 100 looks like an embedded FEX ID
            if first and int(first) > 100:
                data.append([dn, "eth module {} like FEX ID".format(first)])
            # CHECK: port must be non-zero
            if second and int(second) == 0:
                data.append([dn, "eth port cannot be 0"])
            # CHECK: breakout sub-port must be within 1-16
            if third:
                if int(third) == 0:
                    data.append([dn, "eth port cannot be 0 for breakout ports"])
                elif int(third) > 16:
                    data.append([dn, "eth port {} is invalid (1-16 expected) for breakout ports".format(third)])

    if data:
        result = FAIL_UF

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
+ doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#lldp-custom-interface-description' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if tversion.major1 == '6' and tversion.older_than('6.0(3a)'): + custom_int_count = icurl('class', 'infraPortBlk.json?query-target-filter=ne(infraPortBlk.descr,"")&rsp-subtree-include=count')[0]['moCount']['attributes']['count'] + lazy_vmm_count = icurl('class', 'fvRsDomAtt.json?query-target-filter=and(eq(fvRsDomAtt.tCl,"vmmDomP"),eq(fvRsDomAtt.resImedcy,"lazy"))&rsp-subtree-include=count')[0]['moCount']['attributes']['count'] + + if int(custom_int_count) > 0 and int(lazy_vmm_count) > 0: + result = FAIL_O + data.append(['CSCwf00416']) + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='Unsupported FEC Configuration For N9K-C93180YC-EX') +def unsupported_fec_configuration_ex_check(sw_cversion, tversion, **kwargs): + result = PASS + headers = ["Pod ID", "Node ID", "Switch Model", "Interface", "FEC Mode"] + data = [] + recommended_action = 'Nexus C93180YC-EX switches do not support IEEE-RS-FEC or CONS16-RS-FEC mode. Misconfigured ports will be hardware disabled upon upgrade. Remove unsupported FEC configuration prior to upgrade.' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#unsupported-fec-configuration-for-n9k-c93180yc-ex' + + if not sw_cversion: + return Result(result=MANUAL, msg="Current switch version not found. 
@check_wrapper(check_title='L3out /32 Static Route and BD Subnet Overlap')
def static_route_overlap_check(cversion, tversion, **kwargs):
    """Detect /32 L3Out static routes that fall inside a BD subnet of the same VRF.

    Only evaluated when upgrading from a version older than 5.2(6e) to a
    version within (5.0(1a), 5.2(6e)), the affected window.

    Fixes over the previous revision:
    - skip static-route DNs that do not match the expected format instead of
      raising AttributeError on a failed re.search()
    - use dict.get() with empty-list defaults when walking VRF->BD and
      BD->subnet maps; a VRF without BDs or a BD without subnets previously
      raised KeyError
    """
    result = PASS
    headers = ['L3out', '/32 Static Route', 'BD', 'BD Subnet']
    data = []
    recommended_action = 'Change /32 static route design or target a fixed version'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#l3out-32-overlap-with-bd-subnet'
    iproute_regex = r'uni/tn-(?P<tenant>[^/]+)/out-(?P<l3out>[^/]+)/lnodep-(?P<nodeprofile>[^/]+)/rsnodeL3OutAtt-\[topology/pod-(?P<pod>[^/]+)/node-(?P<node>\d{3,4})\]/rt-\[(?P<addr>[^/]+)/(?P<netmask>\d{1,2})\]'
    bd_subnet_regex = r'uni/tn-(?P<tenant>[^/]+)/BD-(?P<bd>[^/]+)/subnet-\[(?P<subnet>[^/]+/\d{2})\]'

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    if cversion.older_than("5.2(6e)") and tversion.newer_than("5.0(1a)") and tversion.older_than("5.2(6e)"):
        slash32filter = 'ipRouteP.json?query-target-filter=and(wcard(ipRouteP.dn,"/32"))'
        staticRoutes = icurl('class', slash32filter)
        if staticRoutes:
            staticroute_vrf = icurl('class', 'l3extRsEctx.json')
            # {route_addr: {"vrf": vrf_tDn, "l3out": l3out_dn}, ...}
            staticR_to_vrf = {}
            for staticRoute in staticRoutes:
                route_match = re.search(iproute_regex, staticRoute['ipRouteP']['attributes']['dn'])
                if not route_match:
                    # Unexpected DN format; previously this raised AttributeError
                    continue
                l3out_dn = 'uni/tn-' + route_match.group("tenant") + '/out-' + route_match.group("l3out") + '/rsectx'
                for l3outCtx in staticroute_vrf:
                    if l3outCtx['l3extRsEctx']['attributes']['dn'] == l3out_dn:
                        staticR_to_vrf[route_match.group("addr")] = {
                            'vrf': l3outCtx['l3extRsEctx']['attributes']['tDn'],
                            'l3out': l3outCtx['l3extRsEctx']['attributes']['dn'].replace('/rsectx', ''),
                        }

            # {vrf_tDn: [bd_dn, ...], ...}
            vrf_to_bd = {}
            for bd_ref in icurl('class', 'fvRsCtx.json'):
                vrf_name = bd_ref['fvRsCtx']['attributes']['tDn']
                bd_name = bd_ref['fvRsCtx']['attributes']['dn'].replace('/rsctx', '')
                vrf_to_bd.setdefault(vrf_name, []).append(bd_name)

            # {bd_dn: [subnet_cidr, ...], ...}
            bd_to_subnet = {}
            for subnet in icurl('class', 'fvSubnet.json'):
                bd_subnet_match = re.search(bd_subnet_regex, subnet['fvSubnet']['attributes']['dn'])
                if bd_subnet_match:
                    bd_dn = 'uni/tn-' + bd_subnet_match.group("tenant") + '/BD-' + bd_subnet_match.group("bd")
                    bd_to_subnet.setdefault(bd_dn, []).append(bd_subnet_match.group("subnet"))

            for static_route, info in staticR_to_vrf.items():
                # Empty-list defaults: a VRF with no BDs, or a BD with no
                # subnets, must not abort the whole check (old KeyError).
                for bd in vrf_to_bd.get(info['vrf'], []):
                    for subnet in bd_to_subnet.get(bd, []):
                        if IPAddress.ip_in_subnet(static_route, subnet):
                            data.append([info['l3out'], static_route, bd, subnet])

    if data:
        result = FAIL_O

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
@check_wrapper(check_title="Shared Services with vzAny Consumers")
def consumer_vzany_shared_services_check(cversion, tversion, **kwargs):
    """Warn about policy TCAM rule expansion for shared-service contracts consumed by vzAny.

    Certain upgrade paths expand zoning rules for EPG/External EPG (and, for
    later paths, ESG) providers whose global-scope contract is consumed by
    vzAny in a different VRF. Returns MANUAL with the affected contracts so
    the operator can assess TCAM headroom and Policy Compression.

    Fix over the previous revision: get_provider_vrf_vnid() used a direct
    dict lookup after populating its cache, raising KeyError whenever a
    provider DN was absent from the class query result; it now returns None
    for unknown providers, and vnid-to-VRF-dn lookups use .get() as well.
    """
    headers = ["Contract(Tn:Contract)", "Consumer VRF(Tn:VRF)", "Provider VRF(Tn:VRF)", "Provider DN", "Provider Type"]
    data = []
    recommended_action = (
        "Policy TCAM entries used by these contracts may increase after the upgrade.\n"
        "\tThis may cause overflow of the TCAM space and some contracts may stop working after the upgrade.\n"
        "\tTo avoid such a risk, refer to the provided document and consider enabling Policy Compression as needed."
    )
    recommended_action_for_pbr = (  # added only when it matters (PBR present and tver is pre-6.1.4)
        "\n\tNote that Policy Compression for contracts with PBR (Policy Based Redirection) is not supported prior to 6.1(4). "
        "Change the target version to a newer one if it is required."
    )
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#shared-service-with-vzany-consumer"

    # Ignore if target version is missing or older than 5.3(2d)
    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)
    if tversion.older_than("5.3(2d)"):
        return Result(result=NA, msg=VER_NOT_AFFECTED)

    # Check if we cross any version lines where additional rule expansion may happen
    should_check_epg_expansion = False
    should_check_esg_expansion = False
    should_check_pbr = False

    # Rule expansion for EPG/External EPG providers with vzAny consumers
    # For upgrades from pre-5.3(2d) to 5.3(2d)+ except for 6.0(1) and 6.0(2)
    if cversion.older_than("5.3(2d)") and (
        (tversion.major1 == "5" and not tversion.older_than("5.3(2d)"))
        or tversion.newer_than("6.0(3a)")
    ):
        should_check_epg_expansion = True
    # For upgrades from 6.0(1)/6.0(2) to 6.0(3) or newer release
    if cversion.newer_than("6.0(1a)") and cversion.older_than("6.0(3a)") and tversion.newer_than("6.0(3a)"):
        should_check_epg_expansion = True

    # Rule expansion for ESG providers with vzAny consumers
    if cversion.older_than("6.1(2a)") and tversion.newer_than("6.1(2a)"):
        should_check_esg_expansion = True

    # Look for PBR enabled contracts that undergo rule expansion since enabling
    # compression on them will have no effect in pre-6.1(4) releases
    if tversion.older_than("6.1(4a)"):
        should_check_pbr = True

    # If no expansion, upgrade path is unaffected
    if not (should_check_epg_expansion or should_check_esg_expansion):
        return Result(result=NA, msg=VER_NOT_AFFECTED)

    # Helper functions
    def vrf_tn_name_from_dn(vrf_dn):
        m = re.search(r"uni/tn-([^/]+)/ctx-([^/]+)", vrf_dn or "")
        return "{}:{}".format(m.group(1), m.group(2)) if m else vrf_dn or "?"

    def contract_tn_name_from_dn(c_dn):
        m = re.search(r"uni/tn-([^/]+)/brc-([^/]+)", c_dn or "")
        return "{}:{}".format(m.group(1), m.group(2)) if m else c_dn or "?"

    def provider_class_from_parent(p_dn, pretty=False):
        if "/ap-" in p_dn and "/epg-" in p_dn:
            return "EPG" if pretty else "fvAEPg"
        if "/ap-" in p_dn and "/esg-" in p_dn:
            return "ESG" if pretty else "fvESg"
        if "/out-" in p_dn and "/instP-" in p_dn:
            return "External EPG" if pretty else "l3extInstP"
        return "Unknown"

    # Resolve provider VRF VNID.
    # Performs at most one query per provider class (EPG, External EPG, ESG)
    # and caches all results. Subsequent lookups are O(1).
    _provider_dn_to_vrf_vnid = {}
    _queried = {"fvAEPg": False, "l3extInstP": False, "fvESg": False}

    def get_provider_vrf_vnid(p_dn):
        if p_dn in _provider_dn_to_vrf_vnid:
            return _provider_dn_to_vrf_vnid[p_dn]

        p_classname = provider_class_from_parent(p_dn)

        if p_classname == "Unknown":
            _provider_dn_to_vrf_vnid[p_dn] = None
        elif not _queried.get(p_classname):
            for mo in icurl("class", p_classname + ".json") or []:
                attr = mo.get(p_classname, {}).get("attributes", {})
                _provider_dn_to_vrf_vnid[attr.get("dn")] = attr.get("scope")
            _queried[p_classname] = True

        # FIX: .get() -- a provider DN missing from the query result used to
        # raise KeyError here and abort the whole check.
        return _provider_dn_to_vrf_vnid.get(p_dn)

    _pbr_enabled_contracts = set()

    def populate_pbr_enabled_contracts():
        # Query all applied service graph instances, inspect node instances
        # for routingMode=Redirect. Any contract DN (ctrctDn) with a redirect
        # node is considered PBR-enabled.
        # Relevant only if we are crossing 6.1(4) version in upgrade path.
        graph_api = (
            "vnsGraphInst.json?"
            "query-target-filter=eq(vnsGraphInst.configSt,\"applied\")"
            "&rsp-subtree=children&rsp-subtree-class=vnsNodeInst&rsp-subtree-include=required"
        )
        graph_insts = icurl("class", graph_api) or []
        if not graph_insts:
            return
        for gi in graph_insts:
            gi_mo = gi.get("vnsGraphInst")
            if not gi_mo:
                continue
            ctrct_dn = gi_mo["attributes"].get("ctrctDn")
            if not ctrct_dn:
                continue
            redirect = False
            for child in gi_mo.get("children", []) or []:
                node_inst = child.get("vnsNodeInst")
                if not node_inst:
                    continue
                if node_inst["attributes"].get("routingMode") == "Redirect":
                    redirect = True
                    break
            if redirect:
                _pbr_enabled_contracts.add(ctrct_dn)

    def is_contract_pbr_enabled(contract_dn):
        return contract_dn in _pbr_enabled_contracts

    # Gather all VRF VNIDs and look for vzAny consumers
    all_vrfs = icurl("class", "fvCtx.json?rsp-subtree=full&rsp-subtree-class=vzRsAnyToCons") or []
    vnid_to_vrf_dn = {}
    contract_to_vzany_cons_vnids = defaultdict(list)
    for vrf_entry in all_vrfs:
        fvctx = vrf_entry.get("fvCtx", {})
        attr = fvctx.get("attributes", {})
        vrf_dn = attr.get("dn")
        vrf_vnid = attr.get("scope")
        if vrf_dn and vrf_vnid:
            vnid_to_vrf_dn[vrf_vnid] = vrf_dn
            # Only record vzAny consumers for VRFs with a usable VNID --
            # a None VNID could never be mapped back to a VRF DN anyway.
            for child in fvctx.get("children", []) or []:
                vzany = child.get("vzAny")
                if not vzany:
                    continue
                for vzany_child in vzany.get("children", []) or []:
                    if vzany_child.get("vzRsAnyToCons"):
                        contract_dn = vzany_child["vzRsAnyToCons"]["attributes"]["tDn"]
                        contract_to_vzany_cons_vnids[contract_dn].append(vrf_vnid)

    # Return if there are no vzAny consumers
    if not contract_to_vzany_cons_vnids:
        return Result(result=PASS, msg="No vzAny consumers")

    # Look for contracts with global scope
    global_contract_api = (
        'vzBrCP.json?query-target-filter=eq(vzBrCP.scope,"global")'
        '&rsp-subtree=children'
        '&rsp-subtree-class=vzRtProv'
        '&rsp-subtree-include=required'
    )
    global_contracts = icurl("class", global_contract_api) or []
    if not global_contracts:
        return Result(result=PASS, msg="No contracts with global scope")

    if should_check_pbr:
        populate_pbr_enabled_contracts()

    # Go through contract relations
    found_pbr = False
    for entry in global_contracts:
        brc = entry.get("vzBrCP")
        if not brc:
            continue
        contract_dn = brc["attributes"]["dn"]
        # Check consumers (vzAny)
        c_vrf_vnids = contract_to_vzany_cons_vnids.get(contract_dn)
        if not c_vrf_vnids:
            continue  # No vzAny consumers for this contract. Skip.
        # Check providers ("fvAEPg", "l3extInstP", "fvESg")
        providers = set()
        for ch in (brc.get("children") or []):
            if ch.get("vzRtProv"):
                p_dn = ch["vzRtProv"]["attributes"].get("tDn")
                p_cl = ch["vzRtProv"]["attributes"].get("tCl")
                if p_dn:
                    if should_check_epg_expansion and p_cl in ("fvAEPg", "l3extInstP"):
                        providers.add(p_dn)
                    elif should_check_esg_expansion and p_cl == "fvESg":
                        providers.add(p_dn)
        # Populate data with cons/prov of contract affected by rule expansion
        for c_vrf_vnid in c_vrf_vnids:
            for p_dn in providers:
                p_vrf_vnid = get_provider_vrf_vnid(p_dn)
                if not p_vrf_vnid or p_vrf_vnid == c_vrf_vnid:
                    continue  # global contract but used within the same VRF. Skip.
                contract_name = contract_tn_name_from_dn(contract_dn)
                if should_check_pbr and is_contract_pbr_enabled(contract_dn):
                    contract_name += " [PBR]"
                    found_pbr = True
                data.append([
                    contract_name,
                    vrf_tn_name_from_dn(vnid_to_vrf_dn.get(c_vrf_vnid)),
                    vrf_tn_name_from_dn(vnid_to_vrf_dn.get(p_vrf_vnid)),
                    p_dn,
                    provider_class_from_parent(p_dn, pretty=True),
                ])

    if found_pbr:
        recommended_action += recommended_action_for_pbr

    if data:
        return Result(result=MANUAL,
                      headers=headers,
                      data=data,
                      recommended_action=recommended_action,
                      doc_url=doc_url)
    else:
        return Result(result=PASS,
                      msg="No shared-service vzAny consumers affected by rule expansion",
                      headers=headers,
                      data=data,
                      doc_url=doc_url)
@check_wrapper(check_title='Fabric Link Redundancy')
def fabric_link_redundancy_check(**kwargs):
    """Verify each leaf has redundant fabric uplinks.

    Tier-1 leafs must see more than one spine via LLDP; tier-2 leafs must see
    more than one tier-1 leaf. Remote leafs are excluded.
    """
    result = PASS
    headers = ["Leaf Name", "Fabric Link Adjacencies", "Problem"]
    data = []
    recommended_action = ""
    sp_recommended_action = "Connect the leaf switch(es) to multiple spine switches for redundancy"
    t1_recommended_action = "Connect the tier 2 leaf switch(es) to multiple tier1 leaf switches for redundancy"
    doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#fabric-link-redundancy"

    fabric_nodes_api = 'fabricNode.json'
    fabric_nodes_api += '?query-target-filter=and(or(eq(fabricNode.role,"leaf"),eq(fabricNode.role,"spine")),eq(fabricNode.fabricSt,"active"))'

    lldp_adj_api = 'lldpAdjEp.json'
    lldp_adj_api += '?query-target-filter=wcard(lldpAdjEp.sysDesc,"topology/pod")'

    spines, leafs, t2leafs = {}, {}, {}
    for node in icurl("class", fabric_nodes_api):
        node_attrs = node["fabricNode"]["attributes"]
        if node_attrs["nodeType"] == "remote-leaf-wan":
            # Not applicable to remote leafs, skip
            continue
        if node_attrs["role"] == "spine":
            spines[node_attrs["dn"]] = node_attrs["name"]
        elif node_attrs["role"] == "leaf":
            leafs[node_attrs["dn"]] = node_attrs["name"]
            if node_attrs["nodeType"] == "tier-2-leaf":
                t2leafs[node_attrs["dn"]] = node_attrs["name"]

    sp_missing = t1_missing = False
    lldp_adjs = icurl("class", lldp_adj_api)
    for leaf_dn, leaf_name in iteritems(leafs):
        tier2 = leaf_dn in t2leafs
        uplink_names = set()
        for lldp_adj in lldp_adjs:
            adj_attrs = lldp_adj["lldpAdjEp"]["attributes"]
            if not adj_attrs["dn"].startswith(leaf_dn + "/"):
                continue
            neighbor_dn = adj_attrs["sysDesc"].replace("\\", "")
            if tier2:
                # tier-2 leafs must fan out to multiple tier-1 leafs
                if neighbor_dn in leafs and neighbor_dn not in t2leafs:
                    uplink_names.add(adj_attrs["sysName"])
            elif neighbor_dn in spines:
                # tier-1 leafs must fan out to multiple spines
                uplink_names.add(adj_attrs["sysName"])
            if len(uplink_names) > 1:
                break

        if len(uplink_names) > 1:
            continue  # redundancy confirmed for this leaf

        if tier2:
            adj_type = "tier 1 leaf"
            t1_missing = True
        else:
            adj_type = "spine"
            sp_missing = True
        if len(uplink_names) == 1:
            data.append([leaf_name, "".join(uplink_names), "Only one {} adjacency".format(adj_type)])
        else:
            data.append([leaf_name, "", "No {} adjacency".format(adj_type)])

    if data:
        result = FAIL_O
        if sp_missing and t1_missing:
            recommended_action = "\n\t" + sp_recommended_action + "\n\t" + t1_recommended_action
        elif sp_missing:
            recommended_action = sp_recommended_action
        elif t1_missing:
            recommended_action = t1_recommended_action

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
@check_wrapper(check_title='Out-of-Service Ports')
def out_of_service_ports_check(**kwargs):
    """Report ports matching the out-of-service filter; such ports stay down after a switch upgrade."""
    result = PASS
    headers = ["Pod ID", "Node ID", "Port ID", "Operational State", "Usage"]
    data = []
    recommended_action = 'Remove Out-of-service Policy on identified "up" ports or they will remain "down" after switch Upgrade'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#out-of-service-ports'

    ethpmPhysIf_api = 'ethpmPhysIf.json'
    ethpmPhysIf_api += '?query-target-filter=and(eq(ethpmPhysIf.operSt,"2"),bw(ethpmPhysIf.usage,"32","34"))'

    for port in icurl('class', ethpmPhysIf_api) or []:
        port_attrs = port['ethpmPhysIf']['attributes']
        # port_regex is a module-level pattern with pod/node/port groups
        node_data = re.search(port_regex, port_attrs['dn'])
        data.append([
            node_data.group("pod"),
            node_data.group("node"),
            node_data.group("port"),
            port_attrs['operSt'],
            port_attrs['usage'],
        ])

    if data:
        result = FAIL_O

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
@check_wrapper(check_title='FC/FCOE support removed for -EX platforms')
def fc_ex_model_check(tversion, **kwargs):
    """Fail when first-generation -EX switches run the FC stack and the target version dropped FC/FCoE support."""
    result = PASS
    headers = ["FC/FCOE Node ID", "Model"]
    data = []
    recommended_action = 'Select a different target version. Refer to the doc for additional details.'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#fcfcoe-support-for-ex-switches'

    if not tversion:
        return Result(result=MANUAL, msg=TVER_MISSING)

    affected_target = (tversion.newer_than("6.0(7a)") and tversion.older_than("6.0(9c)")) or tversion.same_as("6.1(1f)")
    if affected_target:
        affected_models = ["N9K-C93180YC-EX", "N9K-C93108TC-EX", "N9K-C93108LC-EX"]
        # Nodes whose system has an fcEntity, i.e. the FC stack is running
        fc_nodes = [
            fcEntity['fcEntity']['attributes']['dn'].split('/sys')[0]
            for fcEntity in icurl('class', "fcEntity.json") or []
        ]
        if fc_nodes:
            fabricNode_api = 'fabricNode.json'
            fabricNode_api += '?query-target-filter=wcard(fabricNode.model,".*EX")'
            for node in icurl('class', fabricNode_api):
                node_attrs = node['fabricNode']['attributes']
                if node_attrs['dn'] in fc_nodes and node_attrs['model'] in affected_models:
                    data.append([node_attrs['dn'], node_attrs['model']])

    if data:
        result = FAIL_O
    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
@check_wrapper(check_title='Nexus 950X FM or LC Might Fail to boot after reload')
def clock_signal_component_failure_check(**kwargs):
    """Collect serial numbers of FM/LC models potentially affected by FN64251.

    Queries the fabric modules (N9K-C9504-FM-E / N9K-C9508-FM-E) and line
    cards (N9K-X9732C-EX) and appends their serial numbers to the
    recommended action so the operator can run them through the Serial
    Number Validation tool.

    Improvements over the previous revision:
    - slot_regex is built once instead of being recomputed per card
      (loop-invariant hoist; node_regex is module level)
    - the result row and SN-string update are explicitly guarded by the
      regex match, so a non-matching DN can never emit a row with
      unbound or stale pod/node/slot values
    """
    result = PASS
    headers = ['Pod', "Node", "Slot", "Model", "Serial Number"]
    data = []
    recommended_action = 'Run the SN string through the Serial Number Validation tool (linked within doc url) to check for FN64251.\n\tSN String:\n\t'
    doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#nexus-950x-fm-or-lc-might-fail-to-boot-after-reload'

    eqptFC_api = 'eqptFC.json'
    eqptFC_api += '?query-target-filter=or(eq(eqptFC.model,"N9K-C9504-FM-E"),eq(eqptFC.model,"N9K-C9508-FM-E"))'

    eqptLC_api = 'eqptLC.json'
    eqptLC_api += '?query-target-filter=eq(eqptLC.model,"N9K-X9732C-EX")'

    eqptFC = icurl('class', eqptFC_api)
    eqptLC = icurl('class', eqptLC_api)

    slot_regex = node_regex + r"/sys/ch/(?P<slot>.+)/"
    serials = []
    for card in (eqptFC or []) + (eqptLC or []):
        # Each result is one class or the other; try LC first, then FC
        attrs = card.get('eqptLC', {}).get('attributes', {}) or card.get('eqptFC', {}).get('attributes', {})
        match = re.search(slot_regex, attrs.get('dn', ''))
        if not match:
            continue  # unexpected DN format; do not emit a partial row
        sn = attrs.get('ser', '')
        data.append([match.group("pod"), match.group("node"), match.group("slot"), attrs.get('model', ''), sn])
        serials.append(sn)

    if data:
        result = MANUAL
        recommended_action += ",".join(serials)

    return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url)
'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#n9k-c9408-platform-model' + + eqptCh_api = 'eqptCh.json' + eqptCh_api += '?query-target-filter=eq(eqptCh.model,"N9K-C9400-SW-GX2A")' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if tversion.newer_than("6.1(3a)"): + eqptCh = icurl('class', eqptCh_api) + for node in eqptCh: + node_dn = node['eqptCh']['attributes']['dn'] + model = node['eqptCh']['attributes']['model'] + data.append([node_dn, model]) + if data: + result = FAIL_O + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='PBR High Scale') +def pbr_high_scale_check(tversion, **kwargs): + result = PASS + headers = ["Fabric-Wide PBR Object Count"] + data = [] + recommended_action = 'High PBR scale detected, target a fixed version for CSCwi66348' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#pbr-high-scale' + + # Not querying fvAdjDefCons as it fails from APIC + vnsAdjacencyDefCont_api = 'vnsAdjacencyDefCont.json' + vnsSvcRedirEcmpBucketCons_api = 'vnsSvcRedirEcmpBucketCons.json' + count_filter = '?rsp-subtree-include=count' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if tversion.older_than("5.3(2c)"): + vnsAdj = icurl('class', vnsAdjacencyDefCont_api+count_filter) + vnsSvc = icurl('class', vnsSvcRedirEcmpBucketCons_api+count_filter) + + vnsAdj_count = int(vnsAdj[0]['moCount']['attributes']['count']) + vnsSvc_count = int(vnsSvc[0]['moCount']['attributes']['count']) + total = vnsAdj_count + vnsSvc_count + if total > 100000: + data.append([total]) + + if data: + result = FAIL_O + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='HTTPS Request Throttle Rate') +def https_throttle_rate_check(cversion, tversion, **kwargs): + result = PASS + headers = 
["Mgmt Access Policy", "HTTPS Throttle Rate"] + data = [] + recommended_action = "Reduce the throttle rate to 40 (req/sec), 2400 (req/min) or lower." + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#https-request-throttle-rate" + + # Applicable only when crossing 6.1(2) as upgrade instead of downgrade. + if cversion.newer_than("6.1(2a)"): + return Result(result=NA, msg=VER_NOT_AFFECTED) + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + commHttpses = icurl("class", "commHttps.json") + for commHttps in commHttpses: + if commHttps["commHttps"]["attributes"].get("globalThrottleSt", "disabled") == "disabled": + continue + if (( + commHttps["commHttps"]["attributes"]["globalThrottleUnit"] == "r/s" and + int(commHttps["commHttps"]["attributes"]["globalThrottleRate"]) > 40 + ) or ( + commHttps["commHttps"]["attributes"]["globalThrottleUnit"] == "r/m" and + int(commHttps["commHttps"]["attributes"]["globalThrottleRate"]) > 2400 + )): + # Get `default` of `uni/fabric/comm-default/https` + commPol_rn = commHttps["commHttps"]["attributes"]["dn"].split("/")[2] + commPol_name = commPol_rn.split("-")[1] + rate = "{} ({})".format( + commHttps["commHttps"]["attributes"]["globalThrottleRate"], + commHttps["commHttps"]["attributes"]["globalThrottleUnit"], + ) + data.append([commPol_name, rate]) + + if data: + if tversion.older_than("6.1(2a)"): + result = MANUAL + recommended_action = "6.1(2)+ will reject this config. 
" + recommended_action + else: + result = FAIL_UF + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='Standby SUP Image Sync') +def standby_sup_sync_check(cversion, tversion, **kwargs): + result = PASS + headers = ["Pod ID", "Node ID", "Standby SUP Slot"] + data = [] + recommended_action = 'Target an interim image with fix for CSCwa44220 that is smaller than 2Gigs, such as 5.2(8i)' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#standby-sup-image-sync' + + sup_regex = node_regex + r'/sys/ch/supslot-(?P<slot>\d)' + eqptSupC_api = 'eqptSupC.json' + eqptSupC_api += '?query-target-filter=eq(eqptSupC.rdSt,"standby")' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if ( + (cversion.older_than("4.2(7t)") or (cversion.major_version == "5.2" and cversion.older_than("5.2(5d)"))) + and ((tversion.major_version == "5.2" and tversion.older_than("5.2(7f)")) or tversion.newer_than("6.0(2h)")) + ): + eqptSupC = icurl('class', eqptSupC_api) + for node in eqptSupC: + node_dn = node['eqptSupC']['attributes']['dn'] + match = re.search(sup_regex, node_dn) + if match: + pod = match.group("pod") + node = match.group("node") + slot = match.group("slot") + data.append([pod, node, slot]) + + if data: + result = FAIL_UF + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='Equipment Disk Limits') +def equipment_disk_limits_exceeded(**kwargs): + result = PASS + headers = ['Pod', 'Node', 'Code', '%', 'Description'] + data = [] + unformatted_headers = ['Fault DN', '%', 'Recommended Action'] + unformatted_data = [] + recommended_action = 'Review the reference document for commands to validate disk usage' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#equipment-disk-limits' + + usage_regex = r"avail 
\(New: (?P<avail>\d+)\).+used \(New: (?P<used>\d+)\)" + f182x_api = 'faultInst.json' + f182x_api += '?query-target-filter=or(eq(faultInst.code,"F1820"),eq(faultInst.code,"F1821"),eq(faultInst.code,"F1822"))' + faults = icurl('class', f182x_api) + + for faultInst in faults: + percent = "NA" + attributes = faultInst['faultInst']['attributes'] + + usage_match = re.search(usage_regex, attributes['changeSet']) + if usage_match: + avail = int(usage_match.group('avail')) + used = int(usage_match.group('used')) + percent = round((used / (avail + used)) * 100) + + dn_match = re.search(node_regex, attributes['dn']) + if dn_match: + data.append([dn_match.group('pod'), dn_match.group('node'), attributes['code'], percent, attributes['descr']]) + else: + unformatted_data.append([attributes['dn'], percent, attributes['descr']]) + + if data or unformatted_data: + result = FAIL_UF + + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + + +@check_wrapper(check_title='Global AES Encryption') +def aes_encryption_check(tversion, **kwargs): + result = FAIL_UF + headers = ["Target Version", "Global AES Encryption", "Impact"] + data = [] + recommended_action = ( + "\n\tEnable Global AES Encryption before upgrading your APIC (and take a configuration backup)." + "\n\tGlobal AES Encryption ensures that all configurations are included in the backup securely." + ) + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#global-aes-encryption" + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if tversion.newer_than("6.1(2a)"): + impact = "Upgrade Failure" + result = FAIL_UF + recommended_action += "\n\tUpgrade to 6.1(2) or later will fail when it is not enabled." 
+ else: + impact = "Your config backup may not contain all data" + result = MANUAL + + cryptkeys = icurl("mo", "uni/exportcryptkey.json") + if not cryptkeys: + data = [[str(tversion), "Object Not Found", impact]] + elif cryptkeys[0]["pkiExportEncryptionKey"]["attributes"]["strongEncryptionEnabled"] != "yes": + data = [[str(tversion), "Disabled", impact]] + else: + result = PASS + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='Service Graph BD Forceful Routing') +def service_bd_forceful_routing_check(cversion, tversion, **kwargs): + result = PASS + headers = ["Bridge Domain (Tenant:BD)", "Service Graph Device (Tenant:Device)"] + data = [] + unformatted_headers = ["DN of fvRtEPpInfoToBD"] + unformatted_data = [] + recommended_action = ( + "\n\tConfirm that within these BDs there is no bridging traffic with the destination IP that doesn't belong to them." + "\n\tPlease check the reference document for details." 
+ ) + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#service-graph-bd-forceful-routing" + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if not (cversion.older_than("6.0(2a)") and tversion.newer_than("6.0(2a)")): + return Result(result=NA, msg=VER_NOT_AFFECTED) + + dn_regex = r"uni/tn-(?P<bd_tn>[^/]+)/BD-(?P<bd>[^/]+)/" + dn_regex += r"rtvnsEPpInfoToBD-\[uni/tn-(?P<sg_tn>[^/])+/LDevInst-\[uni/tn-(?P<ldev_tn>[^/]+)/lDevVip-(?P<ldev>[^\]]+)\].*\]" + + fvRtEPpInfoToBDs = icurl("class", "fvRtEPpInfoToBD.json") + for fvRtEPpInfoToBD in fvRtEPpInfoToBDs: + m = re.search(dn_regex, fvRtEPpInfoToBD["fvRtEPpInfoToBD"]["attributes"]["dn"]) + if not m: + log.error("Failed to match %s", fvRtEPpInfoToBD["fvRtEPpInfoToBD"]["attributes"]["dn"]) + unformatted_data.append([fvRtEPpInfoToBD["fvRtEPpInfoToBD"]["attributes"]["dn"]]) + continue + data.append([ + "{}:{}".format(m.group("bd_tn"), m.group("bd")), + "{}:{}".format(m.group("ldev_tn"), m.group("ldev")), + ]) + + if data or unformatted_data: + result = MANUAL + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + + +# Connection Base Check +@check_wrapper(check_title='Observer Database Size') +def observer_db_size_check(username, password, **kwargs): + result = PASS + headers = ["Node", "File Location", "Size (GB)"] + data = [] + recommended_action = 'Contact TAC to analyze and truncate large DB files' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#observer-database-size' + + topSystem_api = 'topSystem.json' + topSystem_api += '?query-target-filter=eq(topSystem.role,"controller")' + + controllers = icurl('class', topSystem_api) + if not controllers: + return Result(result=ERROR, msg='topSystem response empty. 
Is the cluster healthy?') + + has_error = False + prints('') + for apic in controllers: + attr = apic['topSystem']['attributes'] + node_title = 'Checking %s...' % attr['name'] + print_title(node_title) + try: + c = Connection(attr['address']) + c.username = username + c.password = password + c.log = LOG_FILE + c.connect() + except Exception as e: + data.append([attr['id'], attr['name'], str(e)]) + print_result(node_title, ERROR) + has_error = True + continue + try: + cmd = r"ls -lh /data2/dbstats | awk '{print $5, $9}'" + c.cmd(cmd) + if "No such file or directory" in c.output: + data.append([attr['id'], '/data2/dbstats/ not found', "Check user permissions or retry as 'apic#fallback\\\\admin'"]) + print_result(node_title, ERROR) + has_error = True + continue + dbstats = c.output.split("\n") + for line in dbstats: + observer_gig_regex = r"(?P<size>\d{1,3}(?:\.\d)?G)\s(?P<file>observer_\d{1,3}.db)" + size_match = re.match(observer_gig_regex, line) + if size_match: + file_size = size_match.group("size") + file_name = "/data2/dbstats/" + size_match.group("file") + data.append([attr['id'], file_name, file_size]) + print_result(node_title, DONE) + except Exception as e: + data.append([attr['id'], attr['name'], str(e)]) + print_result(node_title, ERROR) + has_error = True + continue + if has_error: + result = ERROR + elif data: + result = FAIL_UF + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url, adjust_title=True) + + +@check_wrapper(check_title='AVE End-of-Life') +def ave_eol_check(tversion, **kwargs): + result = NA + headers = ["AVE Domain Name"] + data = [] + recommended_action = 'AVE domain(s) must be migrated to supported domain types prior to 6.0+ upgrade' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#ave-end-of-life' + + ave_api = 'vmmDomP.json' + ave_api += '?query-target-filter=eq(vmmDomP.enableAVE,"true")' + + if not tversion: + return 
Result(result=MANUAL, msg=TVER_MISSING) + + if tversion.newer_than("6.0(1a)"): + ave = icurl('class', ave_api) + for domain in ave: + name = domain['vmmDomP']['attributes']['name'] + data.append([name]) + if data: + result = FAIL_O + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='ISIS DTEPs Byte Size') +def isis_database_byte_check(tversion, **kwargs): + result = PASS + headers = ["ISIS DTEPs Byte Size", "ISIS DTEPs"] + data = [] + recommended_action = 'Upgrade to a version with the fix for CSCwp15375. Current target version is affected.' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#isis-dteps-byte-size' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if tversion.newer_than("6.1(1a)") and tversion.older_than("6.1(3g)"): + isisDTEp_api = 'isisDTEp.json' + isisDTEp_api += '?query-target-filter=eq(isisDTEp.role,"spine")' + + isisDTEps = icurl('class', isisDTEp_api) + + physical_ids = set() + proxy_acast_ids = set() + + for entry in isisDTEps: + dtep_type = entry['isisDTEp']['attributes']['type'] + dtep_id = entry['isisDTEp']['attributes']['id'] + + if dtep_type == "physical": + physical_ids.add(dtep_id) + elif "physical,proxy-acast" in dtep_type: + proxy_acast_ids.add(dtep_id) + + for physical_id in physical_ids: + combined_dteps = ",".join([physical_id] + list(proxy_acast_ids)) + total_bytes = len(combined_dteps) + + if total_bytes > 57: + result = FAIL_O + data.append([total_bytes, combined_dteps]) + break + else: + return Result(result=NA, msg=VER_NOT_AFFECTED) + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + +@check_wrapper(check_title='Service-EP Flag in BD without PBR') +def service_ep_flag_bd_check(cversion, tversion, **kwargs): + result = PASS + headers = ["Tenant ", "Bridge Domain ", "Service Graph Device", "Device Node 
Name" ] + data = [] + unformatted_headers = ["DN of vnsLIfCtx"] + unformatted_data = [] + recommended_action = ( + "\n\tConfirm that within these BDs the PBR configuration is complete." + "\n\tPlease check the reference document for details." + ) + doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#service_ep_flag_bd" + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + if ( + # Older versions not affected + cversion.older_than("5.2(5c)") and tversion.older_than("5.2(5c)") + ) or ( + # Current version not affected target version fixed + cversion.older_than("5.2(5b)") and tversion.newer_than("6.0(8e)") + ) or ( + # Current version and target version fixed + cversion.newer_than("6.0(8e)") and tversion.newer_than("6.0(8e)") + ): + return Result(result=NA, msg=VER_NOT_AFFECTED) + + bd_dn_regex = r"uni/tn-(?P<bd_tn>[^/]+)/BD-(?P<bd>[^/]+)" + + sg_regex = r"uni/tn-(?P<sg_tn>[^/]+)/" + sg_regex += r"ldevCtx-c-(?P<ldev_ctrc>[^-][^g]+)" + sg_regex += r"-g-(?P<ldev_graph>[^-][^n]+)" + sg_regex += r"-n-(?P<ldev_node>[^/]+)/" + sg_regex += r"lIfCtx-c-(?P<ldev_conn>.+)" + + # pbr_regex = r"uni/tn-(?P<pbr_tn>[^/]+)/" + # pbr_regex += r"svcCont/svcRedirectPol-(?P<pbr_name>.+)" + + vnsLIfCtx_api = "vnsLIfCtx.json" + vnsLIfCtx_api += "?query-target=self&rsp-subtree=children" + vnsLIfCtxs = icurl("class", vnsLIfCtx_api) + + for vnsLIfCtx in vnsLIfCtxs: + if ("vnsRsLIfCtxToSvcRedirectPol" not in vnsLIfCtx["vnsLIfCtx"]["children"][0]): + # vnsRsLIfCtxToSvcRedirectPol missing, + sg_graph_name = re.search(sg_regex, vnsLIfCtx["vnsLIfCtx"]["attributes"]["dn"]) + result = FAIL_O + for child in vnsLIfCtx["vnsLIfCtx"]["children"]: + if "vnsRsLIfCtxToBD" in child: + bd_name = re.search(bd_dn_regex, child["vnsRsLIfCtxToBD"]["attributes"]["tDn"]) + if sg_graph_name and bd_name: + data.append([ + sg_graph_name.group("sg_tn"), + bd_name.group("bd"), + sg_graph_name.group("ldev_graph"), + sg_graph_name.group("ldev_node") + ]) + break + + if 
unformatted_data: + result = MANUAL + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url, + ) + +# Subprocess check - cat + acidiag +@check_wrapper(check_title='APIC Database Size') +def apic_database_size_check(cversion, **kwargs): + result = PASS + headers = ["APIC ID", "DME", "Class Name", "Object Count"] + data = [] + recommended_action = 'Contact Cisco TAC to investigate all flagged high object counts' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-database-size' + + dme_svc_list = ['vmmmgr', 'policymgr', 'eventmgr', 'policydist'] + unique_list = {} + apic_id_to_name = {} + apic_node_mo = icurl('class', 'infraWiNode.json') + for apic in apic_node_mo: + if apic['infraWiNode']['attributes']['operSt'] == 'available': + apic_id = apic['infraWiNode']['attributes']['id'] + apic_name = apic['infraWiNode']['attributes']['nodeName'] + if apic_id not in apic_id_to_name: + apic_id_to_name[apic_id] = apic_name + + # For 3 APIC cluster, only check APIC Id 2 due to static local shards (R0) + if len(apic_id_to_name) == 3: + apic_id_to_name = {"2": apic_id_to_name["2"]} + + if cversion.older_than("6.1(3a)"): + for dme in dme_svc_list: + for id in apic_id_to_name: + apic_hostname = apic_id_to_name[id] + collect_stats_cmd = 'cat /debug/'+apic_hostname+'/'+dme+'/mitmocounters/mo | grep -v ALL | sort -rn -k3' + top_class_stats = run_cmd(collect_stats_cmd, splitlines=True) + + for svc_stats in top_class_stats[:4]: + if ":" in svc_stats: + class_name = svc_stats.split(":")[0].strip() + mo_count = svc_stats.split(":")[1].strip() + if int(mo_count) > 1000*1000*1.5: + unique_list[class_name] = {"id": id, "dme": dme, "checked_val": mo_count} + else: + headers = ["APIC ID", "DME", "Shard", "Size"] + recommended_action = 'Contact Cisco TAC to investigate all flagged large DB sizes' + for id 
in apic_id_to_name: + collect_stats_cmd = "acidiag dbsize --topshard --apic " + id + " -f json" + try: + collect_shard_stats_data = run_cmd(collect_stats_cmd, splitlines=False) + except subprocess.CalledProcessError: + return Result(result=MANUAL, msg="acidiag command not available to current user") + top_db_stats = json.loads(collect_shard_stats_data) + + for db_stats in top_db_stats['dbs']: + if int(db_stats['size_b']) >= 1073741824 * 5: + apic_id = db_stats['apic'] + dme = db_stats['dme'] + shard = db_stats['shard_replica'] + size = db_stats['size_h'] + unique_list[shard] = {"id": id, "dme": dme, "checked_val": size} + + # dedup based on unique_key + if unique_list: + for unique_key, details in unique_list.items(): + apic_id = details['id'] + dme = details['dme'] + checked_val = details['checked_val'] + data.append([apic_id, dme, unique_key, checked_val]) + + if data: + result = FAIL_UF + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + +@check_wrapper(check_title='Policydist configpushShardCont crash') +def configpush_shard_check(tversion, **kwargs): + result = NA + headers = ["dn", "headTx", "tailTx"] + data = [] + recommended_action = 'Contact Cisco TAC for Support before upgrade' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#policydist-configpushshardcont-crash' + + if not tversion: + return Result(result=MANUAL, msg=TVER_MISSING) + + if tversion.older_than("6.1(4a)"): + result = PASS + configpushShardCont_api = 'configpushShardCont.json' + configpushShardCont_api += '?query-target-filter=and(eq(configpushShardCont.tailTx,"0"),ne(configpushShardCont.headTx,"0"))' + configpush_sh_cont = icurl('class', configpushShardCont_api) + if configpush_sh_cont: + for sh_cont in configpush_sh_cont: + headtx = sh_cont['configpushShardCont']['attributes']['headTx'] + tailtx = sh_cont['configpushShardCont']['attributes']['tailTx'] + sh_cont_dn = 
sh_cont['configpushShardCont']['attributes']['dn'] + data.append([sh_cont_dn, headtx, tailtx]) + + if data: + result = FAIL_O + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + +# ---- Script Execution ---- + + +def parse_args(args): + parser = ArgumentParser(description="ACI Pre-Upgrade Validation Script - %s" % SCRIPT_VERSION) + parser.add_argument("-t", "--tversion", action="store", type=str, help="Upgrade Target Version. Ex. 6.2(1a)") + parser.add_argument("-c", "--cversion", action="store", type=str, help="Override Current Version. Ex. 6.1(1a)") + parser.add_argument("-d", "--debug-function", action="store", type=str, help="Name of a single function to debug. Ex. 'apic_version_md5_check'") + parser.add_argument("-a", "--api-only", action="store_true", help="For built-in PUV. API Checks only. Checks using SSH are skipped.") + parser.add_argument("-n", "--no-cleanup", action="store_true", help="Skip all file cleanup after script execution.") + parser.add_argument("-v", "--version", action="store_true", help="Only show the script version, then end.") + parser.add_argument("--total-checks", action="store_true", help="Only show the total number of checks, then end.") + parsed_args = parser.parse_args(args) + return parsed_args + + +def initialize(): + """ + Initialize the script environment, create necessary directories and set up log. + Not required for some options such as `--version` or `--total-checks`. 
+ """ + if os.path.isdir(DIR): + log.info("Cleaning up previous run files in %s", DIR) + shutil.rmtree(DIR) + log.info("Creating directories %s and %s", DIR, JSON_DIR) + os.mkdir(DIR) + os.mkdir(JSON_DIR) + fmt = '[%(asctime)s.%(msecs)03d{} %(levelname)-8s %(funcName)20s:%(lineno)-4d] %(message)s'.format(tz) + logging.basicConfig(level=logging.DEBUG, filename=LOG_FILE, format=fmt, datefmt='%Y-%m-%d %H:%M:%S') + + +def prepare(api_only, arg_tversion, arg_cversion, checks): + prints(' ==== %s%s, Script Version %s ====\n' % (ts, tz, SCRIPT_VERSION)) + prints('!!!! Check https://github.com/datacenter/ACI-Pre-Upgrade-Validation-Script for Latest Release !!!!\n') + + # Create empty result files for all checks + for idx, check in enumerate(checks): + check(idx + 1, len(checks), init=True) + + username = password = None + if not api_only: + username, password = get_credentials() + try: + cversion = get_current_version(arg_cversion) + tversion = get_target_version(arg_tversion) + vpc_nodes = get_vpc_nodes() + sw_cversion = get_switch_version() + except Exception as e: + prints('\n\nError: %s' % e) + prints("Initial query failed. Ensure APICs are healthy. 
Ending script run.") + log.exception(e) + sys.exit() + inputs = {'username': username, 'password': password, + 'cversion': cversion, 'tversion': tversion, + 'vpc_node_ids': vpc_nodes, 'sw_cversion': sw_cversion} + metadata = { + "name": "PreupgradeCheck", + "method": "standalone script", + "datetime": ts + tz, + "script_version": str(SCRIPT_VERSION), + "cversion": str(cversion), + "tversion": str(tversion), + "sw_cversion": str(sw_cversion), + "api_only": api_only, + "total_checks": len(checks), + } + with open(META_FILE, "w") as f: + json.dump(metadata, f, indent=2) + return inputs + + +def get_checks(api_only, debug_function): + api_checks = [ + # General Checks + target_version_compatibility_check, + gen1_switch_compatibility_check, + r_leaf_compatibility_check, + cimc_compatibilty_check, + apic_cluster_health_check, + switch_status_check, + ntp_status_check, + maintp_grp_crossing_4_0_check, + features_to_disable_check, + switch_group_guideline_check, + mini_aci_6_0_2_check, + post_upgrade_cb_check, + validate_32_64_bit_image_check, + fabric_link_redundancy_check, + + # Faults + apic_disk_space_faults_check, + switch_bootflash_usage_check, + switch_ssd_check, + port_configured_for_apic_check, + port_configured_as_l2_check, + port_configured_as_l3_check, + prefix_already_in_use_check, + encap_already_in_use_check, + access_untagged_check, + bd_subnet_overlap_check, + bd_duplicate_subnet_check, + vmm_controller_status_check, + vmm_controller_adj_check, + lldp_with_infra_vlan_mismatch_check, + hw_program_fail_check, + scalability_faults_check, + fabric_port_down_check, + equipment_disk_limits_exceeded, + + # Configurations + vpc_paired_switches_check, + overlapping_vlan_pools_check, + l3out_mtu_check, + bgp_peer_loopback_check, + l3out_route_map_direction_check, + l3out_route_map_missing_target_check, + l3out_overlapping_loopback_check, + intersight_upgrade_status_check, + isis_redis_metric_mpod_msite_check, + bgp_golf_route_target_type_check, + 
docker0_subnet_overlap_check, + uplink_limit_check, + oob_mgmt_security_check, + eecdh_cipher_check, + subnet_scope_check, + unsupported_fec_configuration_ex_check, + cloudsec_encryption_depr_check, + out_of_service_ports_check, + tep_to_tep_ac_counter_check, + https_throttle_rate_check, + aes_encryption_check, + service_bd_forceful_routing_check, + ave_eol_check, + consumer_vzany_shared_services_check, + + # Bugs + ep_announce_check, + eventmgr_db_defect_check, + contract_22_defect_check, + telemetryStatsServerP_object_check, + llfc_susceptibility_check, + internal_vlanpool_check, + fabricdomain_name_check, + sup_hwrev_check, + sup_a_high_memory_check, + vmm_active_uplinks_check, + fabric_dpp_check, + n9k_c93108tc_fx3p_interface_down_check, + fabricPathEp_target_check, + lldp_custom_int_description_defect_check, + rtmap_comm_match_defect_check, + static_route_overlap_check, + fc_ex_model_check, + vzany_vzany_service_epg_check, + clock_signal_component_failure_check, + stale_decomissioned_spine_check, + n9408_model_check, + pbr_high_scale_check, + standby_sup_sync_check, + isis_database_byte_check, + configpush_shard_check, + + ] + conn_checks = [ + # General + apic_version_md5_check, + apic_database_size_check, + + # Faults + standby_apic_disk_space_check, + apic_ssd_check, + + # Bugs + observer_db_size_check, + apic_ca_cert_validation, + + ] + if debug_function: + return [check for check in api_checks + conn_checks if check.__name__ == debug_function] + if api_only: + return api_checks + return conn_checks + api_checks + + +def run_checks(checks, inputs): + summary_headers = [PASS, FAIL_O, FAIL_UF, MANUAL, POST, NA, ERROR, 'TOTAL'] + summary = {key: 0 if key != 'TOTAL' else len(checks) for key in summary_headers} + for idx, check in enumerate(checks): + try: + r = check(idx + 1, len(checks), **inputs) + summary[r] += 1 + except KeyboardInterrupt: + prints('\n\n!!! 
KeyboardInterrupt !!!\n') + break + except Exception as e: + prints('') + err = 'Wrapper Error: %s' % e + print_title(err) + print_result(title=err, result=ERROR) + summary[ERROR] += 1 + logging.exception(e) + + prints('\n=== Summary Result ===\n') + res = max(summary_headers, key=len) + max_header_len = len(res) + for key in summary_headers: + prints('{:{}} : {:2}'.format(key, max_header_len, summary[key])) + + with open(SUMMARY_FILE, 'w') as f: + json.dump(summary, f, indent=2) + + +def wrapup(no_cleanup): + subprocess.check_output(['tar', '-czf', BUNDLE_NAME, DIR]) + bundle_loc = '/'.join([os.getcwd(), BUNDLE_NAME]) + prints(""" + Pre-Upgrade Check Complete. + Next Steps: Address all checks flagged as FAIL, ERROR or MANUAL CHECK REQUIRED + + Result output and debug info saved to below bundle for later reference. + Attach this bundle to Cisco TAC SRs opened to address the flagged checks. + + Result Bundle: {bundle} + """.format(bundle=bundle_loc)) + prints('==== Script Version %s FIN ====' % (SCRIPT_VERSION)) + + # puv integration needs to keep reading files from `JSON_DIR` under `DIR`. 
+ if not no_cleanup and os.path.isdir(DIR): + log.info('Cleaning up temporary files and directories...') + shutil.rmtree(DIR) + + +def main(_args=None): + args = parse_args(_args) + if args.version: + print(SCRIPT_VERSION) + return + checks = get_checks(args.api_only, args.debug_function) + if args.total_checks: + print("Total Number of Checks: {}".format(len(checks))) + return + + initialize() + inputs = prepare(args.api_only, args.tversion, args.cversion, checks) + run_checks(checks, inputs) + wrapup(args.no_cleanup) + + +if __name__ == "__main__": + main() diff --git a/docs/docs/validations.md b/docs/docs/validations.md index e395564..8d4134a 100644 --- a/docs/docs/validations.md +++ b/docs/docs/validations.md @@ -191,6 +191,8 @@ Items | Defect | This Script [Stale pconsRA Object][d26] | CSCwp22212 | :warning:{title="Deprecated"} | :no_entry_sign: [ISIS DTEPs Byte Size][d27] | CSCwp15375 | :white_check_mark: | :no_entry_sign: [Policydist configpushShardCont Crash][d28] | CSCwp95515 | :white_check_mark: | +[Service-EP Flag in BD without PBR][d29] | CSCwi17652 | :white_check_mark: | :no_entry_sign: + [d1]: #ep-announce-compatibility [d2]: #eventmgr-db-size-defect-susceptibility @@ -220,6 +222,7 @@ Items | Defect | This Script [d26]: #stale-pconsra-object [d27]: #isis-dteps-byte-size [d28]: #policydist-configpushshardcont-crash +[d29]: #service-ep-flag-in-bd-without-pbr ## General Check Details @@ -2604,6 +2607,19 @@ Due to [CSCwp95515][59], upgrading to an affected version while having any `conf If any instances of `configpushShardCont` are flagged by this script, Cisco TAC must be contacted to identify and resolve the underlying issue before performing the upgrade. +### Service-EP Flag in BD without PBR + +On ACI releases 5.2.5c/6.0.1g and 16.0.8e/6.1.1f, the Service-ep flag is set on the Service epg (vlanCktEp) even when PBR (vnsRsLIfCtxToSvcRedirectPol) is not configured. 
+The service-ep ctrl setting configures the Don't Learn (DL) Bit to 1 when forwarding the traffic to destination. +The DL bit being set on traffic coming from service device causes more BUM traffic on customer network. + +When customers upgrade to a version >= 16.0.8e/6.1.1f, due to the fix of [CSCwi17652][62] the Service-ep flag gets removed for the specific service EPGs vlanCktEp without PBR + +This may affect working service graphs. If any instances of missing `vnsRsLIfCtxToSvcRedirectPol` are flagged by this script, Cisco TAC must be contacted to identify and resolve any underlying issue before performing the upgrade. + + + + [0]: https://github.com/datacenter/ACI-Pre-Upgrade-Validation-Script [1]: https://www.cisco.com/c/dam/en/us/td/docs/Website/datacenter/apicmatrix/index.html [2]: https://www.cisco.com/c/en/us/support/switches/nexus-9000-series-switches/products-release-notes-list.html @@ -2666,3 +2682,4 @@ If any instances of `configpushShardCont` are flagged by this script, Cisco TAC [59]: https://bst.cloudapps.cisco.com/bugsearch/bug/CSCwp95515 [60]: https://www.cisco.com/c/en/us/solutions/collateral/data-center-virtualization/application-centric-infrastructure/white-paper-c11-743951.html#Inter [61]: https://www.cisco.com/c/en/us/solutions/collateral/data-center-virtualization/application-centric-infrastructure/white-paper-c11-743951.html#EnablePolicyCompression +[62]: https://bst.cloudapps.cisco.com/bugsearch/bug/CSCwi17652 \ No newline at end of file diff --git a/tests/service-ep_flag_bd_check/test_service-ep_flag_bd_check.py b/tests/service-ep_flag_bd_check/test_service-ep_flag_bd_check.py new file mode 100644 index 0000000..f5be4a6 --- /dev/null +++ b/tests/service-ep_flag_bd_check/test_service-ep_flag_bd_check.py @@ -0,0 +1,76 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = 
os.path.dirname(os.path.abspath(__file__)) + + +# icurl queries +vnsLIfCtx_api = "vnsLIfCtx.json" +vnsLIfCtx_api += "?query-target=self&rsp-subtree=children" + + +@pytest.mark.parametrize( + "icurl_outputs, cversion, tversion, expected_result", + [ + # tversion missing + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-na.json")}, + "5.2(8h)", + None, + script.MANUAL + ), + # Version not affected (both new) + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "6.0(8h)", + "6.1(1g)", + script.NA, + ), + # Version not affected (both old) + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "4.2(7s)", + "5.2(4c)", + script.NA, + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "5.2(8h)", + "6.0(8e)", + script.FAIL_O + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "5.2(8h)", + "6.1(1f)", + script.FAIL_O + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json")}, + "5.2(8h)", + "6.0(8e)", + script.PASS + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json")}, + "5.2(8h)", + "6.1(1f)", + script.PASS + ), + ], +) +def test_logic(mock_icurl, cversion, tversion, expected_result): + cver = script.AciVersion(cversion) + tver = script.AciVersion(tversion) if tversion else None + result = script.service_ep_flag_bd_check(1, 1, cver, tver) + assert result == expected_result diff --git a/tests/service-ep_flag_bd_check/vnsLIfCtx-na.json b/tests/service-ep_flag_bd_check/vnsLIfCtx-na.json new file mode 100644 index 0000000..0637a08 --- /dev/null +++ b/tests/service-ep_flag_bd_check/vnsLIfCtx-na.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/tests/service-ep_flag_bd_check/vnsLIfCtx-neg.json b/tests/service-ep_flag_bd_check/vnsLIfCtx-neg.json new file 
mode 100644 index 0000000..76e5b87 --- /dev/null +++ b/tests/service-ep_flag_bd_check/vnsLIfCtx-neg.json @@ -0,0 +1,173 @@ +[ + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "consumer", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-External-to-Any-DC-Prod_Contract-g-Prod-FTD-Local-SG-n-N1/lIfCtx-c-consumer", + "l3Dest": "no", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToSvcRedirectPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToSvcRedirectPol", + "tCl": "vnsSvcRedirectPol", + "tDn": "uni/tn-CIB/svcCont/svcRedirectPol-Prod-FTD-Outside-Users_RedirectPol", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-FTD/lIf-Prod-SV-FTD-Out", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-FTD-Out_BD", + "tType": "mo" + } + } + } + ] + } + }, + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "provider", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-ANYEPG-To-OSP-Cluster2_LB_uSeg_EPG_Contract-g-Prod_FTD_LB_Local-n-N1/lIfCtx-c-provider", + "l3Dest": "no", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToSvcRedirectPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToSvcRedirectPol", + "tCl": "vnsSvcRedirectPol", + "tDn": "uni/tn-CIB/svcCont/svcRedirectPol-Prod-FTD-Inside-Servers_RedirectPol", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-FTD/lIf-Prod-SV-FTD-IN", + "tType": "mo" 
+ } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-FTD-IN_BD", + "tType": "mo" + } + } + } + ] + } + }, + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "consumer", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-Test_LB_Contract-g-Prod-FW-LB-SG-n-node1/lIfCtx-c-consumer", + "l3Dest": "yes", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToSvcRedirectPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToSvcRedirectPol", + "tCl": "vnsSvcRedirectPol", + "tDn": "uni/tn-CIB/svcCont/svcRedirectPol-Prod-FTD-Inside-Servers_RedirectPol", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-FTD/lIf-Prod-SV-FTD-IN", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-FTD-IN_BD", + "tType": "mo" + } + } + } + ] + } + } +] \ No newline at end of file diff --git a/tests/service-ep_flag_bd_check/vnsLIfCtx-pos.json b/tests/service-ep_flag_bd_check/vnsLIfCtx-pos.json new file mode 100644 index 0000000..b6c9826 --- /dev/null +++ b/tests/service-ep_flag_bd_check/vnsLIfCtx-pos.json @@ -0,0 +1,311 @@ +[ + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "consumer", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-External-to-Any-DC-Prod_Contract-g-Prod-FTD-Local-SG-n-N1/lIfCtx-c-consumer", + 
"l3Dest": "no", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToSvcRedirectPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToSvcRedirectPol", + "tCl": "vnsSvcRedirectPol", + "tDn": "uni/tn-CIB/svcCont/svcRedirectPol-Prod-FTD-Outside-Users_RedirectPol", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-FTD/lIf-Prod-SV-FTD-Out", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-FTD-Out_BD", + "tType": "mo" + } + } + } + ] + } + }, + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "provider", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-ANYEPG-To-OSP-Cluster2_LB_uSeg_EPG_Contract-g-Prod_FTD_LB_Local-n-N1/lIfCtx-c-provider", + "l3Dest": "no", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToSvcRedirectPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToSvcRedirectPol", + "tCl": "vnsSvcRedirectPol", + "tDn": "uni/tn-CIB/svcCont/svcRedirectPol-Prod-FTD-Inside-Servers_RedirectPol", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-FTD/lIf-Prod-SV-FTD-IN", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": 
"uni/tn-CIB/BD-Prod-FTD-IN_BD", + "tType": "mo" + } + } + } + ] + } + }, + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "provider", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-Test_LB_Contract-g-Prod_FTD_LB_Local-n-N2/lIfCtx-c-provider", + "l3Dest": "yes", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-LB/lIf-Prod-SV-LB-IN", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-LB-IN_BD", + "tType": "mo" + } + } + } + ] + } + }, + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "consumer", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-Test_LB_Contract-g-Prod_FTD_LB_Local-n-N2/lIfCtx-c-consumer", + "l3Dest": "yes", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-LB/lIf-Prod-SV-LB-Out", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-LB-Out_BD", + "tType": "mo" + } + } + } + ] + } + }, + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "consumer", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-Test_LB_Contract-g-Prod-FW-LB-SG-n-node1/lIfCtx-c-consumer", + "l3Dest": 
"yes", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToSvcRedirectPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToSvcRedirectPol", + "tCl": "vnsSvcRedirectPol", + "tDn": "uni/tn-CIB/svcCont/svcRedirectPol-Prod-FTD-Inside-Servers_RedirectPol", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-FTD/lIf-Prod-SV-FTD-IN", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-FTD-IN_BD", + "tType": "mo" + } + } + } + ] + } + }, + { + "vnsLIfCtx": { + "attributes": { + "connNameOrLbl": "consumer", + "ctxDn": "uni/tn-CIB/ctx-DC-VRF", + "dn": "uni/tn-CIB/ldevCtx-c-Prod-ExtEPG-to-OSP_uSeg_EPG-g-Prod_FTD_LB_Local-n-N2/lIfCtx-c-consumer", + "l3Dest": "yes", + "permitLog": "no", + "prefGrMemb": "exclude" + }, + "children": [ + { + "vnsRsLIfCtxToLIf": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToLIf", + "tCl": "vnsLIf", + "tDn": "uni/tn-CIB/lDevVip-Prod-SV-LB/lIf-Prod-SV-LB-Out", + "tType": "mo" + } + } + }, + { + "vnsRsLIfCtxToCustQosPol": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToCustQosPol", + "stateQual": "default-target", + "tDn": "uni/tn-common/qoscustom-default" + } + } + }, + { + "vnsRsLIfCtxToBD": { + "attributes": { + "rType": "mo", + "rn": "rsLIfCtxToBD", + "tCl": "fvBD", + "tDn": "uni/tn-CIB/BD-Prod-LB-Out_BD", + "tType": "mo" + } + } + } + ] + } + } +] \ No newline at end of file From 8a888a101b1ea97ff203417f91676d1a3e22ddf9 Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Mon, 12 Jan 2026 15:35:46 -0600 Subject: [PATCH 02/14] corrected 
service_ep_flag_bd_check in test file --- .../test_service_ep_flag_bd_check.py | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 tests/service-ep_flag_bd_check/test_service_ep_flag_bd_check.py diff --git a/tests/service-ep_flag_bd_check/test_service_ep_flag_bd_check.py b/tests/service-ep_flag_bd_check/test_service_ep_flag_bd_check.py new file mode 100644 index 0000000..f5be4a6 --- /dev/null +++ b/tests/service-ep_flag_bd_check/test_service_ep_flag_bd_check.py @@ -0,0 +1,76 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + + +# icurl queries +vnsLIfCtx_api = "vnsLIfCtx.json" +vnsLIfCtx_api += "?query-target=self&rsp-subtree=children" + + +@pytest.mark.parametrize( + "icurl_outputs, cversion, tversion, expected_result", + [ + # tversion missing + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-na.json")}, + "5.2(8h)", + None, + script.MANUAL + ), + # Version not affected (both new) + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "6.0(8h)", + "6.1(1g)", + script.NA, + ), + # Version not affected (both old) + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "4.2(7s)", + "5.2(4c)", + script.NA, + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "5.2(8h)", + "6.0(8e)", + script.FAIL_O + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + "5.2(8h)", + "6.1(1f)", + script.FAIL_O + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json")}, + "5.2(8h)", + "6.0(8e)", + script.PASS + ), + # Version affected with L4L7 Interface connector without PBR + ( + {vnsLIfCtx_api: read_data(dir, 
"vnsLIfCtx-neg.json")}, + "5.2(8h)", + "6.1(1f)", + script.PASS + ), + ], +) +def test_logic(mock_icurl, cversion, tversion, expected_result): + cver = script.AciVersion(cversion) + tver = script.AciVersion(tversion) if tversion else None + result = script.service_ep_flag_bd_check(1, 1, cver, tver) + assert result == expected_result From 7885df197ab874baa1eced6f971e5e701aa8b615 Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Mon, 12 Jan 2026 15:40:27 -0600 Subject: [PATCH 03/14] corrected name --- .../test_service-ep_flag_bd_check.py | 76 ------------------- 1 file changed, 76 deletions(-) delete mode 100644 tests/service-ep_flag_bd_check/test_service-ep_flag_bd_check.py diff --git a/tests/service-ep_flag_bd_check/test_service-ep_flag_bd_check.py b/tests/service-ep_flag_bd_check/test_service-ep_flag_bd_check.py deleted file mode 100644 index f5be4a6..0000000 --- a/tests/service-ep_flag_bd_check/test_service-ep_flag_bd_check.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -vnsLIfCtx_api = "vnsLIfCtx.json" -vnsLIfCtx_api += "?query-target=self&rsp-subtree=children" - - -@pytest.mark.parametrize( - "icurl_outputs, cversion, tversion, expected_result", - [ - # tversion missing - ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-na.json")}, - "5.2(8h)", - None, - script.MANUAL - ), - # Version not affected (both new) - ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, - "6.0(8h)", - "6.1(1g)", - script.NA, - ), - # Version not affected (both old) - ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, - "4.2(7s)", - "5.2(4c)", - script.NA, - ), - # Version affected with L4L7 Interface connector without PBR - ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, - 
"5.2(8h)", - "6.0(8e)", - script.FAIL_O - ), - # Version affected with L4L7 Interface connector without PBR - ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, - "5.2(8h)", - "6.1(1f)", - script.FAIL_O - ), - # Version affected with L4L7 Interface connector without PBR - ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json")}, - "5.2(8h)", - "6.0(8e)", - script.PASS - ), - # Version affected with L4L7 Interface connector without PBR - ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json")}, - "5.2(8h)", - "6.1(1f)", - script.PASS - ), - ], -) -def test_logic(mock_icurl, cversion, tversion, expected_result): - cver = script.AciVersion(cversion) - tver = script.AciVersion(tversion) if tversion else None - result = script.service_ep_flag_bd_check(1, 1, cver, tver) - assert result == expected_result From eda9d73d9007ddb5728cfd768a7db5feabf3bbb2 Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Mon, 12 Jan 2026 18:24:57 -0600 Subject: [PATCH 04/14] changed directory name --- .../test_service_ep_flag_bd_check.py | 0 .../vnsLIfCtx-na.json | 0 .../vnsLIfCtx-neg.json | 0 .../vnsLIfCtx-pos.json | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename tests/{service-ep_flag_bd_check => service_ep_flag_bd_check}/test_service_ep_flag_bd_check.py (100%) rename tests/{service-ep_flag_bd_check => service_ep_flag_bd_check}/vnsLIfCtx-na.json (100%) rename tests/{service-ep_flag_bd_check => service_ep_flag_bd_check}/vnsLIfCtx-neg.json (100%) rename tests/{service-ep_flag_bd_check => service_ep_flag_bd_check}/vnsLIfCtx-pos.json (100%) diff --git a/tests/service-ep_flag_bd_check/test_service_ep_flag_bd_check.py b/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py similarity index 100% rename from tests/service-ep_flag_bd_check/test_service_ep_flag_bd_check.py rename to tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py diff --git a/tests/service-ep_flag_bd_check/vnsLIfCtx-na.json 
b/tests/service_ep_flag_bd_check/vnsLIfCtx-na.json similarity index 100% rename from tests/service-ep_flag_bd_check/vnsLIfCtx-na.json rename to tests/service_ep_flag_bd_check/vnsLIfCtx-na.json diff --git a/tests/service-ep_flag_bd_check/vnsLIfCtx-neg.json b/tests/service_ep_flag_bd_check/vnsLIfCtx-neg.json similarity index 100% rename from tests/service-ep_flag_bd_check/vnsLIfCtx-neg.json rename to tests/service_ep_flag_bd_check/vnsLIfCtx-neg.json diff --git a/tests/service-ep_flag_bd_check/vnsLIfCtx-pos.json b/tests/service_ep_flag_bd_check/vnsLIfCtx-pos.json similarity index 100% rename from tests/service-ep_flag_bd_check/vnsLIfCtx-pos.json rename to tests/service_ep_flag_bd_check/vnsLIfCtx-pos.json From 5febc4b4a448bc55cb1a8f1284ddc0989fd09d53 Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Mon, 12 Jan 2026 23:00:54 -0600 Subject: [PATCH 05/14] changes in test file --- .../test_service_ep_flag_bd_check.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py b/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py index f5be4a6..740e593 100644 --- a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py +++ b/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py @@ -70,7 +70,7 @@ ], ) def test_logic(mock_icurl, cversion, tversion, expected_result): - cver = script.AciVersion(cversion) - tver = script.AciVersion(tversion) if tversion else None - result = script.service_ep_flag_bd_check(1, 1, cver, tver) + cversion = script.AciVersion(cversion) + tversion = script.AciVersion(tversion) if tversion else None + result = script.service_ep_flag_bd_check(1, 1, cversion, tversion) assert result == expected_result From b5ecb6e43da3b3763ce5a0839b0fa0443bd414e1 Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Mon, 12 Jan 2026 23:19:07 -0600 Subject: [PATCH 06/14] changed func check_title --- 
aci-preupgrade-validation-script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aci-preupgrade-validation-script.py b/aci-preupgrade-validation-script.py index a6aaebd..bd208ce 100644 --- a/aci-preupgrade-validation-script.py +++ b/aci-preupgrade-validation-script.py @@ -5460,7 +5460,7 @@ def isis_database_byte_check(tversion, **kwargs): return Result(result=NA, msg=VER_NOT_AFFECTED) return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) -@check_wrapper(check_title='Service-EP Flag in BD without PBR') +@check_wrapper(check_title='Service EP Flag in BD without PBR') def service_ep_flag_bd_check(cversion, tversion, **kwargs): result = PASS headers = ["Tenant ", "Bridge Domain ", "Service Graph Device", "Device Node Name" ] From 2b5855f6bfa7d338302ada01c216a704fb8fef7c Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Tue, 13 Jan 2026 22:47:52 -0600 Subject: [PATCH 07/14] changed test file. 
--- .../test_service_ep_flag_bd_check.py | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py b/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py index 740e593..540310a 100644 --- a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py +++ b/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py @@ -20,49 +20,63 @@ [ # tversion missing ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-na.json")}, + { + vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-na.json") + }, "5.2(8h)", None, script.MANUAL ), # Version not affected (both new) ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + { + vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json") + }, "6.0(8h)", "6.1(1g)", script.NA, ), # Version not affected (both old) ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + { + vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json") + }, "4.2(7s)", "5.2(4c)", script.NA, ), # Version affected with L4L7 Interface connector without PBR ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + { + vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json") + }, "5.2(8h)", "6.0(8e)", script.FAIL_O ), # Version affected with L4L7 Interface connector without PBR ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json")}, + { + vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-pos.json") + }, "5.2(8h)", "6.1(1f)", script.FAIL_O ), # Version affected with L4L7 Interface connector without PBR ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json")}, + { + vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json") + }, "5.2(8h)", "6.0(8e)", script.PASS ), # Version affected with L4L7 Interface connector without PBR ( - {vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json")}, + { + vnsLIfCtx_api: read_data(dir, "vnsLIfCtx-neg.json") + }, "5.2(8h)", "6.1(1f)", script.PASS @@ -70,7 +84,5 @@ ], ) def test_logic(mock_icurl, cversion, tversion, expected_result): - cversion = 
script.AciVersion(cversion) - tversion = script.AciVersion(tversion) if tversion else None - result = script.service_ep_flag_bd_check(1, 1, cversion, tversion) + result = script.service_ep_flag_bd_check(1, 1, script.AciVersion(cversion), script.AciVersion(tversion) if tversion else None) assert result == expected_result From 6e620d3d11b411de57a470e5df74e9f76efb91cb Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Mon, 26 Jan 2026 18:13:02 -0600 Subject: [PATCH 08/14] commit --- tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py b/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py index 540310a..ed7f2c9 100644 --- a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py +++ b/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py @@ -83,6 +83,6 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): +def test_logic(mock_icurl, icurl_outputs, cversion, tversion, expected_result): result = script.service_ep_flag_bd_check(1, 1, script.AciVersion(cversion), script.AciVersion(tversion) if tversion else None) assert result == expected_result From 7888e1109034c3bb7b333fa07b39561b41f03ca3 Mon Sep 17 00:00:00 2001 From: takishida <38262981+takishida@users.noreply.github.com> Date: Tue, 25 Nov 2025 16:49:01 -0800 Subject: [PATCH 09/14] V4.0.0 (#296) * Support multithread execution * fix: switch_ssd_check regex and add pytest * fix: MANUAL instead of ERROR when switch bootflash objects not found * fix: MANUAL instead of ERROR when switch sup objects not found * bump to v3.4.13 * feat: Add fabricNode in common_data * fix: fallback on firmware queries on older ACI version to get current ver * fix: fallback to infraWiNode with pre-4.0 without fabricNode.address * test: fail mock_icurl when test data is not provided * fix: Change error message for API timeout * 
fix: raise exception with empty imdata with totalCount>1 * fix: pytest error for version display * fix: Abort correctly when user is not authorized to access firmwareCtrlrRunning * fix: Output from a timed out test thread task impacts other tests result * fix: Raise exception with empty imdata with totalCount>0. Handle docker0 check error individually * Bump to v4.0.0 --- aci-preupgrade-validation-script.py | 1322 +++++++++++------ pytest.ini | 2 +- tests/apic_version_md5_check/topSystem.json | 1023 ------------- .../access_untagged_check/faultInst_NEG.json | 0 .../access_untagged_check/faultInst_POS.json | 0 .../test_access_untagged_check.py | 7 +- .../aes_encryption_check/exportcryptkey.json | 0 .../exportcryptkey_disabled.json | 0 .../test_aes_encryption_check.py | 8 +- .../apic_ca_cert_validation/NEG_certreq.txt | 0 .../apic_ca_cert_validation/POS_certreq.txt | 0 .../test_apic_ca_cert_validation.py | 10 +- .../infraWiNode_3.json | 0 .../infraWiNode_4.json | 0 .../test_apic_database_size_check.py | 22 +- tests/checks/apic_ssd_check/fabricNode.json | 93 ++ .../apic_ssd_check/fabricNode_no_apic.json | 48 + .../checks/apic_ssd_check/fabricNode_old.json | 62 + tests/checks/apic_ssd_check/fault_F2731.json | 12 + .../apic_ssd_check/infraWiNode_apic1.json | 62 + .../apic_ssd_check/test_apic_ssd_check.py | 214 +++ .../apic_version_md5_check/fabricNode.json | 93 ++ .../fabricNode_no_apic.json | 48 + .../fabricNode_old.json | 62 + .../firmwareFirmware_6.0.5h.json | 0 ...rmwareFirmware_6.0.5h_image_sign_fail.json | 0 .../infraWiNode_apic1.json | 62 + .../test_apic_version_md5_check.py | 151 +- .../ave_eol_check/test_ave_eol_check.py | 11 +- .../ave_eol_check/vmmDomP_POS.json | 0 .../fvCtx_pos.json | 0 .../test_bgp_golf_route_target_type_check.py | 13 +- .../l3extRsNodeL3OutAtt_neg.json | 0 .../l3extRsNodeL3OutAtt_pos.json | 0 .../l3extRsNodeL3OutAtt_pos1.json | 0 .../l3extRsNodeL3OutAtt_pos2.json | 0 .../test_bgp_peer_loopback_check.py | 7 +- .../compatRsSuppHw_605_L2.json 
| 0 .../compatRsSuppHw_605_M1.json | 0 .../compatRsSuppHw_empty.json | 0 .../eqptCh_newver.json | 0 .../eqptCh_oldver.json | 0 .../eqptCh_reallyoldver.json | 0 .../test_cimc_compatibilty_check.py | 24 +- .../eqptFC_NEG.json | 0 .../eqptFC_POS.json | 0 .../eqptLC_NEG.json | 0 .../eqptLC_POS.json | 0 ...st_clock_signal_component_failure_check.py | 7 +- .../cloudsecPreSharedKey_err.json | 0 .../cloudsecPreSharedKey_neg.json | 0 .../cloudsecPreSharedKey_pos.json | 0 .../cloudsecPreSharedKey_pos2.json | 0 .../test_cloudsec_encryption_depr_check.py | 7 +- .../configpushShardCont_pos.json | 0 .../test_configpush_shard_check.py | 12 +- tests/checks/conftest.py | 137 ++ .../epg_epg2_unmatched.json | 0 .../esg_esg2.json | 0 .../fvCtx_consumer_same_vrf.json | 0 .../fvCtx_consumer_shared.json | 0 .../fvCtx_no_consumers.json | 0 .../global_contracts_epg_only.json | 0 .../global_contracts_esg_only.json | 0 .../global_contracts_same_vrf.json | 0 .../global_contracts_shared.json | 0 .../instp_l3instp2.json | 0 ...st_consumer_vzany_shared_services_check.py | 13 +- .../vnsGraphInst_redirect.json | 0 .../test_contract_22_defect_check.py | 14 +- .../apContainerPol_10_0_0_1__16.json | 0 .../apContainerPol_172_16_0_1__15.json | 0 .../apContainerPol_172_17_0_10__16.json | 0 .../apContainerPol_172_17_0_1__16.json | 0 .../apContainerPol_172_17_0_1__17.json | 0 .../apContainerPol_172_18_0_1__16.json | 0 .../infraWiNode_10_0_0_0__16.json | 0 .../infraWiNode_10_0_x_0__24_remote_apic.json | 0 .../infraWiNode_172_17_0_0__16.json | 0 ...nfraWiNode_172_17_x_0__24_remote_apic.json | 0 .../test_docker0_subent_overlap_check.py | 38 +- .../eecdh_cipher_check/commCipher_neg.json | 0 .../eecdh_cipher_check/commCipher_neg2.json | 0 .../eecdh_cipher_check/commCipher_pos.json | 0 .../test_eecdh_cipher_check.py | 11 +- .../faultInst-encap-pos.json | 0 .../faultInst-new-version.json | 0 .../encap_already_in_use_check/fvIfConn.json | 0 .../test_encap_already_in_use_check.py | 7 +- .../faultInst_neg.json | 0 
.../faultInst_pos.json | 0 .../test_equipment_disk_limits_exceeded.py | 9 +- .../test_eventmgr_db_defect_check.py | 10 +- .../fabricRsOosPath_neg.json | 0 .../fabricRsOosPath_pos1.json | 0 .../fabricRsOosPath_pos2.json | 0 .../fabricRsOosPath_pos3.json | 0 .../fabricRsOosPath_pos4.json | 0 .../fabricRsOosPath_pos5.json | 0 .../fabricRsOosPath_pos6.json | 0 .../infraRsHPathAtt_neg.json | 0 .../infraRsHPathAtt_pos1.json | 0 .../infraRsHPathAtt_pos2.json | 0 .../infraRsHPathAtt_pos3.json | 0 .../infraRsHPathAtt_pos4.json | 0 .../infraRsHPathAtt_pos5.json | 0 .../infraRsHPathAtt_pos6.json | 0 .../test_fabricPathEP_target_check.py | 12 +- .../fabric_dpp_check/lbpPol_NEG.json | 0 .../fabric_dpp_check/lbpPol_POS.json | 0 .../fabric_dpp_check/test_fabric_dpp_check.py | 12 +- .../fabricNode.json | 122 ++ .../lldpAdjEp_neg.json | 0 .../lldpAdjEp_pos_spine_only.json | 0 .../lldpAdjEp_pos_spine_t1.json | 0 .../lldpAdjEp_pos_t1_only.json | 0 .../test_fabric_link_redundancy_check.py | 67 + .../fabric_port_down_check/faultInst_pos.json | 0 .../test_fabric_port_down_check.py | 9 +- .../fabricdomain_name_check/fabricNode.json | 108 ++ .../fabricNode_no_apic1.json | 93 ++ .../test_fabricdomain_name_check.py | 126 ++ .../topSystem_1POS.json | 13 + .../topSystem_2POS.json | 13 + .../topSystem_NEG.json | 13 + .../fc_ex_model_check/fabricNode_NEG.json | 107 ++ .../fc_ex_model_check/fabricNode_POS.json | 122 ++ .../fc_ex_model_check/fcEntity_101_102.json | 18 + .../fcEntity_101_102_103.json | 26 + .../fc_ex_model_check/fcEntity_104.json | 10 + .../test_fc_ex_model_check.py | 140 ++ .../fabricNode_no_gen1.json | 30 + .../fabricNode_with_gen1.json | 44 + .../test_gen1_switch_compatibility_check.py | 42 + tests/{ => checks}/helpers/__init__.py | 0 tests/{ => checks}/helpers/utils.py | 2 +- .../commHttps_neg1.json | 0 .../commHttps_neg2.json | 0 .../commHttps_pos.json | 0 .../test_https_throttle_rate_check.py | 12 +- .../fvnsVlanInstP_neg.json | 0 .../fvnsVlanInstP_pos.json | 0 
.../test_internal_vlanpool_check.py | 7 +- .../internal_vlanpool_check/vmmDomP_neg.json | 0 .../internal_vlanpool_check/vmmDomP_pos.json | 0 .../isisDTEp_NEG.json | 0 .../isisDTEp_POS.json | 0 .../test_isis_database_byte_check.py | 12 +- .../fvFabricExtConnP_pos1.json | 0 .../fvFabricExtConnP_pos2.json | 0 .../fvFabricExtConnP_pos3.json | 0 .../isisDomP-default_missing.json | 0 .../isisDomP-default_neg.json | 0 .../isisDomP-default_pos.json | 0 ...test_isis_redis_metric_mpod_msite_check.py | 7 +- .../l3out_mtu_check/l2pol-default.json | 0 .../l3out_mtu_check/l3extRsPathL3OutAtt.json | 0 .../l3out_mtu_check/l3extVirtualLIfP.json | 0 .../l3extVirtualLIfP_unresolved.json | 0 .../l3out_mtu_check/test_l3out_mtu_check.py | 7 +- .../diff_l3out_loopback.json | 0 .../diff_l3out_loopback_and_rtrId.json | 0 .../diff_l3out_rtrId.json | 0 .../no_overlap.json | 0 .../overlap_on_diff_nodes.json | 0 .../same_l3out_loopback.json | 0 .../same_l3out_loopback_and_rtrId.json | 0 .../same_l3out_loopback_with_subnet_mask.json | 0 .../same_l3out_rtrId.json | 0 .../same_l3out_rtrId_non_vpc.json | 0 .../same_l3out_two_loopbacks.json | 0 .../test_l3out_overlapping_loopback_check.py | 7 +- .../rtctrlProfile_missing_target.json | 0 ...ultiple_l3out_multiple_missing_target.json | 0 ...rtctrlProfile_multiple_missing_target.json | 0 .../rtctrlProfile_no_missing_target.json | 0 ...st_l3out_route_map_missing_target_check.py | 10 +- .../fvRsDomAtt_neg.json | 0 .../fvRsDomAtt_pos.json | 0 .../infraPortBlk_neg.json | 0 .../infraPortBlk_pos.json | 0 ...ldp_custom_int_description_defect_check.py | 7 +- .../llfc_susceptibility_check/ethpmFcot.json | 0 .../test_llfc_susceptibility_check.py | 13 +- .../fabricNode_all_phys_apic.json | 92 ++ .../fabricNode_mini_aci.json | 92 ++ .../test_mini_aci_6_0_2_check.py | 75 + .../topSystem_controller_neg.json | 0 .../topSystem_controller_pos.json | 0 .../n9408_model_check/eqptCh_NEG.json | 0 .../n9408_model_check/eqptCh_POS.json | 0 .../test_n9408_model_check.py | 8 
+- .../fabricNode_FX3H.json | 77 + .../fabricNode_FX3P.json | 77 + .../fabricNode_FX3P3H.json | 77 + .../fabricNode_no_FX3P3H.json | 47 + ..._n9k_c93108tc_fx3p_interface_down_check.py | 93 ++ .../ntp_status_check/NEG_datetimeClkPol.json | 46 + .../ntp_status_check/NEG_datetimeNtpq.json | 29 + .../ntp_status_check/POS_datetimeClkPol.json | 46 + .../ntp_status_check/POS_datetimeNtpq.json | 29 + tests/checks/ntp_status_check/fabricNode.json | 64 + .../ntp_status_check/test_ntp_status_check.py | 65 + .../observer_db_size_check/fabricNode.json | 46 + .../fabricNode_no_apic.json | 13 + .../fabricNode_old.json | 62 + .../infraWiNode_apic1.json | 62 + .../test_observer_db_size_check.py | 185 +++ .../oob_mgmt_security_check/mgmtInstP.json | 0 .../mgmtInstP_no_contracts.json | 0 .../mgmtInstP_no_subnets.json | 0 .../oob_mgmt_security_check/mgmtOoB.json | 0 .../mgmtOoB_no_contracts.json | 0 .../test_oob_mgmt_security_check.py | 12 +- .../ethpmPhysIf-neg.json | 0 .../ethpmPhysIf-pos.json | 0 .../test_out_of_service_ports_check.py | 12 +- .../access_policy.json | 0 .../overlapping_vlan_pools_check/fvAEPg.json | 0 .../fvIfConn.json | 0 .../infraSetPol_no.json | 0 .../infraSetPol_yes.json | 0 .../templates/access_policy.j2 | 0 .../templates/fvAEPg.j2 | 0 .../templates/fvIfConn.j2 | 0 .../templates/macros.j2 | 0 .../test_overlapping_vlan_pools_check.py | 18 +- .../test_pbr_high_scale_check.py | 12 +- .../vnsAdjacencyDefCont_HIGH.json | 0 .../vnsAdjacencyDefCont_LOW.json | 0 .../vnsSvcRedirEcmpBucketCons_HIGH.json | 0 .../vnsSvcRedirEcmpBucketCons_LOW.json | 0 .../post_upgrade_cb_check/moCount_0.json | 0 .../post_upgrade_cb_check/moCount_10.json | 0 .../post_upgrade_cb_check/moCount_8.json | 0 .../test_post_upgrade_cb_check.py | 15 +- ...F0467_prefix-entry-already-in-use_new.json | 0 ...F0467_prefix-entry-already-in-use_old.json | 0 .../prefix_already_in_use_check/fvCtx.json | 0 .../l3extRsEctx.json | 0 .../l3extSubnet_no_overlap.json | 0 .../l3extSubnet_overlap.json | 0 
.../test_prefix_already_in_use_check.py | 7 +- .../fabricNode_no_RL.json | 123 ++ .../fabricNode_with_RL.json | 145 ++ .../infraSetPol_DTF_disabled.json | 13 + .../infraSetPol_DTF_enabled.json | 13 + .../infraSetPol_no_DTF.json | 12 + .../test_r_leaf_compatibility_check.py | 103 ++ .../rtctrlCtxP_NEG.json | 0 .../rtctrlCtxP_POS.json | 0 .../rtctrlSubjP_NEG.json | 0 .../rtctrlSubjP_POS.json | 0 .../test_rtmap_comm_match_defect_check.py | 12 +- .../fvRtEPpInfoToBD.json | 0 .../test_service_bd_forceful_routing_check.py | 12 +- .../fabricNode.json | 46 + .../fabricRsDecommissionNode_POS.json | 0 .../test_stale_decomissioned_spine_check.py | 57 + .../standby_sup_sync_check/eqptSupC_NEG.json | 0 .../standby_sup_sync_check/eqptSupC_POS.json | 0 .../test_standby_sup_sync_check.py | 153 ++ .../static_route_overlap_check/fvRsCtx.json | 0 .../static_route_overlap_check/fvSubnet.json | 0 .../ipRouteP_empty.json | 0 .../ipRouteP_neg.json | 0 .../ipRouteP_pos.json | 0 .../l3extRsEctx.json | 0 .../test_static_route_overlap_check.py | 91 ++ .../subnet_scope_check/fvAEPg_empty.json | 0 .../subnet_scope_check/fvAEPg_neg.json | 0 .../subnet_scope_check/fvAEPg_pos.json | 0 .../{ => checks}/subnet_scope_check/fvBD.json | 0 .../subnet_scope_check/fvRsBd.json | 0 .../test_subnet_scope_check.py | 74 + .../eqptSupC_SUP_A.json | 0 .../eqptSupC_SUP_A_Aplus.json | 0 .../eqptSupC_SUP_Aplus.json | 0 .../eqptSupC_no_SUP_A_Aplus.json | 0 .../test_sup_a_high_memory_check.py | 7 +- .../sup_hwrev_check/eqptSpCmnBlk_NEG.json | 0 .../sup_hwrev_check/eqptSpCmnBlk_POS.json | 0 .../sup_hwrev_check/test_sup_hwrev_check.py | 17 +- .../eqptcapacityFSPartition.json | 0 .../maintUpgJob_not_downloaded.json | 0 .../maintUpgJob_old_ver_no_prop.json | 0 .../maintUpgJob_pre_downloaded.json | 0 .../test_switch_bootflash_usage_check.py | 62 + .../bgpRRNodePEp_1001_1002_2001_2002.json | 38 + .../fabricExplicitGEp.json | 92 ++ .../fabricNode.json | 206 +++ ...extRsNodeL3OutAtt_1001_1002_2001_2002.json | 50 + 
...extRsNodeL3OutAtt_1003_1004_2001_2002.json | 50 + .../lldpCtrlrAdjEp.json | 62 + .../maintMaintGrp_ALL.json | 94 ++ .../maintMaintGrp_BAD_GRP1_GRP2.json | 116 ++ .../maintMaintGrp_BAD_ONLY_POD1_SPINE_RR.json | 160 ++ .../maintMaintGrp_EVEN_ODD.json | 116 ++ .../maintMaintGrp_SPINE_LEAF.json | 116 ++ .../test_switch_group_guideline_check.py | 193 +++ tests/checks/switch_ssd_check/faultInst.json | 28 + .../switch_ssd_check/test_switch_ssd_check.py | 54 + .../switch_status_check/fabricNode_NEG.json | 122 ++ .../switch_status_check/fabricNode_POS.json | 122 ++ .../fabricRsDecommissionNode.json | 12 + .../test_switch_status_check.py | 46 + .../telemetryStatsServerP_neg.json | 0 .../telemetryStatsServerP_pos.json | 0 ...test_telemetryStatsServerP_object_check.py | 13 +- .../dbgAcPath_max.json | 0 .../dbgAcPath_na.json | 0 .../dbgAcPath_pass.json | 0 .../test_tep_to_tep_ac_count_check.py | 23 +- ..._unsupported_fec_configuration_ex_check.py | 14 +- .../topSystem_neg.json | 0 .../topSystem_pos.json | 0 .../uplink_limit_check/eqptPortP_NEG.json | 0 .../uplink_limit_check/eqptPortP_POS.json | 0 .../uplink_limit_check/eqptPortP_empty.json | 0 .../test_uplink_limit_check.py | 10 +- .../firmwareFirmware_empty.json | 0 .../firmwareFirmware_neg.json | 0 .../firmwareFirmware_pos.json | 0 .../firmwareFirmware_pos2.json | 0 .../firmwareFirmware_pos3.json | 0 .../firmwareFirmware_pos4.json | 0 .../test_validate_32_64_bit_image_check.py | 69 +- .../fvUplinkOrderCont_neg.json | 0 .../fvUplinkOrderCont_not_exist.json | 0 .../fvUplinkOrderCont_pos.json | 0 .../test_vmm_active_uplinks_check.py | 9 +- .../vpc_paired_switches_check/fabricNode.json | 145 ++ .../test_vpc_paired_switches_check.py | 38 + .../test_vzany_vzany_service_epg_check.py | 13 +- .../vzRsSubjGraphAtt.json | 0 .../vzRtAny_vzAny_prov_cons_diff_VRFs.json | 0 .../vzRtAny_vzAny_prov_only.json | 0 .../vzRtAny_vzAny_vzAny.json | 0 .../vzRtAny_vzAny_vzAny_2_VRFs.json | 0 tests/conftest.py | 452 ++++-- .../fabricNode.json | 
101 -- .../test_fabric_link_redundancy_check.py | 61 - .../test_fabricdomain_name_check.py | 79 - .../topSystem_1POS.json | 35 - .../topSystem_2POS.json | 35 - .../topSystem_NEG.json | 35 - tests/fc_ex_model_check/fabricNode_NEG.json | 1 - tests/fc_ex_model_check/fabricNode_POS.json | 26 - tests/fc_ex_model_check/fcEntity_NEG.json | 1 - tests/fc_ex_model_check/fcEntity_POS.json | 18 - .../test_fc_ex_model_check.py | 65 - .../test_mini_aci_6_0_2_check.py | 71 - .../fabricNode_FX3H.json | 26 - .../fabricNode_FX3P.json | 26 - .../fabricNode_FX3P3H.json | 50 - ..._n9k_c93108tc_fx3p_interface_down_check.py | 50 - .../test_observer_db_size_check.py | 128 -- tests/observer_db_size_check/topSystem.json | 35 - .../topSystem_empty.json | 1 - .../fabricRsDecommissionNode_NEG.json | 1 - .../test_stale_decomissioned_spine_check.py | 66 - .../topSystem.json | 24 - .../test_standby_sup_sync_check.py | 174 --- .../test_static_route_overlap_check.py | 67 - .../test_subnet_scope_check.py | 63 - .../test_switch_bootflash_usage_check.py | 46 - tests/test_AciResult.py | 214 +-- tests/test_CheckManager.py | 353 +++++ tests/test_ResultManager.py | 123 ++ tests/test_ThreadManager.py | 49 + tests/test_common_data.py | 634 ++++++++ tests/test_icurl.py | 36 + tests/test_main.py | 148 +- tests/test_prepare.py | 327 ---- tests/test_run_checks.py | 188 --- .../test_vpc_paired_switches_check.py | 36 - .../vpc_paired_switches_check/topSystem.json | 134 -- 377 files changed, 9174 insertions(+), 3949 deletions(-) delete mode 100644 tests/apic_version_md5_check/topSystem.json rename tests/{ => checks}/access_untagged_check/faultInst_NEG.json (100%) rename tests/{ => checks}/access_untagged_check/faultInst_POS.json (100%) rename tests/{ => checks}/access_untagged_check/test_access_untagged_check.py (80%) rename tests/{ => checks}/aes_encryption_check/exportcryptkey.json (100%) rename tests/{ => checks}/aes_encryption_check/exportcryptkey_disabled.json (100%) rename tests/{ => 
checks}/aes_encryption_check/test_aes_encryption_check.py (87%) rename tests/{ => checks}/apic_ca_cert_validation/NEG_certreq.txt (100%) rename tests/{ => checks}/apic_ca_cert_validation/POS_certreq.txt (100%) rename tests/{ => checks}/apic_ca_cert_validation/test_apic_ca_cert_validation.py (69%) rename tests/{ => checks}/apic_database_size_check/infraWiNode_3.json (100%) rename tests/{ => checks}/apic_database_size_check/infraWiNode_4.json (100%) rename tests/{ => checks}/apic_database_size_check/test_apic_database_size_check.py (96%) create mode 100644 tests/checks/apic_ssd_check/fabricNode.json create mode 100644 tests/checks/apic_ssd_check/fabricNode_no_apic.json create mode 100644 tests/checks/apic_ssd_check/fabricNode_old.json create mode 100644 tests/checks/apic_ssd_check/fault_F2731.json create mode 100644 tests/checks/apic_ssd_check/infraWiNode_apic1.json create mode 100644 tests/checks/apic_ssd_check/test_apic_ssd_check.py create mode 100644 tests/checks/apic_version_md5_check/fabricNode.json create mode 100644 tests/checks/apic_version_md5_check/fabricNode_no_apic.json create mode 100644 tests/checks/apic_version_md5_check/fabricNode_old.json rename tests/{ => checks}/apic_version_md5_check/firmwareFirmware_6.0.5h.json (100%) rename tests/{ => checks}/apic_version_md5_check/firmwareFirmware_6.0.5h_image_sign_fail.json (100%) create mode 100644 tests/checks/apic_version_md5_check/infraWiNode_apic1.json rename tests/{ => checks}/apic_version_md5_check/test_apic_version_md5_check.py (56%) rename tests/{ => checks}/ave_eol_check/test_ave_eol_check.py (81%) rename tests/{ => checks}/ave_eol_check/vmmDomP_POS.json (100%) rename tests/{ => checks}/bgp_golf_route_target_type_check/fvCtx_pos.json (100%) rename tests/{ => checks}/bgp_golf_route_target_type_check/test_bgp_golf_route_target_type_check.py (83%) rename tests/{ => checks}/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_neg.json (100%) rename tests/{ => 
checks}/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos.json (100%) rename tests/{ => checks}/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos1.json (100%) rename tests/{ => checks}/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos2.json (100%) rename tests/{ => checks}/bgp_peer_loopback_check/test_bgp_peer_loopback_check.py (87%) rename tests/{ => checks}/cimc_compatibilty_check/compatRsSuppHw_605_L2.json (100%) rename tests/{ => checks}/cimc_compatibilty_check/compatRsSuppHw_605_M1.json (100%) rename tests/{ => checks}/cimc_compatibilty_check/compatRsSuppHw_empty.json (100%) rename tests/{ => checks}/cimc_compatibilty_check/eqptCh_newver.json (100%) rename tests/{ => checks}/cimc_compatibilty_check/eqptCh_oldver.json (100%) rename tests/{ => checks}/cimc_compatibilty_check/eqptCh_reallyoldver.json (100%) rename tests/{ => checks}/cimc_compatibilty_check/test_cimc_compatibilty_check.py (60%) rename tests/{ => checks}/clock_signal_component_failure_check/eqptFC_NEG.json (100%) rename tests/{ => checks}/clock_signal_component_failure_check/eqptFC_POS.json (100%) rename tests/{ => checks}/clock_signal_component_failure_check/eqptLC_NEG.json (100%) rename tests/{ => checks}/clock_signal_component_failure_check/eqptLC_POS.json (100%) rename tests/{ => checks}/clock_signal_component_failure_check/test_clock_signal_component_failure_check.py (88%) rename tests/{ => checks}/cloudsec_encryption_depr_check/cloudsecPreSharedKey_err.json (100%) rename tests/{ => checks}/cloudsec_encryption_depr_check/cloudsecPreSharedKey_neg.json (100%) rename tests/{ => checks}/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos.json (100%) rename tests/{ => checks}/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos2.json (100%) rename tests/{ => checks}/cloudsec_encryption_depr_check/test_cloudsec_encryption_depr_check.py (89%) rename tests/{ => checks}/configpush_shard_check/configpushShardCont_pos.json (100%) rename tests/{ => 
checks}/configpush_shard_check/test_configpush_shard_check.py (86%) create mode 100644 tests/checks/conftest.py rename tests/{ => checks}/consumer_vzany_shared_services_check/epg_epg2_unmatched.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/esg_esg2.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/fvCtx_consumer_same_vrf.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/fvCtx_consumer_shared.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/fvCtx_no_consumers.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/global_contracts_epg_only.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/global_contracts_esg_only.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/global_contracts_same_vrf.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/global_contracts_shared.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/instp_l3instp2.json (100%) rename tests/{ => checks}/consumer_vzany_shared_services_check/test_consumer_vzany_shared_services_check.py (97%) rename tests/{ => checks}/consumer_vzany_shared_services_check/vnsGraphInst_redirect.json (100%) rename tests/{ => checks}/contract_22_defect_check/test_contract_22_defect_check.py (65%) rename tests/{ => checks}/docker0_subent_overlap_check/apContainerPol_10_0_0_1__16.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/apContainerPol_172_16_0_1__15.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/apContainerPol_172_17_0_10__16.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/apContainerPol_172_17_0_1__16.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/apContainerPol_172_17_0_1__17.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/apContainerPol_172_18_0_1__16.json (100%) rename tests/{ => 
checks}/docker0_subent_overlap_check/infraWiNode_10_0_0_0__16.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/infraWiNode_10_0_x_0__24_remote_apic.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/infraWiNode_172_17_0_0__16.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/infraWiNode_172_17_x_0__24_remote_apic.json (100%) rename tests/{ => checks}/docker0_subent_overlap_check/test_docker0_subent_overlap_check.py (84%) rename tests/{ => checks}/eecdh_cipher_check/commCipher_neg.json (100%) rename tests/{ => checks}/eecdh_cipher_check/commCipher_neg2.json (100%) rename tests/{ => checks}/eecdh_cipher_check/commCipher_pos.json (100%) rename tests/{ => checks}/eecdh_cipher_check/test_eecdh_cipher_check.py (80%) rename tests/{ => checks}/encap_already_in_use_check/faultInst-encap-pos.json (100%) rename tests/{ => checks}/encap_already_in_use_check/faultInst-new-version.json (100%) rename tests/{ => checks}/encap_already_in_use_check/fvIfConn.json (100%) rename tests/{ => checks}/encap_already_in_use_check/test_encap_already_in_use_check.py (86%) rename tests/{ => checks}/equipment_disk_limits_exceeded/faultInst_neg.json (100%) rename tests/{ => checks}/equipment_disk_limits_exceeded/faultInst_pos.json (100%) rename tests/{ => checks}/equipment_disk_limits_exceeded/test_equipment_disk_limits_exceeded.py (79%) rename tests/{ => checks}/eventmgr_db_defect_check/test_eventmgr_db_defect_check.py (77%) rename tests/{ => checks}/fabricPathEP_target_check/fabricRsOosPath_neg.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/fabricRsOosPath_pos1.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/fabricRsOosPath_pos2.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/fabricRsOosPath_pos3.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/fabricRsOosPath_pos4.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/fabricRsOosPath_pos5.json (100%) rename tests/{ => 
checks}/fabricPathEP_target_check/fabricRsOosPath_pos6.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/infraRsHPathAtt_neg.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/infraRsHPathAtt_pos1.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/infraRsHPathAtt_pos2.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/infraRsHPathAtt_pos3.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/infraRsHPathAtt_pos4.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/infraRsHPathAtt_pos5.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/infraRsHPathAtt_pos6.json (100%) rename tests/{ => checks}/fabricPathEP_target_check/test_fabricPathEP_target_check.py (89%) rename tests/{ => checks}/fabric_dpp_check/lbpPol_NEG.json (100%) rename tests/{ => checks}/fabric_dpp_check/lbpPol_POS.json (100%) rename tests/{ => checks}/fabric_dpp_check/test_fabric_dpp_check.py (85%) create mode 100644 tests/checks/fabric_link_redundancy_check/fabricNode.json rename tests/{ => checks}/fabric_link_redundancy_check/lldpAdjEp_neg.json (100%) rename tests/{ => checks}/fabric_link_redundancy_check/lldpAdjEp_pos_spine_only.json (100%) rename tests/{ => checks}/fabric_link_redundancy_check/lldpAdjEp_pos_spine_t1.json (100%) rename tests/{ => checks}/fabric_link_redundancy_check/lldpAdjEp_pos_t1_only.json (100%) create mode 100644 tests/checks/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py rename tests/{ => checks}/fabric_port_down_check/faultInst_pos.json (100%) rename tests/{ => checks}/fabric_port_down_check/test_fabric_port_down_check.py (78%) create mode 100644 tests/checks/fabricdomain_name_check/fabricNode.json create mode 100644 tests/checks/fabricdomain_name_check/fabricNode_no_apic1.json create mode 100644 tests/checks/fabricdomain_name_check/test_fabricdomain_name_check.py create mode 100644 tests/checks/fabricdomain_name_check/topSystem_1POS.json create mode 100644 
tests/checks/fabricdomain_name_check/topSystem_2POS.json create mode 100644 tests/checks/fabricdomain_name_check/topSystem_NEG.json create mode 100644 tests/checks/fc_ex_model_check/fabricNode_NEG.json create mode 100644 tests/checks/fc_ex_model_check/fabricNode_POS.json create mode 100644 tests/checks/fc_ex_model_check/fcEntity_101_102.json create mode 100644 tests/checks/fc_ex_model_check/fcEntity_101_102_103.json create mode 100644 tests/checks/fc_ex_model_check/fcEntity_104.json create mode 100644 tests/checks/fc_ex_model_check/test_fc_ex_model_check.py create mode 100644 tests/checks/gen1_switch_compatibility_check/fabricNode_no_gen1.json create mode 100644 tests/checks/gen1_switch_compatibility_check/fabricNode_with_gen1.json create mode 100644 tests/checks/gen1_switch_compatibility_check/test_gen1_switch_compatibility_check.py rename tests/{ => checks}/helpers/__init__.py (100%) rename tests/{ => checks}/helpers/utils.py (68%) rename tests/{ => checks}/https_throttle_rate_check/commHttps_neg1.json (100%) rename tests/{ => checks}/https_throttle_rate_check/commHttps_neg2.json (100%) rename tests/{ => checks}/https_throttle_rate_check/commHttps_pos.json (100%) rename tests/{ => checks}/https_throttle_rate_check/test_https_throttle_rate_check.py (88%) rename tests/{ => checks}/internal_vlanpool_check/fvnsVlanInstP_neg.json (100%) rename tests/{ => checks}/internal_vlanpool_check/fvnsVlanInstP_pos.json (100%) rename tests/{ => checks}/internal_vlanpool_check/test_internal_vlanpool_check.py (93%) rename tests/{ => checks}/internal_vlanpool_check/vmmDomP_neg.json (100%) rename tests/{ => checks}/internal_vlanpool_check/vmmDomP_pos.json (100%) rename tests/{ => checks}/isis_database_byte_check/isisDTEp_NEG.json (100%) rename tests/{ => checks}/isis_database_byte_check/isisDTEp_POS.json (100%) rename tests/{ => checks}/isis_database_byte_check/test_isis_database_byte_check.py (88%) rename tests/{ => 
checks}/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos1.json (100%) rename tests/{ => checks}/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos2.json (100%) rename tests/{ => checks}/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos3.json (100%) rename tests/{ => checks}/isis_redis_metric_mpod_msite_check/isisDomP-default_missing.json (100%) rename tests/{ => checks}/isis_redis_metric_mpod_msite_check/isisDomP-default_neg.json (100%) rename tests/{ => checks}/isis_redis_metric_mpod_msite_check/isisDomP-default_pos.json (100%) rename tests/{ => checks}/isis_redis_metric_mpod_msite_check/test_isis_redis_metric_mpod_msite_check.py (90%) rename tests/{ => checks}/l3out_mtu_check/l2pol-default.json (100%) rename tests/{ => checks}/l3out_mtu_check/l3extRsPathL3OutAtt.json (100%) rename tests/{ => checks}/l3out_mtu_check/l3extVirtualLIfP.json (100%) rename tests/{ => checks}/l3out_mtu_check/l3extVirtualLIfP_unresolved.json (100%) rename tests/{ => checks}/l3out_mtu_check/test_l3out_mtu_check.py (92%) rename tests/{ => checks}/l3out_overlapping_loopback_check/diff_l3out_loopback.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/diff_l3out_loopback_and_rtrId.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/diff_l3out_rtrId.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/no_overlap.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/overlap_on_diff_nodes.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/same_l3out_loopback.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/same_l3out_loopback_and_rtrId.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/same_l3out_loopback_with_subnet_mask.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/same_l3out_rtrId.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/same_l3out_rtrId_non_vpc.json (100%) rename tests/{ => 
checks}/l3out_overlapping_loopback_check/same_l3out_two_loopbacks.json (100%) rename tests/{ => checks}/l3out_overlapping_loopback_check/test_l3out_overlapping_loopback_check.py (92%) rename tests/{ => checks}/l3out_route_map_missing_target_check/rtctrlProfile_missing_target.json (100%) rename tests/{ => checks}/l3out_route_map_missing_target_check/rtctrlProfile_multiple_l3out_multiple_missing_target.json (100%) rename tests/{ => checks}/l3out_route_map_missing_target_check/rtctrlProfile_multiple_missing_target.json (100%) rename tests/{ => checks}/l3out_route_map_missing_target_check/rtctrlProfile_no_missing_target.json (100%) rename tests/{ => checks}/l3out_route_map_missing_target_check/test_l3out_route_map_missing_target_check.py (86%) rename tests/{ => checks}/lldp_custom_int_description_defect_check/fvRsDomAtt_neg.json (100%) rename tests/{ => checks}/lldp_custom_int_description_defect_check/fvRsDomAtt_pos.json (100%) rename tests/{ => checks}/lldp_custom_int_description_defect_check/infraPortBlk_neg.json (100%) rename tests/{ => checks}/lldp_custom_int_description_defect_check/infraPortBlk_pos.json (100%) rename tests/{ => checks}/lldp_custom_int_description_defect_check/test_lldp_custom_int_description_defect_check.py (91%) rename tests/{ => checks}/llfc_susceptibility_check/ethpmFcot.json (100%) rename tests/{ => checks}/llfc_susceptibility_check/test_llfc_susceptibility_check.py (86%) create mode 100644 tests/checks/mini_aci_6_0_2_check/fabricNode_all_phys_apic.json create mode 100644 tests/checks/mini_aci_6_0_2_check/fabricNode_mini_aci.json create mode 100644 tests/checks/mini_aci_6_0_2_check/test_mini_aci_6_0_2_check.py rename tests/{mini_aci_6_0_2 => checks/mini_aci_6_0_2_check}/topSystem_controller_neg.json (100%) rename tests/{mini_aci_6_0_2 => checks/mini_aci_6_0_2_check}/topSystem_controller_pos.json (100%) rename tests/{ => checks}/n9408_model_check/eqptCh_NEG.json (100%) rename tests/{ => checks}/n9408_model_check/eqptCh_POS.json (100%) rename 
tests/{ => checks}/n9408_model_check/test_n9408_model_check.py (82%) create mode 100644 tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json create mode 100644 tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json create mode 100644 tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json create mode 100644 tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_no_FX3P3H.json create mode 100644 tests/checks/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py create mode 100644 tests/checks/ntp_status_check/NEG_datetimeClkPol.json create mode 100644 tests/checks/ntp_status_check/NEG_datetimeNtpq.json create mode 100644 tests/checks/ntp_status_check/POS_datetimeClkPol.json create mode 100644 tests/checks/ntp_status_check/POS_datetimeNtpq.json create mode 100644 tests/checks/ntp_status_check/fabricNode.json create mode 100644 tests/checks/ntp_status_check/test_ntp_status_check.py create mode 100644 tests/checks/observer_db_size_check/fabricNode.json create mode 100644 tests/checks/observer_db_size_check/fabricNode_no_apic.json create mode 100644 tests/checks/observer_db_size_check/fabricNode_old.json create mode 100644 tests/checks/observer_db_size_check/infraWiNode_apic1.json create mode 100644 tests/checks/observer_db_size_check/test_observer_db_size_check.py rename tests/{ => checks}/oob_mgmt_security_check/mgmtInstP.json (100%) rename tests/{ => checks}/oob_mgmt_security_check/mgmtInstP_no_contracts.json (100%) rename tests/{ => checks}/oob_mgmt_security_check/mgmtInstP_no_subnets.json (100%) rename tests/{ => checks}/oob_mgmt_security_check/mgmtOoB.json (100%) rename tests/{ => checks}/oob_mgmt_security_check/mgmtOoB_no_contracts.json (100%) rename tests/{ => checks}/oob_mgmt_security_check/test_oob_mgmt_security_check.py (92%) rename tests/{ => checks}/out_of_service_ports_check/ethpmPhysIf-neg.json (100%) rename tests/{ => 
checks}/out_of_service_ports_check/ethpmPhysIf-pos.json (100%) rename tests/{ => checks}/out_of_service_ports_check/test_out_of_service_ports_check.py (75%) rename tests/{ => checks}/overlapping_vlan_pools_check/access_policy.json (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/fvAEPg.json (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/fvIfConn.json (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/infraSetPol_no.json (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/infraSetPol_yes.json (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/templates/access_policy.j2 (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/templates/fvAEPg.j2 (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/templates/fvIfConn.j2 (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/templates/macros.j2 (100%) rename tests/{ => checks}/overlapping_vlan_pools_check/test_overlapping_vlan_pools_check.py (99%) rename tests/{ => checks}/pbr_high_scale_check/test_pbr_high_scale_check.py (90%) rename tests/{ => checks}/pbr_high_scale_check/vnsAdjacencyDefCont_HIGH.json (100%) rename tests/{ => checks}/pbr_high_scale_check/vnsAdjacencyDefCont_LOW.json (100%) rename tests/{ => checks}/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_HIGH.json (100%) rename tests/{ => checks}/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_LOW.json (100%) rename tests/{ => checks}/post_upgrade_cb_check/moCount_0.json (100%) rename tests/{ => checks}/post_upgrade_cb_check/moCount_10.json (100%) rename tests/{ => checks}/post_upgrade_cb_check/moCount_8.json (100%) rename tests/{ => checks}/post_upgrade_cb_check/test_post_upgrade_cb_check.py (92%) rename tests/{ => checks}/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_new.json (100%) rename tests/{ => checks}/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_old.json (100%) rename tests/{ => 
checks}/prefix_already_in_use_check/fvCtx.json (100%) rename tests/{ => checks}/prefix_already_in_use_check/l3extRsEctx.json (100%) rename tests/{ => checks}/prefix_already_in_use_check/l3extSubnet_no_overlap.json (100%) rename tests/{ => checks}/prefix_already_in_use_check/l3extSubnet_overlap.json (100%) rename tests/{ => checks}/prefix_already_in_use_check/test_prefix_already_in_use_check.py (91%) create mode 100644 tests/checks/r_leaf_compatibility_check/fabricNode_no_RL.json create mode 100644 tests/checks/r_leaf_compatibility_check/fabricNode_with_RL.json create mode 100644 tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_disabled.json create mode 100644 tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_enabled.json create mode 100644 tests/checks/r_leaf_compatibility_check/infraSetPol_no_DTF.json create mode 100644 tests/checks/r_leaf_compatibility_check/test_r_leaf_compatibility_check.py rename tests/{ => checks}/rtmap_comm_match_defect_check/rtctrlCtxP_NEG.json (100%) rename tests/{ => checks}/rtmap_comm_match_defect_check/rtctrlCtxP_POS.json (100%) rename tests/{ => checks}/rtmap_comm_match_defect_check/rtctrlSubjP_NEG.json (100%) rename tests/{ => checks}/rtmap_comm_match_defect_check/rtctrlSubjP_POS.json (100%) rename tests/{ => checks}/rtmap_comm_match_defect_check/test_rtmap_comm_match_defect_check.py (88%) rename tests/{ => checks}/service_bd_forceful_routing_check/fvRtEPpInfoToBD.json (100%) rename tests/{ => checks}/service_bd_forceful_routing_check/test_service_bd_forceful_routing_check.py (81%) create mode 100644 tests/checks/stale_decomissioned_spine_check/fabricNode.json rename tests/{ => checks}/stale_decomissioned_spine_check/fabricRsDecommissionNode_POS.json (100%) create mode 100644 tests/checks/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py rename tests/{ => checks}/standby_sup_sync_check/eqptSupC_NEG.json (100%) rename tests/{ => checks}/standby_sup_sync_check/eqptSupC_POS.json (100%) create mode 
100644 tests/checks/standby_sup_sync_check/test_standby_sup_sync_check.py rename tests/{ => checks}/static_route_overlap_check/fvRsCtx.json (100%) rename tests/{ => checks}/static_route_overlap_check/fvSubnet.json (100%) rename tests/{ => checks}/static_route_overlap_check/ipRouteP_empty.json (100%) rename tests/{ => checks}/static_route_overlap_check/ipRouteP_neg.json (100%) rename tests/{ => checks}/static_route_overlap_check/ipRouteP_pos.json (100%) rename tests/{ => checks}/static_route_overlap_check/l3extRsEctx.json (100%) create mode 100644 tests/checks/static_route_overlap_check/test_static_route_overlap_check.py rename tests/{ => checks}/subnet_scope_check/fvAEPg_empty.json (100%) rename tests/{ => checks}/subnet_scope_check/fvAEPg_neg.json (100%) rename tests/{ => checks}/subnet_scope_check/fvAEPg_pos.json (100%) rename tests/{ => checks}/subnet_scope_check/fvBD.json (100%) rename tests/{ => checks}/subnet_scope_check/fvRsBd.json (100%) create mode 100644 tests/checks/subnet_scope_check/test_subnet_scope_check.py rename tests/{ => checks}/sup_a_high_memory_check/eqptSupC_SUP_A.json (100%) rename tests/{ => checks}/sup_a_high_memory_check/eqptSupC_SUP_A_Aplus.json (100%) rename tests/{ => checks}/sup_a_high_memory_check/eqptSupC_SUP_Aplus.json (100%) rename tests/{ => checks}/sup_a_high_memory_check/eqptSupC_no_SUP_A_Aplus.json (100%) rename tests/{ => checks}/sup_a_high_memory_check/test_sup_a_high_memory_check.py (87%) rename tests/{ => checks}/sup_hwrev_check/eqptSpCmnBlk_NEG.json (100%) rename tests/{ => checks}/sup_hwrev_check/eqptSpCmnBlk_POS.json (100%) rename tests/{ => checks}/sup_hwrev_check/test_sup_hwrev_check.py (77%) rename tests/{ => checks}/switch_bootflash_usage_check/eqptcapacityFSPartition.json (100%) rename tests/{ => checks}/switch_bootflash_usage_check/maintUpgJob_not_downloaded.json (100%) rename tests/{ => checks}/switch_bootflash_usage_check/maintUpgJob_old_ver_no_prop.json (100%) rename tests/{ => 
checks}/switch_bootflash_usage_check/maintUpgJob_pre_downloaded.json (100%) create mode 100644 tests/checks/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py create mode 100644 tests/checks/switch_group_guideline_check/bgpRRNodePEp_1001_1002_2001_2002.json create mode 100644 tests/checks/switch_group_guideline_check/fabricExplicitGEp.json create mode 100644 tests/checks/switch_group_guideline_check/fabricNode.json create mode 100644 tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1001_1002_2001_2002.json create mode 100644 tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1003_1004_2001_2002.json create mode 100644 tests/checks/switch_group_guideline_check/lldpCtrlrAdjEp.json create mode 100644 tests/checks/switch_group_guideline_check/maintMaintGrp_ALL.json create mode 100644 tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_GRP1_GRP2.json create mode 100644 tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_ONLY_POD1_SPINE_RR.json create mode 100644 tests/checks/switch_group_guideline_check/maintMaintGrp_EVEN_ODD.json create mode 100644 tests/checks/switch_group_guideline_check/maintMaintGrp_SPINE_LEAF.json create mode 100644 tests/checks/switch_group_guideline_check/test_switch_group_guideline_check.py create mode 100644 tests/checks/switch_ssd_check/faultInst.json create mode 100644 tests/checks/switch_ssd_check/test_switch_ssd_check.py create mode 100644 tests/checks/switch_status_check/fabricNode_NEG.json create mode 100644 tests/checks/switch_status_check/fabricNode_POS.json create mode 100644 tests/checks/switch_status_check/fabricRsDecommissionNode.json create mode 100644 tests/checks/switch_status_check/test_switch_status_check.py rename tests/{ => checks}/telemetryStatsServerP_object_check/telemetryStatsServerP_neg.json (100%) rename tests/{ => checks}/telemetryStatsServerP_object_check/telemetryStatsServerP_pos.json (100%) rename tests/{ => 
checks}/telemetryStatsServerP_object_check/test_telemetryStatsServerP_object_check.py (86%) rename tests/{ => checks}/tep-to-tep_atomic_counter_check/dbgAcPath_max.json (100%) rename tests/{ => checks}/tep-to-tep_atomic_counter_check/dbgAcPath_na.json (100%) rename tests/{ => checks}/tep-to-tep_atomic_counter_check/dbgAcPath_pass.json (100%) rename tests/{ => checks}/tep-to-tep_atomic_counter_check/test_tep_to_tep_ac_count_check.py (71%) rename tests/{ => checks}/unsupported_fec_configuration_ex_check/test_unsupported_fec_configuration_ex_check.py (82%) rename tests/{ => checks}/unsupported_fec_configuration_ex_check/topSystem_neg.json (100%) rename tests/{ => checks}/unsupported_fec_configuration_ex_check/topSystem_pos.json (100%) rename tests/{ => checks}/uplink_limit_check/eqptPortP_NEG.json (100%) rename tests/{ => checks}/uplink_limit_check/eqptPortP_POS.json (100%) rename tests/{ => checks}/uplink_limit_check/eqptPortP_empty.json (100%) rename tests/{ => checks}/uplink_limit_check/test_uplink_limit_check.py (82%) rename tests/{ => checks}/validate_32_64_bit_image_check/firmwareFirmware_empty.json (100%) rename tests/{ => checks}/validate_32_64_bit_image_check/firmwareFirmware_neg.json (100%) rename tests/{ => checks}/validate_32_64_bit_image_check/firmwareFirmware_pos.json (100%) rename tests/{ => checks}/validate_32_64_bit_image_check/firmwareFirmware_pos2.json (100%) rename tests/{ => checks}/validate_32_64_bit_image_check/firmwareFirmware_pos3.json (100%) rename tests/{ => checks}/validate_32_64_bit_image_check/firmwareFirmware_pos4.json (100%) rename tests/{ => checks}/validate_32_64_bit_image_check/test_validate_32_64_bit_image_check.py (59%) rename tests/{ => checks}/vmm_active_uplinks_check/fvUplinkOrderCont_neg.json (100%) rename tests/{ => checks}/vmm_active_uplinks_check/fvUplinkOrderCont_not_exist.json (100%) rename tests/{ => checks}/vmm_active_uplinks_check/fvUplinkOrderCont_pos.json (100%) rename tests/{ => 
checks}/vmm_active_uplinks_check/test_vmm_active_uplinks_check.py (78%) create mode 100644 tests/checks/vpc_paired_switches_check/fabricNode.json create mode 100644 tests/checks/vpc_paired_switches_check/test_vpc_paired_switches_check.py rename tests/{ => checks}/vzany_vzany_service_epg_check/test_vzany_vzany_service_epg_check.py (92%) rename tests/{ => checks}/vzany_vzany_service_epg_check/vzRsSubjGraphAtt.json (100%) rename tests/{ => checks}/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_cons_diff_VRFs.json (100%) rename tests/{ => checks}/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_only.json (100%) rename tests/{ => checks}/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny.json (100%) rename tests/{ => checks}/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny_2_VRFs.json (100%) delete mode 100644 tests/fabric_link_redundancy_check/fabricNode.json delete mode 100644 tests/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py delete mode 100644 tests/fabricdomain_name_check/test_fabricdomain_name_check.py delete mode 100644 tests/fabricdomain_name_check/topSystem_1POS.json delete mode 100644 tests/fabricdomain_name_check/topSystem_2POS.json delete mode 100644 tests/fabricdomain_name_check/topSystem_NEG.json delete mode 100644 tests/fc_ex_model_check/fabricNode_NEG.json delete mode 100644 tests/fc_ex_model_check/fabricNode_POS.json delete mode 100644 tests/fc_ex_model_check/fcEntity_NEG.json delete mode 100644 tests/fc_ex_model_check/fcEntity_POS.json delete mode 100644 tests/fc_ex_model_check/test_fc_ex_model_check.py delete mode 100644 tests/mini_aci_6_0_2/test_mini_aci_6_0_2_check.py delete mode 100644 tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json delete mode 100644 tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json delete mode 100644 tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json delete mode 100644 
tests/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py delete mode 100644 tests/observer_db_size_check/test_observer_db_size_check.py delete mode 100644 tests/observer_db_size_check/topSystem.json delete mode 100644 tests/observer_db_size_check/topSystem_empty.json delete mode 100644 tests/stale_decomissioned_spine_check/fabricRsDecommissionNode_NEG.json delete mode 100644 tests/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py delete mode 100644 tests/stale_decomissioned_spine_check/topSystem.json delete mode 100644 tests/standby_sup_sync_check/test_standby_sup_sync_check.py delete mode 100644 tests/static_route_overlap_check/test_static_route_overlap_check.py delete mode 100644 tests/subnet_scope_check/test_subnet_scope_check.py delete mode 100644 tests/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py create mode 100644 tests/test_CheckManager.py create mode 100644 tests/test_ResultManager.py create mode 100644 tests/test_ThreadManager.py create mode 100644 tests/test_common_data.py delete mode 100644 tests/test_prepare.py delete mode 100644 tests/test_run_checks.py delete mode 100644 tests/vpc_paired_switches_check/test_vpc_paired_switches_check.py delete mode 100644 tests/vpc_paired_switches_check/topSystem.json diff --git a/aci-preupgrade-validation-script.py b/aci-preupgrade-validation-script.py index bd208ce..9dff95f 100644 --- a/aci-preupgrade-validation-script.py +++ b/aci-preupgrade-validation-script.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # SPDX-License-Identifier: Apache-2.0 # # Copyright 2021 Cisco Systems, Inc. 
and its affiliates @@ -20,10 +21,11 @@ from six.moves import input from textwrap import TextWrapper from getpass import getpass -from collections import defaultdict +from collections import defaultdict, OrderedDict from datetime import datetime from argparse import ArgumentParser from itertools import chain +import threading import functools import shutil import warnings @@ -36,7 +38,8 @@ import os import re -SCRIPT_VERSION = "v3.2.0" +SCRIPT_VERSION = "v4.0.0" +DEFAULT_TIMEOUT = 600 # sec # result constants DONE = 'DONE' PASS = 'PASS' @@ -64,16 +67,24 @@ ts = datetime.now().strftime('%Y-%m-%dT%H-%M-%S') BUNDLE_NAME = 'preupgrade_validator_%s%s.tgz' % (ts, tz) DIR = 'preupgrade_validator_logs/' -JSON_DIR = DIR + 'json_results/' -META_FILE = DIR + 'meta.json' -RESULT_FILE = DIR + 'preupgrade_validator_%s%s.txt' % (ts, tz) -SUMMARY_FILE = DIR + 'summary.json' -LOG_FILE = DIR + 'preupgrade_validator_debug.log' +JSON_DIR = os.path.join(DIR, 'json_results/') +META_FILE = os.path.join(DIR, 'meta.json') +RESULT_FILE = os.path.join(DIR, 'preupgrade_validator_%s%s.txt' % (ts, tz)) +SUMMARY_FILE = os.path.join(DIR, 'summary.json') +LOG_FILE = os.path.join(DIR, 'preupgrade_validator_debug.log') warnings.simplefilter(action='ignore', category=FutureWarning) log = logging.getLogger() +# TimeoutError is only from py3.3 +try: + TimeoutError +except NameError: + class TimeoutError(Exception): + pass + + class OldVerClassNotFound(Exception): """ Later versions of ACI can have class properties not found in older versions """ pass @@ -916,6 +927,286 @@ def is_firstver_gt_secondver(first_ver, second_ver): return result +class CustomThread(threading.Thread): + def __init__(self, *args, **kwargs): + super(CustomThread, self).__init__(*args, **kwargs) + self.exception = None + + def start(self, timeout=5.0): + """Thread.start() with timeout to wait for the thread start event. + + This method overrides `_started.wait()` at the end with a timeout to + prevent the issue explained below. 
+ When MemoryError occurs during _start_new_thread(_bootstrap(), ()), + this method (start()) could get stuck forever at _started.wait(), which + is a threading.Event(), because _bootstrap() is supposed to trigger + _started.set() which may never happen because it appears some exceptions + are not raised to be captured by try/except in this method. + This was observed when the script was used inside a container with a + restricted memory allocation and resulted in the script to get stuck + and not being able to start remaining threads. + + Args: + timeout (float): How long we wait for the thread start event. + 5.0 sec by default. + """ + _active_limbo_lock = threading._active_limbo_lock + _limbo = threading._limbo + _start_new_thread = threading._start_new_thread + + # Python2 uses name mangling + if hasattr(self, "_Thread__initialized"): + self._initialized = self._Thread__initialized + if hasattr(self, "_Thread__started"): + self._started = self._Thread__started + if hasattr(self, "_Thread__bootstrap"): + self._bootstrap = self._Thread__bootstrap + + if not self._initialized: + raise RuntimeError("thread.__init__() not called") + + if self._started.is_set(): + raise RuntimeError("threads can only be started once") + + with _active_limbo_lock: + _limbo[self] = self + try: + _start_new_thread(self._bootstrap, ()) + except Exception: + with _active_limbo_lock: + del _limbo[self] + raise + self._started.wait(timeout) + # When self._started was not set within the time limit, handle it + # in the same way as when `_start_new_thread()` correctly captures + # the exception due to OOM. 
+ if not self._started.is_set(): + with _active_limbo_lock: + del _limbo[self] + raise RuntimeError("can't start new thread") + + def run(self): + # Python2 uses name mangling + if hasattr(self, "_Thread__target"): + self._target = self._Thread__target + if hasattr(self, "_Thread__args"): + self._args = self._Thread__args + if hasattr(self, "_Thread__kwargs"): + self._kwargs = self._Thread__kwargs + + try: + if self._target is not None: + self._target(*self._args, **self._kwargs) + except Exception as e: + # Exceptions inside a thread should be captured in the thread, that + # is in the `check_wrapper`. + # If it's not caught inside the thread, notify the main thread. + self.exception = e + finally: + del self._target, self._args, self._kwargs + + +class ThreadManager: + """A class managing all threads to run individual checks. + + This class starts and monitors the status of all threads for check + functions decorated with check_wrapper(). This stops monitoring when all + threads completed or when timeout expired. + On a memory constrained setup, it may take time to start each thread. Some + thread/check may complete before other threads get started. To monitor the + progress correctly from the beginning, the monitoring is also done in a + thread while the main thread is starting all threads for each check. + """ + def __init__( + self, + funcs, + common_kwargs, + monitor_interval=0.5, # sec + monitor_timeout=600, # sec + callback_on_monitoring=None, + callback_on_start_failure=None, + callback_on_timeout=None, + ): + self.funcs = funcs + self.threads = None + self.common_kwargs = common_kwargs + + # Not using `thread.join(timeout)` because it waits for each thread sequentially, + # which means the program may wait for "timeout * num of threads" at worst case. 
+ self.timeout_event = threading.Event() + self.monitor_interval = monitor_interval + self.monitor_timeout = monitor_timeout + self._monitor = self._generate_thread(target=self._monitor_progress) + + # Number of threads that were processed by `_start_thread()`, including + # both success and failure to start. + self._processed_threads_count = 0 + + # Custom callbacks + self._cb_on_monitoring = callback_on_monitoring + self._cb_on_start_failure = callback_on_start_failure + self._cb_on_start_failure_exception = None + self._cb_on_timeout = callback_on_timeout + + def start(self): + if self._monitor.is_alive(): + raise RuntimeError("Threading on going. Cannot start again.") + + self.threads = [ + self._generate_thread(target=func, kwargs=self.common_kwargs) + for func in self.funcs + ] + + self._monitor.start() + + for thread in self.threads: + self._start_thread(thread) + + def join(self): + self._monitor.join() + # If the thread had an exception that was not captured and handled correctly, + # re-raise it in the main thread to notify the script executor about it. + if self._monitor.exception: + raise self._monitor.exception + for thread in self.threads: + if thread.exception: + raise thread.exception + # Exception in the callback means failure to update the result as error. Need to + # re-raise it in the main thread to notify the script executor about the risk of + # some check results left with in-progress forever. + if self._cb_on_start_failure_exception: + raise self._cb_on_start_failure_exception + + def is_timeout(self): + return self.timeout_event.is_set() + + def _generate_thread(self, target, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + thread = CustomThread( + target=target, name=target.__name__, args=args, kwargs=kwargs + ) + thread.daemon = True + return thread + + def _start_thread(self, thread): + """ Start a thread. When failed due to OOM, retry again after an interval. + Until one of the following conditions is met, we don't move on.
+ - successfully started the thread + - exceeded the queue timeout and gave up on this thread + - failed to start the thread for an unknown reason + """ + queue_timeout = 10 # sec + queue_interval = 1 # sec + time_elapsed = 0 # sec + thread_started = False + while not self.is_timeout(): + try: + log.info("({}) Starting thread.".format(thread.name)) + thread.start() + thread_started = True + break + except RuntimeError as e: + if str(e) != "can't start new thread": + log.error("({}) Unexpected error to start a thread.".format(thread.name), exc_info=True) + break + + log_msg = "({}) Not enough memory to start a new thread. ".format(thread.name) + if time_elapsed >= queue_timeout: + log.error(log_msg + "No queue time left. Give up.") + break + else: + log.info(log_msg + "Try again in {} second...".format(queue_interval)) + time.sleep(queue_interval) + time_elapsed += queue_interval + continue + except Exception: + log.error("({}) Unexpected error to start a thread.".format(thread.name), exc_info=True) + break + + # Custom cleanup callback for a thread that couldn't start.
+ if not thread_started and not thread.is_alive(): + log.error("({}) Failed to start thread.".format(thread.name)) + if self._cb_on_start_failure is not None: + try: + self._cb_on_start_failure(thread.name) + except Exception as e: + log.error("({}) Failed to update the result as error.".format(thread.name), exc_info=True) + self._cb_on_start_failure_exception = e + + self._processed_threads_count += 1 + + def _monitor_progress(self): + """Executed in a separate monitor thread""" + total = len(self.threads) + time_elapsed = 0 # sec + while True: + alive_count = sum(thread.is_alive() for thread in self.threads) + done = self._processed_threads_count - alive_count + + # Custom monitor callback + if self._cb_on_monitoring is not None: + self._cb_on_monitoring(done, total) + + if done == total: + break + + time.sleep(self.monitor_interval) + time_elapsed += self.monitor_interval + if time_elapsed > self.monitor_timeout: + log.error("Timeout. Stop monitoring threads.") + self.timeout_event.set() + break + + # Custom timeout callback per thread + if self.is_timeout() and self._cb_on_timeout is not None: + for thread in self.threads: + if thread.is_alive(): + self._cb_on_timeout(thread.name) + + +class ResultManager: + def __init__(self): + self.titles = {} # {check_id: check_title} + self.results = {} # {check_id: Result} + + def _update_aci_result(self, check_id, check_title, result_obj=None): + aci_result = AciResult(check_id, check_title, result_obj) + filepath = self.get_result_filepath(check_id) + write_jsonfile(filepath, aci_result.as_dict()) + return filepath + + def init_result(self, check_id, check_title): + self.titles[check_id] = check_title + filepath = self._update_aci_result(check_id, check_title) + log.info("Initialized in {}".format(filepath)) + + def update_result(self, check_id, result_obj): + check_title = self.titles.get(check_id) + if not check_title: + log.error("Check {} is not initialized. 
Failed to update.".format(check_id)) + return None + self.results[check_id] = result_obj + filepath = self._update_aci_result(check_id, check_title, result_obj) + log.info("Finalized result in {}".format(filepath)) + + def get_summary(self): + summary_headers = [PASS, FAIL_O, FAIL_UF, MANUAL, POST, NA, ERROR, 'TOTAL'] + summary = OrderedDict([(key, 0) for key in summary_headers]) + summary["TOTAL"] = len(self.results) + + for result in self.results.values(): + summary[result.result] += 1 + + return summary + + def get_result_filepath(self, check_id): + """filename should be only alphabet, number and underscore""" + filename = re.sub(r'[^a-zA-Z0-9_]+|\s+', '_', check_id) + '.json' + filepath = os.path.join(JSON_DIR, filename) + return filepath + + class AciResult: """ APIC uses an object called `syntheticMaintPValidate` to store the results of @@ -935,10 +1226,10 @@ class AciResult: PASS = "passed" FAIL = "failed" - def __init__(self, func_name, name, description): + def __init__(self, func_name, name, result_obj=None): self.ruleId = func_name self.name = name - self.description = description + self.description = "" self.reason = "" self.sub_reason = "" self.recommended_action = "" @@ -948,16 +1239,20 @@ def __init__(self, func_name, name, description): self.showValidation = True self.failureDetails = { "failType": "", + "header": [], "data": [], + "unformatted_header": [], "unformatted_data": [], } - - @property - def filename(self): - return re.sub(r'[^a-zA-Z0-9_]+|\s+', '_', self.ruleId) + '.json' + if result_obj: + self.update_with_results(result_obj) @staticmethod - def craftData(column, rows): + def convert_data(column, rows): + """Convert data from `Result` data format to `AciResult` data format. 
+ Result - {header: [h1, h2,,,], data: [[d11, d21,,,], [d21, d22,,,],,,]} + AciResult - [{h1: d11, h2: d21,,,}, {h1: d21, h2: d22,,,},,,] + """ if not (isinstance(rows, list) and isinstance(column, list)): raise TypeError("Rows and column must be lists.") data = [] @@ -966,62 +1261,58 @@ def craftData(column, rows): r_len = len(rows[row_entry]) if r_len != c_len: raise ValueError("Row length ({}), data: {} does not match column length ({}).".format(r_len, rows[row_entry], c_len)) - entry = {} + entry = OrderedDict() for col_pos in range(c_len): entry[column[col_pos]] = str(rows[row_entry][col_pos]) data.append(entry) return data - def updateWithResults(self, result, recommended_action, msg, doc_url, headers, data, unformatted_headers, unformatted_data): - self.reason = msg - self.recommended_action = recommended_action - self.docUrl = doc_url + def update_with_results(self, result_obj): + self.recommended_action = result_obj.recommended_action + self.docUrl = result_obj.doc_url - # Show validation - if result in [NA, POST]: - self.showValidation = False + result = result_obj.result + + # Show validatio + self.showValidation = result not in (NA, POST) # Severity - if result in [FAIL_O, FAIL_UF]: - self.severity = "critical" - elif result in [ERROR]: - self.severity = "major" - elif result in [MANUAL]: - self.severity = "warning" - - self.ruleStatus = AciResult.PASS - if result not in [NA, PASS]: - self.ruleStatus = AciResult.FAIL - if not self.reason: - self.reason = "See Failure Details" + severity_map = {FAIL_O: "critical", FAIL_UF: "critical", ERROR: "major", MANUAL: "warning"} + self.severity = severity_map.get(result, "informational") + + # ruleStatus + self.ruleStatus = AciResult.PASS if result in (NA, PASS) else AciResult.FAIL + + # reason + self.reason = result_obj.msg + if not self.reason and self.ruleStatus == AciResult.FAIL: + self.reason = "See Failure Details." 
+ + # failureDetails + if self.ruleStatus == AciResult.FAIL: self.failureDetails["failType"] = result - self.failureDetails["header"] = headers - self.failureDetails["data"] = self.craftData(headers, data) - if unformatted_headers and unformatted_data: - self.failureDetails["unformatted_data"] = self.craftData(unformatted_headers, unformatted_data) - if self.reason: - self.reason += "\n" - self.reason += ( - "Parse failure occurred, the provided data may not be complete. " - "Please contact Cisco TAC to identify the missing data." + self.failureDetails["header"] = result_obj.headers + self.failureDetails["data"] = self.convert_data(result_obj.headers, result_obj.data) + if result_obj.unformatted_headers and result_obj.unformatted_data: + self.failureDetails["unformatted_header"] = result_obj.unformatted_headers + self.failureDetails["unformatted_data"] = self.convert_data( + result_obj.unformatted_headers, result_obj.unformatted_data + ) + self.recommended_action += ( + "\n " + "Note that the provided data in the Failure Details is not complete" + " due to an issue in parsing data. Contact Cisco TAC for the full details." 
) - def buildResult(self): + def as_dict(self): return {slot: getattr(self, slot) for slot in self.__slots__} - def writeResult(self, path=JSON_DIR): - if not os.path.isdir(path): - os.mkdir(path) - with open(os.path.join(path, self.filename), "w") as f: - json.dump(self.buildResult(), f, indent=2) - return "{}/{}".format(path, self.filename) - class Result: """Class to hold the result of a check.""" - __slots__ = ("result", "msg", "headers", "data", "unformatted_headers", "unformatted_data", "recommended_action", "doc_url", "adjust_title") + __slots__ = ("result", "msg", "headers", "data", "unformatted_headers", "unformatted_data", "recommended_action", "doc_url") - def __init__(self, result=PASS, msg="", headers=None, data=None, unformatted_headers=None, unformatted_data=None, recommended_action="", doc_url="", adjust_title=False): + def __init__(self, result=PASS, msg="", headers=None, data=None, unformatted_headers=None, unformatted_data=None, recommended_action="", doc_url=""): self.result = result self.msg = msg self.headers = headers if headers is not None else [] @@ -1030,52 +1321,54 @@ def __init__(self, result=PASS, msg="", headers=None, data=None, unformatted_hea self.unformatted_data = unformatted_data if unformatted_data is not None else [] self.recommended_action = recommended_action self.doc_url = doc_url - self.adjust_title = adjust_title def as_dict(self): return {slot: getattr(self, slot) for slot in self.__slots__} - def as_dict_for_json_result(self): - return {slot: getattr(self, slot) for slot in self.__slots__ if slot != "adjust_title"} - def check_wrapper(check_title): - """ - Decorator to wrap a check function to handle the printing of title and results, - and to write the results in a file in a JSON format. + """Decorator to wrap a check function with initializer and finalizer from `CheckManager`. + + The goal is for each check function to focus only on the check logic itself and return + `Result` object. 
The rest such as initializing the result, printing the result to stdout, + writing the result in a file in JSON etc. are handled through this wrapper and CheckManager. """ def decorator(check_func): @functools.wraps(check_func) - def wrapper(index, total_checks, *args, **kwargs): - # When init is True, we just initialize the result file and return - if kwargs.get("init") is True: - synth = AciResult(wrapper.__name__, check_title, "") - synth.writeResult() + def wrapper(*args, **kwargs): + # Initialization + initialize_check = kwargs.pop("initialize_check", None) + if initialize_check: + # Using `wrapper.__name__` instead of `check_func.__name` because + # both show the original check func name and `wrapper.__name__` can + # be dynamically changed inside each check func if needed. (mainly + # for test or debugging) + initialize_check(wrapper.__name__, check_title) return None + log.info("Start {}".format(wrapper.__name__)) + # Real Run (executed inside a thread) + # When `finalize_check` failed even in `except`, there is nothing we can + # do because it is usually because of system level issues like filesystem + # being full. In such a case, we cannot even update the result of the check + # as `failed` from `in-progress`. To inform the script executor and prevent it + # from indefinitely waiting, we let the exception to go up to the top (`main()`) + # and abort the script immediately. + finalize_check = kwargs.pop("finalize_check") try: - # Print `[Check 1/81] <title>...` - print_title(check_title, index, total_checks) - - # Run check, expecting it to return a `Result` object r = check_func(*args, **kwargs) - - # Print `[Check 1/81] <title>... <msg> <result>\n<failure details>` - print_result(title=check_title, **r.as_dict()) + finalize_check(wrapper.__name__, r) + except MemoryError: + msg = "Not enough memory to complete this check." 
+ r = Result(result=ERROR, msg=msg) + log.error(msg, exc_info=True) + finalize_check(wrapper.__name__, r) except Exception as e: - log.exception(e) - r = Result(result=ERROR, msg='Unexpected Error: {}'.format(e)) - print_result(title=check_title, **r.as_dict()) - finally: - # Write results in JSON - # Using `wrapper.__name__` instead of `check_func.__name` because - # both show the original check func name and `wrapper.__name__` can - # be dynamically changed inside each check func if needed. (mainly - # for test or debugging) - synth = AciResult(wrapper.__name__, check_title, "") - synth.updateWithResults(**r.as_dict_for_json_result()) - synth.writeResult() - return r.result + msg = "Unexpected Error: {}".format(e) + r = Result(result=ERROR, msg=msg) + log.error(msg, exc_info=True) + finalize_check(wrapper.__name__, r) + return r return wrapper return decorator @@ -1201,38 +1494,40 @@ def get_row(widths, values, spad=" ", lpad=""): def prints(objects, sep=' ', end='\n'): with open(RESULT_FILE, 'a') as f: print(objects, sep=sep, end=end, file=sys.stdout) + if end == "\r": + end = "\n" # easier to read with \n in a log file print(objects, sep=sep, end=end, file=f) sys.stdout.flush() f.flush() -def print_title(title, index=None, total=None): - if index and total: - prints('[Check{:3}/{}] {}... '.format(index, total, title), end='') +def print_progress(done, total, bar_length=100): + if not total: + progress = 1.0 else: - prints('{:14}{}... '.format('', title), end='') + progress = done / float(total) + filled = int(bar_length * progress) + bar = "â–ˆ" * filled + "-" * (bar_length - filled) + prints("Progress: |{}| {}/{} checks completed".format(bar, done, total), end="\r") -def print_result(title, result, msg='', +def print_result(index, total, title, + result, msg='', headers=None, data=None, unformatted_headers=None, unformatted_data=None, recommended_action='', - doc_url='', - adjust_title=False): - FULL_LEN = 138 # length of `[Check XX/YY] <title>... 
<msg> --padding-- <RESULT>` - CHECK_LEN = 18 # length of `[Check XX/YY] ... ` - padding = FULL_LEN - CHECK_LEN - len(title) - len(msg) - if adjust_title: - # adjust padding when the result is on the second line. - # 1st: `[Check XX/YY] <title>... ` - # 2nd: ` <msg> --padding-- <RESULT>` - padding += len(title) + CHECK_LEN + doc_url=''): + """Print `[Check XX/YY] <title>... <msg> --padding-- <result>` + some data""" + idx_len = len(str(total)) + 1 + output = "[Check{:{}}/{}] {}... {}".format(index, idx_len, total, title, msg) + FULL_LEN = 138 # length of `[Check XX/YY] <title>... <msg> --padding-- <result>` + padding = FULL_LEN - len(output) if padding < len(result): # when `msg` is too long (ex. unknown exception), `padding` may get shorter # than what it's padding (`result`), or worse, may get negative. # In such a case, keep one whitespace padding even if the full length gets longer. padding = len(result) + 1 - output = '{}{:>{}}'.format(msg, result, padding) + output += "{:>{}}".format(result, padding) if data: data.sort() output += '\n' + format_table(headers, data) @@ -1249,6 +1544,27 @@ def print_result(title, result, msg='', prints(output) +def write_jsonfile(filepath, content): + with open(filepath, 'w') as f: + json.dump(content, f, indent=2) + + +def write_script_metadata(api_only, timeout, total_checks, common_data): + metadata = { + "name": "PreupgradeCheck", + "method": "standalone script", + "datetime": ts + tz, + "script_version": str(SCRIPT_VERSION), + "cversion": str(common_data["cversion"]), + "tversion": str(common_data["tversion"]), + "sw_cversion": str(common_data["sw_cversion"]), + "api_only": api_only, + "timeout": timeout, + "total_checks": total_checks, + } + write_jsonfile(META_FILE, metadata) + + def _icurl_error_handler(imdata): if imdata and "error" in imdata[0]: if "not found in class" in imdata[0]['error']['attributes']['text']: @@ -1257,6 +1573,8 @@ def _icurl_error_handler(imdata): raise OldVerClassNotFound('Your current ACI version 
does not have requested class') elif "not found" in imdata[0]['error']['attributes']['text']: raise OldVerClassNotFound('Your current ACI version does not have requested class') + elif "Unable to deliver the message, Resolve timeout" in imdata[0]['error']['attributes']['text']: + raise TimeoutError("API Timeout. APIC may be too busy. Try again later.") else: raise Exception('API call failed! Check debug log') @@ -1283,8 +1601,11 @@ def icurl(apitype, query, page_size=100000): page = 0 while total_cnt > len(total_imdata): data = _icurl(apitype, query, page, page_size) - if not data['imdata']: - break + # API queries may return empty even when totalCount is > 0 and the given page number + # should contain entries. This may happen when there are too many queries + # such as multiple same queries at the same time. + if int(data['totalCount']) > 0 and not data['imdata']: + raise Exception("API response empty with totalCount:{}. APIC may be too busy. Try again later.".format(data["totalCount"])) total_imdata += data['imdata'] total_cnt = int(data['totalCount']) page += 1 @@ -1295,7 +1616,7 @@ def run_cmd(cmd, splitlines=True): """ Run a shell command. :param cmd: Command to run, can be a string or a list. - :param splitlines: If True, splits the output into a list of lines. + :param splitlines: If True, splits the output into a list of lines. If False, returns the raw text output as a single string. Returns the output of the command. 
""" @@ -1325,31 +1646,10 @@ def get_credentials(): return usr, pwd -def get_current_version(arg_cversion): - """ Returns: AciVersion instance """ - if arg_cversion: - prints("Current APIC version is overridden to %s" % arg_cversion) - try: - current_version = AciVersion(arg_cversion) - except ValueError as e: - prints(e) - sys.exit(1) - return current_version - prints("Checking current APIC version...", end='') - firmwares = icurl('class', 'firmwareCtrlrRunning.json') - for firmware in firmwares: - if 'node-1' in firmware['firmwareCtrlrRunning']['attributes']['dn']: - apic1_version = firmware['firmwareCtrlrRunning']['attributes']['version'] - break - current_version = AciVersion(apic1_version) - prints('%s\n' % current_version) - return current_version - - def get_target_version(arg_tversion): """ Returns: AciVersion instance """ if arg_tversion: - prints("Target APIC version is overridden to %s" % arg_tversion) + prints("Target APIC version is overridden to %s\n" % arg_tversion) try: target_version = AciVersion(arg_tversion) except ValueError as e: @@ -1388,6 +1688,78 @@ def get_target_version(arg_tversion): return None +def get_fabric_nodes(): + """Returns list of fabricNode objects. + + Using fabricNode instead of topSystem because topSystem times out when the + node is inactive. 
+ """ + prints("Gathering Node Information...\n") + fabricNodes = icurl('class', 'fabricNode.json') + return fabricNodes + + +def get_current_versions(fabric_nodes, arg_cversion): + """ Returns: AciVersion instances of APIC and lowest switch """ + if arg_cversion: + prints("Current version is overridden to %s\n" % arg_cversion) + try: + current_version = AciVersion(arg_cversion) + except ValueError as e: + prints(e) + sys.exit(1) + return current_version, current_version + + apic1_dn = "" + apic_version = "" # There can be only one APIC version + switch_versions = set() + for node in fabric_nodes: + version = node["fabricNode"]["attributes"]["version"] + if not version: # inactive nodes show empty version + continue + if node["fabricNode"]["attributes"]["role"] == "controller": + apic_version = version + if node["fabricNode"]["attributes"]["id"] == "1": + apic1_dn = node["fabricNode"]["attributes"]["dn"] + else: + switch_versions.add(version) + + # fabricNode.version in older versions like 3.2 shows an invalid version like "A" + # which cannot be parsed by AciVersion. In that case, query the firmware class. + is_old_version = False + try: + apic_version = AciVersion(apic_version) + except ValueError: + is_old_version = True + apic1_firmware = icurl('mo', apic1_dn + "/sys/ctrlrfwstatuscont/ctrlrrunning.json") + if not apic1_firmware: + prints("Unable to find current APIC version.") + sys.exit(1) + apic1_version = apic1_firmware[0]['firmwareCtrlrRunning']['attributes']['version'] + apic_version = AciVersion(apic1_version) + + prints("Current APIC Version...{}".format(apic_version)) + + if is_old_version: + prints("Checking Switch Version ...") + firmwares = icurl('class', 'firmwareRunning.json') + for firmware in firmwares: + switch_versions.add(firmware['firmwareRunning']['attributes']['peVer']) + + msg = "Lowest Switch Version...{}" + if not switch_versions: + prints(msg.format("Not Found! 
Join switches to the fabric then re-run this script.\n")) + return apic_version, None + + lowest_sw_ver = AciVersion(switch_versions.pop()) + for sw_version in switch_versions: + sw_version = AciVersion(sw_version) + if lowest_sw_ver.newer_than(sw_version): + lowest_sw_ver = sw_version + prints(msg.format(lowest_sw_ver) + "\n") + return apic_version, lowest_sw_ver + + def get_vpc_nodes(): """ Returns list of VPC Node IDs; ['101', '102', etc...] """ prints("Collecting VPC Node IDs...", end='') @@ -1406,26 +1778,31 @@ def get_vpc_nodes(): return vpc_nodes -def get_switch_version(): - """ Returns lowest switch version as AciVersion instance """ - prints("Gathering Lowest Switch Version from Firmware Repository...", end='') - firmwares = icurl('class', 'firmwareRunning.json') - versions = set() - - for firmware in firmwares: - versions.add(firmware['firmwareRunning']['attributes']['peVer']) +def query_common_data(api_only=False, arg_cversion=None, arg_tversion=None): + username = password = None + if not api_only: + username, password = get_credentials() - if versions: - lowest_sw_ver = AciVersion(versions.pop()) - for version in versions: - version = AciVersion(version) - if lowest_sw_ver.newer_than(str(version)): - lowest_sw_ver = version - prints('%s\n' % lowest_sw_ver) - return lowest_sw_ver - else: - prints("No Switches Detected! Join switches to the fabric then re-run this script.\n") - return None + try: + fabric_nodes = get_fabric_nodes() + cversion, sw_cversion = get_current_versions(fabric_nodes, arg_cversion) + tversion = get_target_version(arg_tversion) + vpc_nodes = get_vpc_nodes() + except Exception as e: + prints('\n\nError: %s' % e) + prints("Initial query failed. Ensure APICs are healthy. 
Ending script run.") + log.exception(e) + sys.exit(1) + + return { + 'username': username, + 'password': password, + 'cversion': cversion, + 'tversion': tversion, + 'sw_cversion': sw_cversion, + 'fabric_nodes': fabric_nodes, + 'vpc_node_ids': vpc_nodes, + } @check_wrapper(check_title="APIC Cluster Status") @@ -1463,29 +1840,30 @@ def apic_cluster_health_check(cversion, **kwargs): @check_wrapper(check_title="Switch Fabric Membership Status") -def switch_status_check(**kwargs): +def switch_status_check(fabric_nodes, **kwargs): result = FAIL_UF msg = '' headers = ['Pod-ID', 'Node-ID', 'State'] data = [] - recommended_action = 'Bring this node back to "active"' + recommended_action = 'Bring these nodes back to "active"' # fabricNode.fabricSt shows `disabled` for both Decommissioned and Maintenance (GIR). # fabricRsDecommissionNode.debug==yes is required to show `disabled (Maintenance)`. - fabricNodes = icurl('class', 'fabricNode.json?&query-target-filter=ne(fabricNode.role,"controller")') girNodes = icurl('class', 'fabricRsDecommissionNode.json?&query-target-filter=eq(fabricRsDecommissionNode.debug,"yes")') - for fabricNode in fabricNodes: - state = fabricNode['fabricNode']['attributes']['fabricSt'] + for fabric_node in fabric_nodes: + if fabric_node['fabricNode']['attributes']['role'] == "controller": + continue + state = fabric_node['fabricNode']['attributes']['fabricSt'] if state == 'active': continue - dn = re.search(node_regex, fabricNode['fabricNode']['attributes']['dn']) + dn = re.search(node_regex, fabric_node['fabricNode']['attributes']['dn']) pod_id = dn.group("pod") node_id = dn.group("node") for gir in girNodes: if node_id == gir['fabricRsDecommissionNode']['attributes']['targetId']: state = state + ' (Maintenance)' data.append([pod_id, node_id, state]) - if not fabricNodes: + if not fabric_nodes: result = MANUAL msg = 'Switch fabricNode not found!' 
elif not data: @@ -1519,14 +1897,13 @@ def maintp_grp_crossing_4_0_check(cversion, tversion, **kwargs): @check_wrapper(check_title="NTP Status") -def ntp_status_check(**kargs): +def ntp_status_check(fabric_nodes, **kargs): result = FAIL_UF headers = ["Pod-ID", "Node-ID"] data = [] recommended_action = 'Not Synchronized. Check NTP config and NTP server reachability.' doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#ntp-status" - fabricNodes = icurl('class', 'fabricNode.json') - nodes = [fn['fabricNode']['attributes']['id'] for fn in fabricNodes] + nodes = [fn['fabricNode']['attributes']['id'] for fn in fabric_nodes] apicNTPs = icurl('class', 'datetimeNtpq.json') switchNTPs = icurl('class', 'datetimeClkPol.json') for apicNTP in apicNTPs: @@ -1539,7 +1916,7 @@ def ntp_status_check(**kargs): dn = re.search(node_regex, switchNTP['datetimeClkPol']['attributes']['dn']) if dn and dn.group('node') in nodes: nodes.remove(dn.group('node')) - for fn in fabricNodes: + for fn in fabric_nodes: if fn['fabricNode']['attributes']['id'] in nodes: dn = re.search(node_regex, fn['fabricNode']['attributes']['dn']) data.append([dn.group('pod'), dn.group('node')]) @@ -1591,7 +1968,7 @@ def features_to_disable_check(cversion, tversion, **kwargs): @check_wrapper(check_title="Switch Upgrade Group Guidelines") -def switch_group_guideline_check(**kwargs): +def switch_group_guideline_check(fabric_nodes, **kwargs): result = FAIL_O headers = ['Group Name', 'Pod-ID', 'Node-IDs', 'Failure Reason'] data = [] @@ -1610,8 +1987,7 @@ def switch_group_guideline_check(**kwargs): reason_vpc = 'Both leaf nodes in the same vPC pair are in the same group.' 
nodes = {} - fabricNodes = icurl('class', 'fabricNode.json') - for fn in fabricNodes: + for fn in fabric_nodes: attr = fn['fabricNode']['attributes'] nodes[attr['dn']] = {'role': attr['role'], 'nodeType': attr['nodeType']} @@ -1690,6 +2066,7 @@ def switch_group_guideline_check(**kwargs): 'pod': vpc_peer['fabricNodePEp']['attributes']['podId'] }) if len(m_vpc_peers) > 1: + m_vpc_peers.sort(key=lambda d: d['node']) data.append([m_name, m_vpc_peers[0]['pod'], ','.join(x['node'] for x in m_vpc_peers), reason_vpc]) @@ -1716,7 +2093,7 @@ def switch_bootflash_usage_check(tversion, **kwargs): partitions = icurl('class', partitions_api) if not partitions: - return Result(result=ERROR, msg='bootflash objects not found', doc_url=doc_url) + return Result(result=MANUAL, msg='bootflash objects not found. Check switch health.', doc_url=doc_url) predownloaded_nodes = [] try: @@ -2211,7 +2588,7 @@ def switch_ssd_check(**kwargs): } doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#switch-ssd-health" - cs_regex = r'model \(New: (?P<model>\w+)\),' + cs_regex = r"model:(?P<model>\w+)," faultInsts = icurl('class', 'faultInst.json?query-target-filter=or(eq(faultInst.code,"F3073"),eq(faultInst.code,"F3074"))') for faultInst in faultInsts: @@ -2241,53 +2618,87 @@ def switch_ssd_check(**kwargs): # Connection Based Check @check_wrapper(check_title="APIC SSD Health") -def apic_ssd_check(cversion, username, password, **kwargs): +def apic_ssd_check(cversion, username, password, fabric_nodes, **kwargs): result = FAIL_UF - headers = ["Pod", "Node", "Storage Unit", "% lifetime remaining", "Recommended Action"] + headers = ["APIC ID", "APIC Name", "Storage Unit", "% lifetime remaining", "Recommended Action"] data = [] - unformatted_headers = ["Fault", "Fault DN", "% lifetime remaining", "Recommended Action"] + unformatted_headers = ["Fault DN", "% lifetime remaining", "Recommended Action"] unformatted_data = [] recommended_action = "Contact TAC for replacement" 
doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-ssd-health" - has_error = False dn_regex = node_regex + r'/.+p-\[(?P<storage>.+)\]-f' - faultInsts = icurl('class', 'faultInst.json?query-target-filter=eq(faultInst.code,"F2731")') - adjust_title = False - if len(faultInsts) == 0 and (cversion.older_than("4.2(7f)") or cversion.older_than("5.2(1g)")): - controller = icurl('class', 'topSystem.json?query-target-filter=eq(topSystem.role,"controller")') - if not controller: - return Result(result=ERROR, msg="topSystem response empty. Is the cluster healthy?", doc_url=doc_url) - - print('') - adjust_title = True + threshold = {"F2731": "<5% (Fault F2731)", "F2732": "<1% (Fault F2732)"} + # Not checking F0101 because if APIC SSD is not operaitonal, the given APIC + # does not work at all and APIC clustering should be broken. + faultInsts = icurl('class', 'faultInst.json?query-target-filter=or(eq(faultInst.code,"F2731"),eq(faultInst.code,"F2732"))') + for faultInst in faultInsts: + code = faultInst["faultInst"]["attributes"]["code"] + lifetime_remaining = threshold.get(code, "unknown") + dn_match = re.search(dn_regex, faultInst['faultInst']['attributes']['dn']) + if dn_match: + apic_name = "-" + for node in fabric_nodes: + if node["fabricNode"]["attributes"]["id"] == dn_match.group("node"): + apic_name = node["fabricNode"]["attributes"]["name"] + break + data.append([ + dn_match.group("node"), + apic_name, + dn_match.group("storage"), + lifetime_remaining, + recommended_action, + ]) + else: + unformatted_data.append([ + faultInst['faultInst']['attributes']['dn'], + lifetime_remaining, + recommended_action, + ]) + + # Versions older than 4.2(7f) or 5.x - 5.2(1g) may fail to raise F273x. + # Check logs for those just in case. 
+ has_error = False + if ( + len(faultInsts) == 0 + and ( + cversion.older_than("4.2(7f)") + or (cversion.major1 == "5" and cversion.older_than("5.2(1g)")) + ) + ): + apics = [node for node in fabric_nodes if node["fabricNode"]["attributes"]["role"] == "controller"] + if not apics: + return Result(result=ERROR, msg="No fabricNode of APIC. Is the cluster healthy?", doc_url=doc_url) + # `fabricNode` in pre-4.0 does not have `address` + if not apics[0]["fabricNode"]["attributes"].get("address"): + apic1 = [apic for apic in apics if apic["fabricNode"]["attributes"]["id"] == "1"][0] + apic1_dn = apic1["fabricNode"]["attributes"]["dn"] + apics = icurl("class", "{}/infraWiNode.json".format(apic1_dn)) + report_other = False - checked_apics = {} - for apic in controller: - attr = apic['topSystem']['attributes'] - if attr['address'] in checked_apics: continue - checked_apics[attr['address']] = 1 - pod_id = attr['podId'] - node_id = attr['id'] - node_title = 'Checking %s...' % attr['name'] - print_title(node_title) + for apic in apics: + if apic.get("fabricNode"): + apic_id = apic["fabricNode"]["attributes"]["id"] + apic_name = apic["fabricNode"]["attributes"]["name"] + apic_addr = apic["fabricNode"]["attributes"]["address"] + else: + apic_id = apic["infraWiNode"]["attributes"]["id"] + apic_name = apic["infraWiNode"]["attributes"]["nodeName"] + apic_addr = apic["infraWiNode"]["attributes"]["addr"] try: - c = Connection(attr['address']) + c = Connection(apic_addr) c.username = username c.password = password c.log = LOG_FILE c.connect() except Exception as e: - data.append([attr['id'], attr['name'], '-', '-', str(e)]) - print_result(node_title, ERROR) + data.append([apic_id, apic_name, '-', '-', str(e)]) has_error = True continue try: - c.cmd( - 'grep -oE "SSD Wearout Indicator is [0-9]+" /var/log/dme/log/svc_ifc_ae.bin.log | tail -1') + c.cmd('grep -oE "SSD Wearout Indicator is [0-9]+" /var/log/dme/log/svc_ifc_ae.bin.log | tail -1') except Exception as e: - 
data.append([attr['id'], attr['name'], '-', '-', str(e)]) - print_result(node_title, ERROR) + data.append([apic_id, apic_name, '-', '-', str(e)]) has_error = True continue @@ -2295,24 +2706,11 @@ def apic_ssd_check(cversion, username, password, **kwargs): if wearout_ind is not None: wearout = wearout_ind.group('wearout') if int(wearout) < 5: - data.append([pod_id, node_id, "Solid State Disk", wearout, recommended_action]) + data.append([apic_id, apic_name, "Solid State Disk", wearout, recommended_action]) report_other = True - print_result(node_title, DONE) continue if report_other: - data.append([pod_id, node_id, "Solid State Disk", wearout, "No Action Required"]) - print_result(node_title, DONE) - else: - headers = ["Fault", "Pod", "Node", "Storage Unit", "% lifetime remaining", "Recommended Action"] - for faultInst in faultInsts: - dn_array = re.search(dn_regex, faultInst['faultInst']['attributes']['dn']) - lifetime_remaining = "<5%" - if dn_array: - data.append(['F2731', dn_array.group("pod"), dn_array.group("node"), dn_array.group("storage"), - lifetime_remaining, recommended_action]) - else: - unformatted_data.append( - ['F2731', faultInst['faultInst']['attributes']['dn'], lifetime_remaining, recommended_action]) + data.append([apic_id, apic_name, "Solid State Disk", wearout, "No Action Required"]) if has_error: result = ERROR elif not data and not unformatted_data: @@ -2324,7 +2722,6 @@ def apic_ssd_check(cversion, username, password, **kwargs): unformatted_headers=unformatted_headers, unformatted_data=unformatted_data, doc_url=doc_url, - adjust_title=adjust_title, ) @@ -2895,9 +3292,9 @@ def lldp_with_infra_vlan_mismatch_check(**kwargs): # Connection Based Check @check_wrapper(check_title="APIC Target version image and MD5 hash") -def apic_version_md5_check(tversion, username, password, **kwargs): +def apic_version_md5_check(tversion, username, password, fabric_nodes, **kwargs): result = FAIL_UF - headers = ['APIC', 'Firmware', 'md5sum', 'Failure'] + 
headers = ["APIC ID", "APIC Name", "Firmware", "md5sum", "Failure"] data = [] recommended_action = 'Delete the firmware from APIC and re-download' doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-target-version-image-and-md5-hash" @@ -2912,7 +3309,7 @@ def apic_version_md5_check(tversion, username, password, **kwargs): desc = fm_mo["firmwareFirmware"]['attributes']["description"] md5 = fm_mo["firmwareFirmware"]['attributes']["checksum"] if "Image signing verification failed" in desc: - data.append(["All", str(tversion), md5, 'Target image is corrupted']) + data.append(["All", "-", str(tversion), md5, 'Target image is corrupted']) image_validaton = False if not image_validaton: @@ -2921,24 +3318,33 @@ def apic_version_md5_check(tversion, username, password, **kwargs): md5s = [] md5_names = [] + apics = [node for node in fabric_nodes if node["fabricNode"]["attributes"]["role"] == "controller"] + if not apics: + return Result(result=ERROR, msg="No fabricNode of APIC. Is the cluster healthy?", doc_url=doc_url) + # `fabricNode` in pre-4.0 does not have `address` + if not apics[0]["fabricNode"]["attributes"].get("address"): + apic1 = [apic for apic in apics if apic["fabricNode"]["attributes"]["id"] == "1"][0] + apic1_dn = apic1["fabricNode"]["attributes"]["dn"] + apics = icurl("class", "{}/infraWiNode.json".format(apic1_dn)) + has_error = False - prints('') - nodes_response_json = icurl('class', 'topSystem.json') - for node in nodes_response_json: - if node['topSystem']['attributes']['role'] != "controller": - continue - apic_name = node['topSystem']['attributes']['name'] - node_title = 'Checking %s...' 
% apic_name - print_title(node_title) + for apic in apics: + if apic.get("fabricNode"): + apic_id = apic["fabricNode"]["attributes"]["id"] + apic_name = apic["fabricNode"]["attributes"]["name"] + apic_addr = apic["fabricNode"]["attributes"]["address"] + else: + apic_id = apic["infraWiNode"]["attributes"]["id"] + apic_name = apic["infraWiNode"]["attributes"]["nodeName"] + apic_addr = apic["infraWiNode"]["attributes"]["addr"] try: - c = Connection(node['topSystem']['attributes']['address']) + c = Connection(apic_addr) c.username = username c.password = password c.log = LOG_FILE c.connect() except Exception as e: - data.append([apic_name, '-', '-', str(e)]) - print_result(node_title, ERROR) + data.append([apic_id, apic_name, '-', '-', str(e)]) has_error = True continue @@ -2946,28 +3352,24 @@ def apic_version_md5_check(tversion, username, password, **kwargs): c.cmd("ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.%s.bin" % tversion.dot_version) except Exception as e: - data.append([apic_name, '-', '-', + data.append([apic_id, apic_name, '-', '-', 'ls command via ssh failed due to:{}'.format(str(e))]) - print_result(node_title, ERROR) has_error = True continue if "No such file or directory" in c.output: - data.append([apic_name, str(tversion), '-', 'image not found']) - print_result(node_title, FAIL_UF) + data.append([apic_id, apic_name, str(tversion), '-', 'image not found']) continue try: c.cmd("cat /firmware/fwrepos/fwrepo/md5sum/aci-apic-dk9.%s.bin" % tversion.dot_version) except Exception as e: - data.append([apic_name, str(tversion), '-', + data.append([apic_id, apic_name, str(tversion), '-', 'failed to check md5sum via ssh due to:{}'.format(str(e))]) - print_result(node_title, ERROR) has_error = True continue if "No such file or directory" in c.output: - data.append([apic_name, str(tversion), '-', 'md5sum file not found']) - print_result(node_title, FAIL_UF) + data.append([apic_id, apic_name, str(tversion), '-', 'md5sum file not found']) continue for line in 
c.output.split("\n"): words = line.split() @@ -2976,23 +3378,21 @@ def apic_version_md5_check(tversion, username, password, **kwargs): words[1].startswith("/var/run/mgmt/fwrepos/fwrepo/aci-apic") ): md5s.append(words[0]) - md5_names.append(apic_name) + md5_names.append([apic_id, apic_name]) break else: - data.append([apic_name, str(tversion), '-', 'unexpected output when checking md5sum file']) - print_result(node_title, ERROR) + data.append([apic_id, apic_name, str(tversion), '-', 'unexpected output when checking md5sum file']) has_error = True continue - print_result(node_title, DONE) if len(set(md5s)) > 1: - for name, md5 in zip(md5_names, md5s): - data.append([name, str(tversion), md5, 'md5sum do not match on all APICs']) + for id_name, md5 in zip(md5_names, md5s): + data.append([id_name[0], id_name[1], str(tversion), md5, 'md5sum do not match on all APICs']) if has_error: result = ERROR elif not data: result = PASS - return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url, adjust_title=True) + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) # Connection Based Check @@ -3052,7 +3452,7 @@ def standby_apic_disk_space_check(**kwargs): @check_wrapper(check_title="Remote Leaf Compatibility") -def r_leaf_compatibility_check(tversion, **kwargs): +def r_leaf_compatibility_check(tversion, fabric_nodes, **kwargs): result = PASS headers = ['Target Version', 'Remote Leaf', 'Direct Traffic Forwarding'] data = [] @@ -3065,7 +3465,7 @@ def r_leaf_compatibility_check(tversion, **kwargs): if not tversion: return Result(result=MANUAL, msg=TVER_MISSING) - remote_leafs = icurl('class', 'fabricNode.json?&query-target-filter=eq(fabricNode.nodeType,"remote-leaf-wan")') + remote_leafs = [node for node in fabric_nodes if node["fabricNode"]["attributes"]["nodeType"] == "remote-leaf-wan"] if not remote_leafs: return Result(result=NA, msg="No Remote Leaf Found") @@ 
-3182,20 +3582,19 @@ def vmm_controller_adj_check(**kwargs): @check_wrapper(check_title="VPC-paired Leaf switches") -def vpc_paired_switches_check(vpc_node_ids, **kwargs): +def vpc_paired_switches_check(vpc_node_ids, fabric_nodes, **kwargs): result = PASS headers = ["Node ID", "Node Name"] data = [] recommended_action = 'Determine if dataplane redundancy is available if these nodes go down.' doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#vpc-paired-leaf-switches' - top_system = icurl('class', 'topSystem.json') - for node in top_system: - node_id = node['topSystem']['attributes']['id'] - role = node['topSystem']['attributes']['role'] + for node in fabric_nodes: + node_id = node['fabricNode']['attributes']['id'] + role = node['fabricNode']['attributes']['role'] if role == 'leaf' and (node_id not in vpc_node_ids): result = MANUAL - name = node['topSystem']['attributes']['name'] + name = node['fabricNode']['attributes']['name'] data.append([node_id, name]) return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) @@ -3350,13 +3749,18 @@ def bgp_golf_route_target_type_check(cversion, tversion, **kwargs): @check_wrapper(check_title="APIC Container Bridge IP Overlap with APIC TEP") -def docker0_subnet_overlap_check(**kwargs): +def docker0_subnet_overlap_check(cversion, **kwargs): result = PASS headers = ["Container Bridge IP", "APIC TEP"] data = [] recommended_action = 'Change the container bridge IP via "Apps > Settings" on the APIC GUI' doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-container-bridge-ip-overlap-with-apic-tep" + # AppCenter was deprecated in 6.1.2. + # Due to a bug the deprecated object returns totalCount:1 with empty data instead of totalCount:0. 
+ if cversion.newer_than("6.1(2a)"): + return Result(result=NA, msg=VER_NOT_AFFECTED) + containerPols = icurl('mo', 'pluginPolContr/ContainerPol.json') if not containerPols: bip = "172.17.0.1/16" @@ -3423,7 +3827,7 @@ def target_version_compatibility_check(cversion, tversion, **kwargs): @check_wrapper(check_title="Gen 1 switch compatibility") -def gen1_switch_compatibility_check(tversion, **kwargs): +def gen1_switch_compatibility_check(tversion, fabric_nodes, **kwargs): result = FAIL_UF headers = ["Target Version", "Node ID", "Model", "Warning"] gen1_models = ["N9K-C9336PQ", "N9K-X9736PQ", "N9K-C9504-FM", "N9K-C9508-FM", "N9K-C9516-FM", "N9K-C9372PX-E", @@ -3431,13 +3835,12 @@ def gen1_switch_compatibility_check(tversion, **kwargs): "N9K-C93128TX"] data = [] recommended_action = 'Select supported target version or upgrade hardware' - doc_url = 'http://cs.co/9001ydKCV' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#compatibility-switch-hardware-gen1' if not tversion: return Result(result=MANUAL, msg=TVER_MISSING) if tversion.newer_than("5.0(1a)"): - fabric_node = icurl('class', 'fabricNode.json') - for node in fabric_node: + for node in fabric_nodes: if node['fabricNode']['attributes']['model'] in gen1_models: data.append([str(tversion), node['fabricNode']['attributes']['id'], node['fabricNode']['attributes']['model'], 'Not supported on 5.x+']) @@ -3678,22 +4081,26 @@ def apic_ca_cert_validation(**kwargs): @check_wrapper(check_title="FabricDomain Name") -def fabricdomain_name_check(cversion, tversion, **kwargs): +def fabricdomain_name_check(cversion, tversion, fabric_nodes, **kwargs): result = FAIL_O headers = ["FabricDomain", "Reason"] data = [] recommended_action = "Do not upgrade to 6.0(2)" - doc_url = 'https://bst.cloudapps.cisco.com/bugsearch/bug/CSCwf80352' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#fabricdomain-name' if not tversion: return Result(result=MANUAL, 
msg=TVER_MISSING) if tversion.same_as("6.0(2h)"): - controller = icurl('class', 'topSystem.json?query-target-filter=eq(topSystem.role,"controller")') - if not controller: - return Result(result=ERROR, msg='topSystem response empty. Is the cluster healthy?') - - fabricDomain = controller[0]['topSystem']['attributes']['fabricDomain'] + apic1 = [node for node in fabric_nodes if node["fabricNode"]["attributes"]["id"] == "1"] + if not apic1: + return Result(result=ERROR, msg='No fabricNode of APIC 1. Is the cluster healthy?') + + # Using topSystem because fabricTopology.fabricDomain is not yet available prior to 5.2(6e). + apic1_topsys = icurl("mo", "/".join([apic1[0]["fabricNode"]["attributes"]["dn"], "sys.json"])) + if not apic1_topsys: + return Result(result=ERROR, msg='No topSystem of APIC 1. Is the cluster healthy?') + fabricDomain = apic1_topsys[0]['topSystem']['attributes']['fabricDomain'] if re.search(r'#|;', fabricDomain): data.append([fabricDomain, "Contains a special character"]) @@ -3730,7 +4137,7 @@ def sup_hwrev_check(cversion, tversion, **kwargs): sup_re = r'/.+(?P<supslot>supslot-\d+)' sups = icurl('class', 'eqptSpCmnBlk.json?&query-target-filter=wcard(eqptSpromSupBlk.dn,"sup")') if not sups: - return Result(result=ERROR, msg='No sups found. This is unlikely.') + return Result(result=MANUAL, msg='No sups found. This is unlikely. Check switch health.') for sup in sups: prtNum = sup['eqptSpCmnBlk']['attributes']['prtNum'] @@ -3839,25 +4246,28 @@ def oob_mgmt_security_check(cversion, tversion, **kwargs): @check_wrapper(check_title="Mini ACI Upgrade to 6.0(2)+") -def mini_aci_6_0_2_check(cversion, tversion, **kwargs): +def mini_aci_6_0_2_check(cversion, tversion, fabric_nodes, **kwargs): result = FAIL_UF - headers = ["Pod ID", "Node ID", "APIC Type", "Failure Reason"] + headers = ["Node ID", "Node Name", "APIC Type"] data = [] recommended_action = "All virtual APICs must be removed from the cluster prior to upgrading to 6.0(2)+." 
- doc_url = 'Upgrading Mini ACI - http://cs.co/9009bBTQB' + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#mini-aci-upgrade-to-602-or-later' if not tversion: return Result(result=MANUAL, msg=TVER_MISSING) - if cversion.older_than("6.0(2a)") and tversion.newer_than("6.0(2a)"): - topSystem = icurl('class', 'topSystem.json?query-target-filter=wcard(topSystem.role,"controller")') - if not topSystem: - return Result(result=ERROR, msg='topSystem response empty. Is the cluster healthy?') - for controller in topSystem: - if controller['topSystem']['attributes']['nodeType'] == "virtual": - pod_id = controller["topSystem"]["attributes"]["podId"] - node_id = controller['topSystem']['attributes']['id'] - data.append([pod_id, node_id, "virtual", "Virtual APIC must be removed prior to upgrade to 6.0(2)+"]) + if not (cversion.older_than("6.0(2a)") and tversion.newer_than("6.0(2a)")): + return Result(result=NA, msg=VER_NOT_AFFECTED, doc_url=doc_url) + + apics = [node for node in fabric_nodes if node["fabricNode"]["attributes"]["role"] == "controller"] + if not apics: + return Result(result=ERROR, msg="No fabricNode of APIC. 
Is the cluster healthy?", doc_url=doc_url) + + for apic in apics: + if apic['fabricNode']['attributes']['nodeType'] == "virtual": + node_id = apic['fabricNode']['attributes']['id'] + node_name = apic['fabricNode']['attributes']['name'] + data.append([node_id, node_name, "virtual"]) if not data: result = PASS @@ -4128,7 +4538,7 @@ def fabric_dpp_check(tversion, **kwargs): @check_wrapper(check_title='N9K-C93108TC-FX3P/FX3H Interface Down') -def n9k_c93108tc_fx3p_interface_down_check(tversion, **kwargs): +def n9k_c93108tc_fx3p_interface_down_check(tversion, fabric_nodes, **kwargs): result = PASS headers = ["Node ID", "Node Name", "Product ID"] data = [] @@ -4143,12 +4553,9 @@ def n9k_c93108tc_fx3p_interface_down_check(tversion, **kwargs): or tversion.same_as("5.3(1d)") or (tversion.major1 == "6" and tversion.older_than("6.0(4a)")) ): - api = 'fabricNode.json' - api += '?query-target-filter=or(' - api += 'eq(fabricNode.model,"N9K-C93108TC-FX3P"),' - api += 'eq(fabricNode.model,"N9K-C93108TC-FX3H"))' - nodes = icurl('class', api) - for node in nodes: + for node in fabric_nodes: + if node["fabricNode"]["attributes"]["model"] not in ["N9K-C93108TC-FX3P", "N9K-C93108TC-FX3H"]: + continue nodeid = node["fabricNode"]["attributes"]["id"] name = node["fabricNode"]["attributes"]["name"] pid = node["fabricNode"]["attributes"]["model"] @@ -4802,7 +5209,7 @@ def validate_32_64_bit_image_check(cversion, tversion, **kwargs): @check_wrapper(check_title='Fabric Link Redundancy') -def fabric_link_redundancy_check(**kwargs): +def fabric_link_redundancy_check(fabric_nodes, **kwargs): result = PASS headers = ["Leaf Name", "Fabric Link Adjacencies", "Problem"] data = [] @@ -4811,20 +5218,18 @@ def fabric_link_redundancy_check(**kwargs): t1_recommended_action = "Connect the tier 2 leaf switch(es) to multiple tier1 leaf switches for redundancy" doc_url = "https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#fabric-link-redundancy" - fabric_nodes_api = 'fabricNode.json' 
- fabric_nodes_api += '?query-target-filter=and(or(eq(fabricNode.role,"leaf"),eq(fabricNode.role,"spine")),eq(fabricNode.fabricSt,"active"))' - lldp_adj_api = 'lldpAdjEp.json' lldp_adj_api += '?query-target-filter=wcard(lldpAdjEp.sysDesc,"topology/pod")' - fabricNodes = icurl("class", fabric_nodes_api) spines = {} leafs = {} t2leafs = {} - for node in fabricNodes: + for node in fabric_nodes: if node["fabricNode"]["attributes"]["nodeType"] == "remote-leaf-wan": # Not applicable to remote leafs, skip continue + if node["fabricNode"]["attributes"]["fabricSt"] != "active": + continue dn = node["fabricNode"]["attributes"]["dn"] name = node["fabricNode"]["attributes"]["name"] if node["fabricNode"]["attributes"]["role"] == "spine": @@ -4938,7 +5343,7 @@ def out_of_service_ports_check(**kwargs): @check_wrapper(check_title='FC/FCOE support removed for -EX platforms') -def fc_ex_model_check(tversion, **kwargs): +def fc_ex_model_check(tversion, fabric_nodes, **kwargs): result = PASS headers = ["FC/FCOE Node ID", "Model"] data = [] @@ -4946,8 +5351,6 @@ def fc_ex_model_check(tversion, **kwargs): doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#fcfcoe-support-for-ex-switches' fcEntity_api = "fcEntity.json" - fabricNode_api = 'fabricNode.json' - fabricNode_api += '?query-target-filter=wcard(fabricNode.model,".*EX")' if not tversion: return Result(result=MANUAL, msg=TVER_MISSING) @@ -4955,13 +5358,11 @@ def fc_ex_model_check(tversion, **kwargs): if (tversion.newer_than("6.0(7a)") and tversion.older_than("6.0(9c)")) or tversion.same_as("6.1(1f)"): fcEntitys = icurl('class', fcEntity_api) fc_nodes = [] - if fcEntitys: - for fcEntity in fcEntitys: - fc_nodes.append(fcEntity['fcEntity']['attributes']['dn'].split('/sys')[0]) + for fcEntity in fcEntitys: + fc_nodes.append(fcEntity['fcEntity']['attributes']['dn'].split('/sys')[0]) if fc_nodes: - fabricNodes = icurl('class', fabricNode_api) - for node in fabricNodes: + for node in fabric_nodes: 
node_dn = node['fabricNode']['attributes']['dn'] if node_dn in fc_nodes: model = node['fabricNode']['attributes']['model'] @@ -5040,7 +5441,7 @@ def clock_signal_component_failure_check(**kwargs): @check_wrapper(check_title='Stale Decomissioned Spine') -def stale_decomissioned_spine_check(tversion, **kwargs): +def stale_decomissioned_spine_check(tversion, fabric_nodes, **kwargs): result = PASS headers = ["Susceptible Spine Node Id", "Spine Name", "Current Node State"] data = [] @@ -5048,8 +5449,6 @@ def stale_decomissioned_spine_check(tversion, **kwargs): doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#stale-decommissioned-spine' decomissioned_api = 'fabricRsDecommissionNode.json' - active_spine_api = 'topSystem.json' - active_spine_api += '?query-target-filter=eq(topSystem.role,"spine")' if not tversion: return Result(result=MANUAL, msg=TVER_MISSING) @@ -5059,13 +5458,16 @@ def stale_decomissioned_spine_check(tversion, **kwargs): if decomissioned_switches: decommissioned_node_ids = [node['fabricRsDecommissionNode']['attributes']['targetId'] for node in decomissioned_switches] - active_spine_mo = icurl('class', active_spine_api) - for spine in active_spine_mo: - node_id = spine['topSystem']['attributes']['id'] - name = spine['topSystem']['attributes']['name'] - state = spine['topSystem']['attributes']['state'] + for node in fabric_nodes: + if node["fabricNode"]["attributes"]["role"] != "spine": + continue + if node["fabricNode"]["attributes"]["fabricSt"] != "active": + continue + node_id = node["fabricNode"]["attributes"]["id"] + name = node["fabricNode"]["attributes"]["name"] + fabricSt = node["fabricNode"]["attributes"]["fabricSt"] if node_id in decommissioned_node_ids: - data.append([node_id, name, state]) + data.append([node_id, name, fabricSt]) if data: result = FAIL_O return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) @@ -5334,43 +5736,47 @@ def 
service_bd_forceful_routing_check(cversion, tversion, **kwargs): # Connection Base Check @check_wrapper(check_title='Observer Database Size') -def observer_db_size_check(username, password, **kwargs): +def observer_db_size_check(username, password, fabric_nodes, **kwargs): result = PASS - headers = ["Node", "File Location", "Size (GB)"] + headers = ["APIC ID", "APIC Name", "File Location", "Size (GB)"] data = [] recommended_action = 'Contact TAC to analyze and truncate large DB files' doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations#observer-database-size' - topSystem_api = 'topSystem.json' - topSystem_api += '?query-target-filter=eq(topSystem.role,"controller")' - - controllers = icurl('class', topSystem_api) - if not controllers: - return Result(result=ERROR, msg='topSystem response empty. Is the cluster healthy?') + apics = [node for node in fabric_nodes if node["fabricNode"]["attributes"]["role"] == "controller"] + if not apics: + return Result(result=ERROR, msg="No fabricNode of APIC. Is the cluster healthy?", doc_url=doc_url) + # `fabricNode` in pre-4.0 does not have `address` + if not apics[0]["fabricNode"]["attributes"].get("address"): + apic1 = [apic for apic in apics if apic["fabricNode"]["attributes"]["id"] == "1"][0] + apic1_dn = apic1["fabricNode"]["attributes"]["dn"] + apics = icurl("class", "{}/infraWiNode.json".format(apic1_dn)) has_error = False - prints('') - for apic in controllers: - attr = apic['topSystem']['attributes'] - node_title = 'Checking %s...' 
% attr['name'] - print_title(node_title) + for apic in apics: + if apic.get("fabricNode"): + apic_id = apic["fabricNode"]["attributes"]["id"] + apic_name = apic["fabricNode"]["attributes"]["name"] + apic_addr = apic["fabricNode"]["attributes"]["address"] + else: + apic_id = apic["infraWiNode"]["attributes"]["id"] + apic_name = apic["infraWiNode"]["attributes"]["nodeName"] + apic_addr = apic["infraWiNode"]["attributes"]["addr"] try: - c = Connection(attr['address']) + c = Connection(apic_addr) c.username = username c.password = password c.log = LOG_FILE c.connect() except Exception as e: - data.append([attr['id'], attr['name'], str(e)]) - print_result(node_title, ERROR) + data.append([apic_id, apic_name, "-", str(e)]) has_error = True continue try: cmd = r"ls -lh /data2/dbstats | awk '{print $5, $9}'" c.cmd(cmd) if "No such file or directory" in c.output: - data.append([attr['id'], '/data2/dbstats/ not found', "Check user permissions or retry as 'apic#fallback\\\\admin'"]) - print_result(node_title, ERROR) + data.append([apic_id, apic_name, '/data2/dbstats/ not found', "Check user permissions or retry as 'apic#fallback\\\\admin'"]) has_error = True continue dbstats = c.output.split("\n") @@ -5380,18 +5786,16 @@ def observer_db_size_check(username, password, **kwargs): if size_match: file_size = size_match.group("size") file_name = "/data2/dbstats/" + size_match.group("file") - data.append([attr['id'], file_name, file_size]) - print_result(node_title, DONE) + data.append([apic_id, apic_name, file_name, file_size]) except Exception as e: - data.append([attr['id'], attr['name'], str(e)]) - print_result(node_title, ERROR) + data.append([apic_id, apic_name, "-", str(e)]) has_error = True continue if has_error: result = ERROR elif data: result = FAIL_UF - return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url, adjust_title=True) + return Result(result=result, headers=headers, data=data, 
recommended_action=recommended_action, doc_url=doc_url) @check_wrapper(check_title='AVE End-of-Life') @@ -5609,7 +6013,7 @@ def configpush_shard_check(tversion, **kwargs): doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#policydist-configpushshardcont-crash' if not tversion: - return Result(result=MANUAL, msg=TVER_MISSING) + return Result(result=MANUAL, msg=TVER_MISSING) if tversion.older_than("6.1(4a)"): result = PASS @@ -5640,11 +6044,12 @@ def parse_args(args): parser.add_argument("-n", "--no-cleanup", action="store_true", help="Skip all file cleanup after script execution.") parser.add_argument("-v", "--version", action="store_true", help="Only show the script version, then end.") parser.add_argument("--total-checks", action="store_true", help="Only show the total number of checks, then end.") + parser.add_argument("--timeout", action="store", nargs="?", type=int, const=-1, default=DEFAULT_TIMEOUT, help="Show default script timeout (sec) or overwrite it when a number is provided (e.g. --timeout 1200).") parsed_args = parser.parse_args(args) return parsed_args -def initialize(): +def init_system(): """ Initialize the script environment, create necessary directories and set up log. Not required for some options such as `--version` or `--total-checks`. @@ -5655,51 +6060,49 @@ def initialize(): log.info("Creating directories %s and %s", DIR, JSON_DIR) os.mkdir(DIR) os.mkdir(JSON_DIR) - fmt = '[%(asctime)s.%(msecs)03d{} %(levelname)-8s %(funcName)20s:%(lineno)-4d] %(message)s'.format(tz) + fmt = '[%(asctime)s.%(msecs)03d{} %(levelname)-8s %(funcName)s:%(lineno)-4d(%(threadName)s)] %(message)s'.format(tz) logging.basicConfig(level=logging.DEBUG, filename=LOG_FILE, format=fmt, datefmt='%Y-%m-%d %H:%M:%S') -def prepare(api_only, arg_tversion, arg_cversion, checks): - prints(' ==== %s%s, Script Version %s ====\n' % (ts, tz, SCRIPT_VERSION)) - prints('!!!! 
Check https://github.com/datacenter/ACI-Pre-Upgrade-Validation-Script for Latest Release !!!!\n') +def wrapup_system(no_cleanup): + subprocess.check_output(['tar', '-czf', BUNDLE_NAME, DIR]) + bundle_loc = '/'.join([os.getcwd(), BUNDLE_NAME]) + prints(""" + Pre-Upgrade Check Complete. + Next Steps: Address all checks flagged as FAIL, ERROR or MANUAL CHECK REQUIRED - # Create empty result files for all checks - for idx, check in enumerate(checks): - check(idx + 1, len(checks), init=True) + Result output and debug info saved to below bundle for later reference. + Attach this bundle to Cisco TAC SRs opened to address the flagged checks. - username = password = None - if not api_only: - username, password = get_credentials() - try: - cversion = get_current_version(arg_cversion) - tversion = get_target_version(arg_tversion) - vpc_nodes = get_vpc_nodes() - sw_cversion = get_switch_version() - except Exception as e: - prints('\n\nError: %s' % e) - prints("Initial query failed. Ensure APICs are healthy. Ending script run.") - log.exception(e) - sys.exit() - inputs = {'username': username, 'password': password, - 'cversion': cversion, 'tversion': tversion, - 'vpc_node_ids': vpc_nodes, 'sw_cversion': sw_cversion} - metadata = { - "name": "PreupgradeCheck", - "method": "standalone script", - "datetime": ts + tz, - "script_version": str(SCRIPT_VERSION), - "cversion": str(cversion), - "tversion": str(tversion), - "sw_cversion": str(sw_cversion), - "api_only": api_only, - "total_checks": len(checks), - } - with open(META_FILE, "w") as f: - json.dump(metadata, f, indent=2) - return inputs + Result Bundle: {bundle} +""".format(bundle=bundle_loc)) + prints('==== Script Version %s FIN ====' % (SCRIPT_VERSION)) + + # puv integration needs to keep reading files from `JSON_DIR` under `DIR`. 
+ if not no_cleanup and os.path.isdir(DIR): + log.info('Cleaning up temporary files and directories...') + shutil.rmtree(DIR) -def get_checks(api_only, debug_function): +class CheckManager: + """Central managing point of all checks. + Highlevel flows: + 1. Initialize checks + Through `intialize_check()` in the decorator `check_wrapper` for + each check, this does two things: + 1. get the mapping of check_title to check_id which is a check function name. + 2. write empty `AciResult` of each check into a JSON result file. + which is automatically done via decorator `check_wrapper`. + 2. Run checks in thread + Monitor the progress with timeout + 3. Finalize check results + When checks completed within the time limit (`self.monitor_timeout`), + `finalize_check()` is called through `check_wrapper`. + This does two things: + 1. get the mapping of `Result` to check_id + 2. update the JSON result file with the new `AciResult` + 4. Print the result to stdout + """ api_checks = [ # General Checks target_version_compatibility_check, @@ -5792,10 +6195,9 @@ def get_checks(api_only, debug_function): service_ep_flag_bd_check, ] - conn_checks = [ + ssh_checks = [ # General apic_version_md5_check, - apic_database_size_check, # Faults standby_apic_disk_space_check, @@ -5803,62 +6205,97 @@ def get_checks(api_only, debug_function): # Bugs observer_db_size_check, - apic_ca_cert_validation, - ] - if debug_function: - return [check for check in api_checks + conn_checks if check.__name__ == debug_function] - if api_only: - return api_checks - return conn_checks + api_checks - - -def run_checks(checks, inputs): - summary_headers = [PASS, FAIL_O, FAIL_UF, MANUAL, POST, NA, ERROR, 'TOTAL'] - summary = {key: 0 if key != 'TOTAL' else len(checks) for key in summary_headers} - for idx, check in enumerate(checks): - try: - r = check(idx + 1, len(checks), **inputs) - summary[r] += 1 - except KeyboardInterrupt: - prints('\n\n!!! 
KeyboardInterrupt !!!\n') - break - except Exception as e: - prints('') - err = 'Wrapper Error: %s' % e - print_title(err) - print_result(title=err, result=ERROR) - summary[ERROR] += 1 - logging.exception(e) + cli_checks = [ + # General + apic_database_size_check, - prints('\n=== Summary Result ===\n') - res = max(summary_headers, key=len) - max_header_len = len(res) - for key in summary_headers: - prints('{:{}} : {:2}'.format(key, max_header_len, summary[key])) + # Bugs + apic_ca_cert_validation, + ] - with open(SUMMARY_FILE, 'w') as f: - json.dump(summary, f, indent=2) + def __init__(self, api_only=False, debug_function="", timeout=600, monitor_interval=0.5): + self.api_only = api_only + self.debug_function = debug_function + self.monitor_interval = monitor_interval # sec + self.monitor_timeout = timeout # sec + self.timeout_event = None + self.check_funcs = self.get_check_funcs() -def wrapup(no_cleanup): - subprocess.check_output(['tar', '-czf', BUNDLE_NAME, DIR]) - bundle_loc = '/'.join([os.getcwd(), BUNDLE_NAME]) - prints(""" - Pre-Upgrade Check Complete. - Next Steps: Address all checks flagged as FAIL, ERROR or MANUAL CHECK REQUIRED + self.rm = ResultManager() - Result output and debug info saved to below bundle for later reference. - Attach this bundle to Cisco TAC SRs opened to address the flagged checks. - - Result Bundle: {bundle} - """.format(bundle=bundle_loc)) - prints('==== Script Version %s FIN ====' % (SCRIPT_VERSION)) + @property + def total_checks(self): + return len(self.check_funcs) - # puv integration needs to keep reading files from `JSON_DIR` under `DIR`. 
- if not no_cleanup and os.path.isdir(DIR): - log.info('Cleaning up temporary files and directories...') - shutil.rmtree(DIR) + @property + def check_ids(self): + return [check_func.__name__ for check_func in self.check_funcs] + + def get_check_funcs(self): + all_checks = [] + self.api_checks # must be a new list to avoid changing api_checks + if not self.api_only: + all_checks += self.ssh_checks + self.cli_checks + if self.debug_function: + return [check for check in all_checks if check.__name__ == self.debug_function] + return all_checks + + def get_check_title(self, check_id): + title = self.rm.titles.get(check_id, "") + if not title: + log.error("Failed to find title for {}".format(check_id)) + return title + + def get_check_result(self, check_id): + result_obj = self.rm.results.get(check_id) + if not result_obj: + log.error("Failed to find result for {}".format(check_id)) + return result_obj + + def get_result_summary(self): + return self.rm.get_summary() + + def initialize_check(self, check_id, check_title): + self.rm.init_result(check_id, check_title) + + def finalize_check(self, check_id, result_obj): + # We do not update the result from here in the case of timeout. + if self.timeout_event and self.timeout_event.is_set(): + return None + if not isinstance(result_obj, Result): + raise TypeError("The result of {} is not a `Result` object".format(check_id)) + return None + self.rm.update_result(check_id, result_obj) + + def finalize_check_on_thread_failure(self, check_id): + """Update the result of a check that couldn't start as ERROR""" + r = Result(result=ERROR, msg="Skipped due to a failure in starting a thread for this check.") + self.rm.update_result(check_id, r) + + def finalize_check_on_thread_timeout(self, check_id): + """Update the result of a check that couldn't finish in time as ERROR""" + msg = "Timeout. 
Unable to finish in time ({} sec).".format(self.monitor_timeout) + r = Result(result=ERROR, msg=msg) + self.rm.update_result(check_id, r) + + def initialize_checks(self): + for check_func in self.check_funcs: + check_func(initialize_check=self.initialize_check) + + def run_checks(self, common_data): + tm = ThreadManager( + funcs=self.check_funcs, + common_kwargs=dict({"finalize_check": self.finalize_check}, **common_data), + monitor_interval=self.monitor_interval, + monitor_timeout=self.monitor_timeout, + callback_on_monitoring=print_progress, + callback_on_start_failure=self.finalize_check_on_thread_failure, + callback_on_timeout=self.finalize_check_on_thread_timeout, + ) + self.timeout_event = tm.timeout_event + tm.start() + tm.join() def main(_args=None): @@ -5866,16 +6303,63 @@ def main(_args=None): if args.version: print(SCRIPT_VERSION) return - checks = get_checks(args.api_only, args.debug_function) + + if args.timeout == -1: + print("Timeout(sec): {}".format(DEFAULT_TIMEOUT)) + return + + cm = CheckManager(args.api_only, args.debug_function, args.timeout) + if args.total_checks: - print("Total Number of Checks: {}".format(len(checks))) + print("Total Number of Checks: {}".format(cm.total_checks)) return - initialize() - inputs = prepare(args.api_only, args.tversion, args.cversion, checks) - run_checks(checks, inputs) - wrapup(args.no_cleanup) + init_system() + + # Initialize checks with empty results + cm.initialize_checks() + + prints(' ==== %s%s, Script Version %s ====\n' % (ts, tz, SCRIPT_VERSION)) + prints('!!!! Check https://github.com/datacenter/ACI-Pre-Upgrade-Validation-Script for Latest Release !!!!\n') + + common_data = query_common_data(args.api_only, args.cversion, args.tversion) + write_script_metadata(args.api_only, args.timeout, cm.total_checks, common_data) + + cm.run_checks(common_data) + + # Print result reports + prints("\n") + if cm.timeout_event.is_set(): + prints("Timeout !!! 
Abort and printing the results...\n") + + prints("\n=== Check Result (failed only) ===\n") + + # Print result of each failed check + for index, check_id in enumerate(cm.check_ids): + result_obj = cm.get_check_result(check_id) + if not result_obj or result_obj.result in (NA, PASS): + continue + check_title = cm.get_check_title(check_id) + print_result(index + 1, cm.total_checks, check_title, **result_obj.as_dict()) + + # Print summary + summary = cm.get_result_summary() + prints('\n=== Summary Result ===\n') + longest_header = max(summary.keys(), key=len) + max_header_len = len(longest_header) + for key in summary: + prints('{:{}} : {:2}'.format(key, max_header_len, summary[key])) + + write_jsonfile(SUMMARY_FILE, summary) + + wrapup_system(args.no_cleanup) if __name__ == "__main__": - main() + try: + main() + except Exception as e: + msg = "Abort due to unexpected error - {}".format(e) + prints(msg) + log.error(msg, exc_info=True) + sys.exit(1) diff --git a/pytest.ini b/pytest.ini index 015777f..aac0d26 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,4 +1,4 @@ [pytest] log_cli = true log_cli_level = DEBUG -log_cli_format = [%(asctime)s.%(msecs)03d %(levelname)-8s %(funcName)20s:%(lineno)-4d] %(message)s +log_cli_format = [%(asctime)s.%(msecs)03d %(levelname)-8s %(funcName)s:%(lineno)-4d(%(threadName)s)] %(message)s diff --git a/tests/apic_version_md5_check/topSystem.json b/tests/apic_version_md5_check/topSystem.json deleted file mode 100644 index 65dca38..0000000 --- a/tests/apic_version_md5_check/topSystem.json +++ /dev/null @@ -1,1023 +0,0 @@ -{ - "totalCount": "17", - "imdata": [ - { - "topSystem": { - "attributes": { - "address": "10.2.0.1", - "bootstrapState": "none", - "childAction": "", - "clusterTimeDiff": "0", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:11:04.529-07:00", - "dn": "topology/pod-1/node-1/sys", - "enforceSubnetCheck": "no", - "etepAddr": "0.0.0.0", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - 
"fabricMAC": "00:22:BD:F8:19:FF", - "id": "1", - "inbMgmtAddr": "192.168.12.1", - "inbMgmtAddr6": "fc00::1", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-26T22:37:38.265-07:00", - "lastResetReason": "unknown", - "lcOwn": "local", - "modTs": "2025-03-26T23:17:06.831-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-APIC-1", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "192.168.100.12", - "oobMgmtAddr6": "fe80::86b2:61ff:fe91:932e", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "22", - "oobMgmtGateway": "192.168.100.1", - "oobMgmtGateway6": "fc00::ffff", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "0", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "controller", - "serial": "ABC1234DEFG", - "serverType": "unspecified", - "siteId": "0", - "state": "in-service", - "status": "", - "systemUpTime": "09:13:33:26.000", - "tepPool": "0.0.0.0", - "unicastXrEpLearnDisable": "no", - "version": "6.1(2.145a)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.0.2", - "bootstrapState": "none", - "childAction": "", - "clusterTimeDiff": "108403", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:11:09.229-07:00", - "dn": "topology/pod-1/node-2/sys", - "enforceSubnetCheck": "no", - "etepAddr": "0.0.0.0", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "2", - "inbMgmtAddr": "192.168.12.2", - "inbMgmtAddr6": "fc00::1", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-27T20:07:42.002-07:00", - "lastResetReason": "unknown", - "lcOwn": "local", - "modTs": "2025-03-27T20:37:50.245-07:00", - "mode": "unspecified", - 
"monPolDn": "uni/fabric/monfab-default", - "name": "S2-APIC-2", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "192.168.100.13", - "oobMgmtAddr6": "fe80::86b2:61ff:fe70:9b3e", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "22", - "oobMgmtGateway": "192.168.100.1", - "oobMgmtGateway6": "fc00::ffff", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "0", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "controller", - "serial": "ABC1235DEFG", - "serverType": "unspecified", - "siteId": "0", - "state": "in-service", - "status": "", - "systemUpTime": "08:16:03:27.000", - "tepPool": "0.0.0.0", - "unicastXrEpLearnDisable": "no", - "version": "6.1(2.145a)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.160.66", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "109356", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:11:08.219-07:00", - "dn": "topology/pod-1/node-101/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "101", - "inbMgmtAddr": "192.168.12.101", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "0.0.0.0", - "lastRebootTime": "2025-04-04T12:03:47.240-07:00", - "lastResetReason": "unknown", - "lcOwn": "local", - "modTs": "2025-04-04T12:53:13.476-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Leaf-101", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlOperPodId": "1", - "rlRoutableMode": "no", - 
"rldirectMode": "no", - "role": "leaf", - "serial": "ABC1236DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "01:00:07:21.000", - "tepPool": "10.2.0.0/16", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-15.3(1d)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.0.3", - "bootstrapState": "none", - "childAction": "", - "clusterTimeDiff": "166085", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:10:03.540-07:00", - "dn": "topology/pod-1/node-3/sys", - "enforceSubnetCheck": "no", - "etepAddr": "0.0.0.0", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "3", - "inbMgmtAddr": "192.168.12.3", - "inbMgmtAddr6": "fc00::1", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-19T23:01:21.686-07:00", - "lastResetReason": "unknown", - "lcOwn": "local", - "modTs": "2025-03-19T23:06:46.170-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-APIC-3", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "192.168.100.14", - "oobMgmtAddr6": "fe80::86b2:61ff:fe70:7dde", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "22", - "oobMgmtGateway": "192.168.100.1", - "oobMgmtGateway6": "fc00::ffff", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "0", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "controller", - "serial": "ABC1237DEFG", - "serverType": "unspecified", - "siteId": "0", - "state": "in-service", - "status": "", - "systemUpTime": "16:13:11:28.000", - "tepPool": "0.0.0.0", - "unicastXrEpLearnDisable": "no", - "version": "6.1(2.145a)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.160.96", - "bootstrapState": 
"done", - "childAction": "", - "clusterTimeDiff": "167238", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:10:10.361-07:00", - "dn": "topology/pod-1/node-103/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "103", - "inbMgmtAddr": "192.168.12.103", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:36:23.357-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-27T19:21:12.187-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Leaf-103", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "1", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "leaf", - "serial": "ABC1238DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:33:47.000", - "tepPool": "10.2.0.0/16", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.160.64", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "155713", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:10:21.887-07:00", - "dn": "topology/pod-1/node-104/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "104", - "inbMgmtAddr": "192.168.12.104", - "inbMgmtAddr6": "::", - 
"inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T01:04:51.864-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-27T19:59:43.390-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Leaf-104", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "1", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "leaf", - "serial": "ABC1239DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:05:30.000", - "tepPool": "10.2.0.0/16", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.160.99", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "41285", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:12:16.321-07:00", - "dn": "topology/pod-1/node-106/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "106", - "inbMgmtAddr": "192.168.12.106", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:40:00.676-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:52:58.843-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Leaf-106", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", 
- "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "1", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "leaf", - "serial": "ABC1240DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:32:15.000", - "tepPool": "10.2.0.0/16", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "192.168.221.45", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "160428", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:10:17.178-07:00", - "dn": "topology/pod-1/node-213/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "213", - "inbMgmtAddr": "0.0.0.0", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "0", - "inbMgmtGateway": "0.0.0.0", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:36:57.724-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:50:56.201-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "RL-213", - "nameAlias": "", - "nodeType": "remote-leaf-wan", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "yes", - "rlAutoMode": "yes", - "rlGroupId": "1", - "rlOperPodId": "1", - "rlRoutableMode": "yes", - "rldirectMode": "yes", - "role": "leaf", - "serial": "ABC1241DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": 
"", - "systemUpTime": "16:11:33:19.000", - "tepPool": "192.168.221.0/25", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.3.96.64", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "-46384", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:13:43.988-07:00", - "dn": "topology/pod-2/node-202/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.202.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "202", - "inbMgmtAddr": "192.168.12.212", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:39:48.586-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-26T13:08:21.651-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Leaf-202", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "2", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "2", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "leaf", - "serial": "ABC1242DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:33:55.000", - "tepPool": "10.3.0.0/16", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "192.168.221.89", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "1265729", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T11:51:51.876-07:00", - 
"dn": "topology/pod-1/node-212/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "212", - "inbMgmtAddr": "0.0.0.0", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "0", - "inbMgmtGateway": "0.0.0.0", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:18:05.141-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:49:25.483-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "RL-212", - "nameAlias": "", - "nodeType": "remote-leaf-wan", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "yes", - "rlAutoMode": "yes", - "rlGroupId": "1", - "rlOperPodId": "1", - "rlRoutableMode": "yes", - "rldirectMode": "yes", - "role": "leaf", - "serial": "ABC1243DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:33:47.000", - "tepPool": "192.168.221.0/25", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.160.65", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "94487", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:11:23.132-07:00", - "dn": "topology/pod-1/node-1001/sys", - "enforceSubnetCheck": "no", - "etepAddr": "0.0.0.0", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "1001", - "inbMgmtAddr": "192.168.12.201", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "32", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:38:20.689-07:00", - 
"lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:52:16.469-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Spine-1001", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "1", - "rlRoutableMode": "yes", - "rldirectMode": "yes", - "role": "spine", - "serial": "ABC1244DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:33:03.000", - "tepPool": "10.2.0.0/16", - "unicastXrEpLearnDisable": "no", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "192.168.221.87", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "518949", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:04:18.655-07:00", - "dn": "topology/pod-1/node-214/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "214", - "inbMgmtAddr": "0.0.0.0", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "0", - "inbMgmtGateway": "0.0.0.0", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:31:20.546-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:52:15.931-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "RL-214", - "nameAlias": "", - "nodeType": "remote-leaf-wan", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - 
"remoteNode": "yes", - "rlAutoMode": "yes", - "rlGroupId": "1", - "rlOperPodId": "1", - "rlRoutableMode": "yes", - "rldirectMode": "yes", - "role": "leaf", - "serial": "ABC1245DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:32:58.000", - "tepPool": "192.168.221.0/25", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.3.32.65", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "393685", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:06:23.918-07:00", - "dn": "topology/pod-2/node-201/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.202.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "201", - "inbMgmtAddr": "192.168.12.211", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T01:01:52.297-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T01:22:03.406-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Leaf-201", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "2", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "2", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "leaf", - "serial": "ABC1246DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:04:31.000", - "tepPool": "10.3.0.0/16", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - 
} - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.160.97", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "67158", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:11:50.428-07:00", - "dn": "topology/pod-1/node-1002/sys", - "enforceSubnetCheck": "no", - "etepAddr": "0.0.0.0", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "1002", - "inbMgmtAddr": "192.168.12.202", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "32", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-11T12:54:41.632-07:00", - "lastResetReason": "reload", - "lcOwn": "local", - "modTs": "2025-03-11T13:12:33.668-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Spine-1002", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "1", - "rlRoutableMode": "yes", - "rldirectMode": "yes", - "role": "spine", - "serial": "ABC1247DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "24:23:17:08.000", - "tepPool": "10.2.0.0/16", - "unicastXrEpLearnDisable": "no", - "version": "n9000-16.1(2.145)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.3.32.64", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "532782", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:04:04.823-07:00", - "dn": "topology/pod-2/node-2001/sys", - "enforceSubnetCheck": "no", - "etepAddr": "0.0.0.0", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", 
- "id": "2001", - "inbMgmtAddr": "0.0.0.0", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "0", - "inbMgmtGateway": "0.0.0.0", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:30:53.399-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:50:33.631-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Spine-2001", - "nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "2", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "2", - "rlRoutableMode": "yes", - "rldirectMode": "yes", - "role": "spine", - "serial": "ABC1248DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:33:11.000", - "tepPool": "10.3.0.0/16", - "unicastXrEpLearnDisable": "no", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.2.160.98", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "179134", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:09:58.511-07:00", - "dn": "topology/pod-1/node-102/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "102", - "inbMgmtAddr": "192.168.12.102", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "24", - "inbMgmtGateway": "192.168.12.254", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:36:18.789-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:51:25.528-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "S2-Leaf-102", - 
"nameAlias": "", - "nodeType": "unspecified", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "no", - "rlAutoMode": "no", - "rlGroupId": "0", - "rlOperPodId": "1", - "rlRoutableMode": "no", - "rldirectMode": "no", - "role": "leaf", - "serial": "ABC1249DEFG", - "serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:33:40.000", - "tepPool": "10.2.0.0/16", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "192.168.221.91", - "bootstrapState": "done", - "childAction": "", - "clusterTimeDiff": "-529288", - "configIssues": "", - "controlPlaneMTU": "1400", - "currentTime": "2025-04-05T12:21:46.934-07:00", - "dn": "topology/pod-1/node-211/sys", - "enforceSubnetCheck": "no", - "etepAddr": "192.168.201.1", - "fabricDomain": "S2-Fabric", - "fabricId": "1", - "fabricMAC": "00:22:BD:F8:19:FF", - "id": "211", - "inbMgmtAddr": "0.0.0.0", - "inbMgmtAddr6": "::", - "inbMgmtAddr6Mask": "0", - "inbMgmtAddrMask": "0", - "inbMgmtGateway": "0.0.0.0", - "inbMgmtGateway6": "::", - "lastRebootTime": "2025-03-20T00:47:47.935-07:00", - "lastResetReason": "installer", - "lcOwn": "local", - "modTs": "2025-03-20T00:52:13.287-07:00", - "mode": "unspecified", - "monPolDn": "uni/fabric/monfab-default", - "name": "RL-211", - "nameAlias": "", - "nodeType": "remote-leaf-wan", - "oobMgmtAddr": "0.0.0.0", - "oobMgmtAddr6": "::", - "oobMgmtAddr6Mask": "0", - "oobMgmtAddrMask": "0", - "oobMgmtGateway": "0.0.0.0", - "oobMgmtGateway6": "::", - "podId": "1", - "remoteNetworkId": "0", - "remoteNode": "yes", - "rlAutoMode": "yes", - "rlGroupId": "1", - "rlOperPodId": "1", - "rlRoutableMode": "yes", - "rldirectMode": "yes", - "role": "leaf", - "serial": "ABC1250DEFG", - 
"serverType": "unspecified", - "siteId": "2", - "state": "in-service", - "status": "", - "systemUpTime": "16:11:33:59.000", - "tepPool": "192.168.221.0/25", - "unicastXrEpLearnDisable": "yes", - "version": "n9000-16.1(2.158)", - "virtualMode": "no" - } - } - } - ] -} diff --git a/tests/access_untagged_check/faultInst_NEG.json b/tests/checks/access_untagged_check/faultInst_NEG.json similarity index 100% rename from tests/access_untagged_check/faultInst_NEG.json rename to tests/checks/access_untagged_check/faultInst_NEG.json diff --git a/tests/access_untagged_check/faultInst_POS.json b/tests/checks/access_untagged_check/faultInst_POS.json similarity index 100% rename from tests/access_untagged_check/faultInst_POS.json rename to tests/checks/access_untagged_check/faultInst_POS.json diff --git a/tests/access_untagged_check/test_access_untagged_check.py b/tests/checks/access_untagged_check/test_access_untagged_check.py similarity index 80% rename from tests/access_untagged_check/test_access_untagged_check.py rename to tests/checks/access_untagged_check/test_access_untagged_check.py index 21851c7..f7a6d61 100644 --- a/tests/access_untagged_check/test_access_untagged_check.py +++ b/tests/checks/access_untagged_check/test_access_untagged_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "access_untagged_check" # icurl queries faultInsts = 'faultInst.json?&query-target-filter=wcard(faultInst.changeSet,"native-or-untagged-encap-failure")' @@ -27,6 +28,6 @@ ) ], ) -def test_logic(mock_icurl,expected_result): - result = script.access_untagged_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/aes_encryption_check/exportcryptkey.json b/tests/checks/aes_encryption_check/exportcryptkey.json similarity index 100% rename from tests/aes_encryption_check/exportcryptkey.json 
rename to tests/checks/aes_encryption_check/exportcryptkey.json diff --git a/tests/aes_encryption_check/exportcryptkey_disabled.json b/tests/checks/aes_encryption_check/exportcryptkey_disabled.json similarity index 100% rename from tests/aes_encryption_check/exportcryptkey_disabled.json rename to tests/checks/aes_encryption_check/exportcryptkey_disabled.json diff --git a/tests/aes_encryption_check/test_aes_encryption_check.py b/tests/checks/aes_encryption_check/test_aes_encryption_check.py similarity index 87% rename from tests/aes_encryption_check/test_aes_encryption_check.py rename to tests/checks/aes_encryption_check/test_aes_encryption_check.py index acd6e06..440a860 100644 --- a/tests/aes_encryption_check/test_aes_encryption_check.py +++ b/tests/checks/aes_encryption_check/test_aes_encryption_check.py @@ -9,6 +9,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "aes_encryption_check" + # icurl queries exportcryptkey = "uni/exportcryptkey.json" @@ -55,6 +57,6 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.aes_encryption_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/apic_ca_cert_validation/NEG_certreq.txt b/tests/checks/apic_ca_cert_validation/NEG_certreq.txt similarity index 100% rename from tests/apic_ca_cert_validation/NEG_certreq.txt rename to tests/checks/apic_ca_cert_validation/NEG_certreq.txt diff --git a/tests/apic_ca_cert_validation/POS_certreq.txt b/tests/checks/apic_ca_cert_validation/POS_certreq.txt similarity index 100% rename from tests/apic_ca_cert_validation/POS_certreq.txt rename to tests/checks/apic_ca_cert_validation/POS_certreq.txt diff --git a/tests/apic_ca_cert_validation/test_apic_ca_cert_validation.py 
b/tests/checks/apic_ca_cert_validation/test_apic_ca_cert_validation.py similarity index 69% rename from tests/apic_ca_cert_validation/test_apic_ca_cert_validation.py rename to tests/checks/apic_ca_cert_validation/test_apic_ca_cert_validation.py index d76d955..338a66c 100644 --- a/tests/apic_ca_cert_validation/test_apic_ca_cert_validation.py +++ b/tests/checks/apic_ca_cert_validation/test_apic_ca_cert_validation.py @@ -8,6 +8,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "apic_ca_cert_validation" + @pytest.mark.parametrize( "certreq_out_file, expected_result", @@ -24,9 +26,9 @@ ), ], ) -def test_logic(certreq_out_file, expected_result): - data_path = os.path.join("tests", dir, certreq_out_file) +def test_logic(run_check, certreq_out_file, expected_result): + data_path = os.path.join("tests", "checks", dir, certreq_out_file) with open(data_path, "r") as file: certreq_out = file.read() - result = script.apic_ca_cert_validation(1, 1, certreq_out=certreq_out) - assert result == expected_result + result = run_check(certreq_out=certreq_out) + assert result.result == expected_result diff --git a/tests/apic_database_size_check/infraWiNode_3.json b/tests/checks/apic_database_size_check/infraWiNode_3.json similarity index 100% rename from tests/apic_database_size_check/infraWiNode_3.json rename to tests/checks/apic_database_size_check/infraWiNode_3.json diff --git a/tests/apic_database_size_check/infraWiNode_4.json b/tests/checks/apic_database_size_check/infraWiNode_4.json similarity index 100% rename from tests/apic_database_size_check/infraWiNode_4.json rename to tests/checks/apic_database_size_check/infraWiNode_4.json diff --git a/tests/apic_database_size_check/test_apic_database_size_check.py b/tests/checks/apic_database_size_check/test_apic_database_size_check.py similarity index 96% rename from tests/apic_database_size_check/test_apic_database_size_check.py rename to 
tests/checks/apic_database_size_check/test_apic_database_size_check.py index 73eb62c..8a651bc 100644 --- a/tests/apic_database_size_check/test_apic_database_size_check.py +++ b/tests/checks/apic_database_size_check/test_apic_database_size_check.py @@ -2,7 +2,6 @@ import pytest import logging import importlib -import json from helpers.utils import read_data script = importlib.import_module("aci-preupgrade-validation-script") @@ -10,6 +9,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "apic_database_size_check" + apic_node_api = 'infraWiNode.json' apic1_pm_cat = "cat /debug/apic1/policymgr/mitmocounters/mo | grep -v ALL | sort -rn -k3" @@ -166,6 +167,7 @@ ] }""" + @pytest.mark.parametrize( "icurl_outputs, cmd_outputs, cversion, expected_result", [ @@ -292,10 +294,11 @@ ), ], ) -def test_logic(mock_icurl, mock_run_cmd, cversion, expected_result): - cver = script.AciVersion(cversion) if cversion else None - result = script.apic_database_size_check(1, 1, cver) - assert result == expected_result +def test_logic(run_check, mock_icurl, mock_run_cmd, cversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion) if cversion else None + ) + assert result.result == expected_result @pytest.mark.parametrize( @@ -326,7 +329,8 @@ def test_logic(mock_icurl, mock_run_cmd, cversion, expected_result): ), ], ) -def test_permission_logic(mock_icurl, mock_run_cmd, cversion, expected_result): - cver = script.AciVersion(cversion) if cversion else None - result = script.apic_database_size_check(1, 1, cver) - assert result == expected_result +def test_permission_logic(run_check, mock_icurl, mock_run_cmd, cversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion) if cversion else None + ) + assert result.result == expected_result diff --git a/tests/checks/apic_ssd_check/fabricNode.json b/tests/checks/apic_ssd_check/fabricNode.json new file mode 100644 index 0000000..962a4ad --- 
/dev/null +++ b/tests/checks/apic_ssd_check/fabricNode.json @@ -0,0 +1,93 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] + diff --git a/tests/checks/apic_ssd_check/fabricNode_no_apic.json b/tests/checks/apic_ssd_check/fabricNode_no_apic.json new file mode 100644 index 
0000000..254f40d --- /dev/null +++ b/tests/checks/apic_ssd_check/fabricNode_no_apic.json @@ -0,0 +1,48 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] + diff --git a/tests/checks/apic_ssd_check/fabricNode_old.json b/tests/checks/apic_ssd_check/fabricNode_old.json new file mode 100644 index 0000000..f71fb9f --- /dev/null +++ b/tests/checks/apic_ssd_check/fabricNode_old.json @@ -0,0 +1,62 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "1", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic1", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-2", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "2", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic2", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-3", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "3", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic3", + "model": "APIC-SERVER-M1" + } 
+ } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "nodeType": "unspecified", + "id": "101", + "version": "", + "role": "leaf", + "adSt": "on", + "name": "leaf1", + "model": "N9K-C9396PX" + } + } + } +] diff --git a/tests/checks/apic_ssd_check/fault_F2731.json b/tests/checks/apic_ssd_check/fault_F2731.json new file mode 100644 index 0000000..578f682 --- /dev/null +++ b/tests/checks/apic_ssd_check/fault_F2731.json @@ -0,0 +1,12 @@ +[ + { + "faultInst": { + "attributes": { + "code": "F2731", + "changeSet": "mediaWearout (Old: 2, New: 1)", + "description": "Storage unit /dev/sdb on Node 3 mounted at /dev/sdb has 1% life remaining", + "dn": "topology/pod-2/node-3/sys/ch/p-[/dev/sdb]-f-[/dev/sdb]/fault-F2731" + } + } + } +] diff --git a/tests/checks/apic_ssd_check/infraWiNode_apic1.json b/tests/checks/apic_ssd_check/infraWiNode_apic1.json new file mode 100644 index 0000000..b6626d0 --- /dev/null +++ b/tests/checks/apic_ssd_check/infraWiNode_apic1.json @@ -0,0 +1,62 @@ +[ + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.1", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": "topology/pod-1/node-1/av/node-1", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "1", + "mbSn": "FCH1234ABCD", + "name": "", + "nodeName": "apic1", + "operSt": "available", + "podId": "0", + "targetMbSn": "" + } + } + }, + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.2", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": "topology/pod-1/node-1/av/node-2", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "2", + "mbSn": "FCH1235ABCD", + "name": "", + "nodeName": "apic2", + "operSt": "available", + "podId": "0", + "targetMbSn": "" + } + } + }, + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.3", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": 
"topology/pod-1/node-1/av/node-3", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "3", + "mbSn": "FCH1236ABCD", + "name": "", + "nodeName": "apic3", + "operSt": "available", + "podId": "1", + "targetMbSn": "" + } + } + } +] diff --git a/tests/checks/apic_ssd_check/test_apic_ssd_check.py b/tests/checks/apic_ssd_check/test_apic_ssd_check.py new file mode 100644 index 0000000..b0cc678 --- /dev/null +++ b/tests/checks/apic_ssd_check/test_apic_ssd_check.py @@ -0,0 +1,214 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "apic_ssd_check" + + +faultInst = 'faultInst.json?query-target-filter=or(eq(faultInst.code,"F2731"),eq(faultInst.code,"F2732"))' +infraWiNode = "topology/pod-1/node-1/infraWiNode.json" + +apic_ips = [ + node["fabricNode"]["attributes"]["address"] + for node in read_data(dir, "fabricNode.json") + if node["fabricNode"]["attributes"]["role"] == "controller" +] + +grep_cmd = 'grep -oE "SSD Wearout Indicator is [0-9]+" /var/log/dme/log/svc_ifc_ae.bin.log | tail -1' +grep_output_hit = "5504||2023-01-11T22:11:26.851446656+00:00||ifc_ae||DBG4||fn=[getWearout]||SSD Wearout Indicator is 4||../svc/ae/src/gen/ifc/beh/imp/./eqpt/StorageBI.cc||395" +grep_output_no_hit = "5504||2023-01-11T22:11:26.851446656+00:00||ifc_ae||DBG4||fn=[getWearout]||SSD Wearout Indicator is 5||../svc/ae/src/gen/ifc/beh/imp/./eqpt/StorageBI.cc||395" + + +@pytest.mark.parametrize( + "icurl_outputs, conn_failure, conn_cmds, cversion, fabric_nodes, expected_result, expected_data", + [ + # New Versions, F273x are effective and raised + ( + {faultInst: read_data(dir, "fault_F2731.json")}, + False, + [], + "4.2(7w)", + read_data(dir, "fabricNode.json"), + script.FAIL_UF, + [["3", "apic3", "/dev/sdb", "<5% (Fault F2731)", "Contact TAC for replacement"]], + ), + 
( + {faultInst: read_data(dir, "fault_F2731.json")}, + False, + [], + "5.2(1h)", + read_data(dir, "fabricNode.json"), + script.FAIL_UF, + [["3", "apic3", "/dev/sdb", "<5% (Fault F2731)", "Contact TAC for replacement"]], + ), + # New Versions, F273x are effective and NOT raised + ( + {faultInst: []}, + False, + [], + "4.2(7w)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + ( + {faultInst: []}, + False, + [], + "5.2(1h)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + # Old Versions, but F273x was still raised. + ( + {faultInst: read_data(dir, "fault_F2731.json")}, + False, + [], + "4.2(6o)", + read_data(dir, "fabricNode.json"), + script.FAIL_UF, + [["3", "apic3", "/dev/sdb", "<5% (Fault F2731)", "Contact TAC for replacement"]], + ), + + # --- Old Versions, no F273x was raised. --- + + # No fabricNode for APICs + ( + {faultInst: []}, + False, + [], + "4.2(6o)", + read_data(dir, "fabricNode_no_apic.json"), + script.ERROR, + [], + ), + # Exception failure at the very first connection() + ( + {faultInst: []}, + True, + [], + "4.2(6o)", + read_data(dir, "fabricNode.json"), + script.ERROR, + [ + ["1", "apic1", "-", "-", "Simulated exception at connect()"], + ["2", "apic2", "-", "-", "Simulated exception at connect()"], + ["3", "apic3", "-", "-", "Simulated exception at connect()"], + ], + ), + # Exception failure at the grep command + ( + {faultInst: []}, + False, + { + apic_ip: [ + { + "cmd": grep_cmd, + "output": "", + "exception": Exception("Simulated exception at `grep` command"), + } + ] + for apic_ip in apic_ips + }, + "4.2(6o)", + read_data(dir, "fabricNode.json"), + script.ERROR, + [ + ["1", "apic1", "-", "-", "Simulated exception at `grep` command"], + ["2", "apic2", "-", "-", "Simulated exception at `grep` command"], + ["3", "apic3", "-", "-", "Simulated exception at `grep` command"], + ], + ), + # SSD Wearout Indicator is less than 5 + ( + {faultInst: []}, + False, + { + apic_ips[0]: [ + { + "cmd": grep_cmd, + "output": 
"\n".join([grep_cmd, grep_output_hit]), + "exception": None, + }, + ], + apic_ips[1]: [ + { + "cmd": grep_cmd, + "output": "\n".join([grep_cmd, grep_output_no_hit]), + "exception": None, + }, + ], + apic_ips[2]: [ + { + "cmd": grep_cmd, + "output": "\n".join([grep_cmd, grep_output_hit]), + "exception": None, + }, + ], + }, + "4.2(6o)", + read_data(dir, "fabricNode.json"), + script.FAIL_UF, + [ + ["1", "apic1", "Solid State Disk", "4", "Contact TAC for replacement"], + ["2", "apic2", "Solid State Disk", "5", "No Action Required"], + ["3", "apic3", "Solid State Disk", "4", "Contact TAC for replacement"], + ], + ), + # Pass + ( + {faultInst: []}, + False, + { + apic_ip: [ + { + "cmd": grep_cmd, + "output": "\n".join([grep_cmd, grep_output_no_hit]), + "exception": None, + }, + ] + for apic_ip in apic_ips + }, + "4.2(6o)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + # Pass (pre-4.0 with infraWiNode) + ( + {faultInst: [], infraWiNode: read_data(dir, "infraWiNode_apic1.json")}, + False, + { + apic_ip: [ + { + "cmd": grep_cmd, + "output": "\n".join([grep_cmd, grep_output_no_hit]), + "exception": None, + }, + ] + for apic_ip in apic_ips + }, + "4.2(6o)", + read_data(dir, "fabricNode_old.json"), + script.PASS, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, mock_conn, cversion, fabric_nodes, expected_result, expected_data): + result = run_check( + cversion=script.AciVersion(cversion), + username="fake_username", + password="fake_password", + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/checks/apic_version_md5_check/fabricNode.json b/tests/checks/apic_version_md5_check/fabricNode.json new file mode 100644 index 0000000..962a4ad --- /dev/null +++ b/tests/checks/apic_version_md5_check/fabricNode.json @@ -0,0 +1,93 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + 
"model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] + diff --git a/tests/checks/apic_version_md5_check/fabricNode_no_apic.json b/tests/checks/apic_version_md5_check/fabricNode_no_apic.json new file mode 100644 index 0000000..254f40d --- /dev/null +++ b/tests/checks/apic_version_md5_check/fabricNode_no_apic.json @@ -0,0 +1,48 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": 
"topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] + diff --git a/tests/checks/apic_version_md5_check/fabricNode_old.json b/tests/checks/apic_version_md5_check/fabricNode_old.json new file mode 100644 index 0000000..f71fb9f --- /dev/null +++ b/tests/checks/apic_version_md5_check/fabricNode_old.json @@ -0,0 +1,62 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "1", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic1", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-2", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "2", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic2", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-3", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "3", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic3", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "nodeType": "unspecified", + "id": "101", + 
"version": "", + "role": "leaf", + "adSt": "on", + "name": "leaf1", + "model": "N9K-C9396PX" + } + } + } +] diff --git a/tests/apic_version_md5_check/firmwareFirmware_6.0.5h.json b/tests/checks/apic_version_md5_check/firmwareFirmware_6.0.5h.json similarity index 100% rename from tests/apic_version_md5_check/firmwareFirmware_6.0.5h.json rename to tests/checks/apic_version_md5_check/firmwareFirmware_6.0.5h.json diff --git a/tests/apic_version_md5_check/firmwareFirmware_6.0.5h_image_sign_fail.json b/tests/checks/apic_version_md5_check/firmwareFirmware_6.0.5h_image_sign_fail.json similarity index 100% rename from tests/apic_version_md5_check/firmwareFirmware_6.0.5h_image_sign_fail.json rename to tests/checks/apic_version_md5_check/firmwareFirmware_6.0.5h_image_sign_fail.json diff --git a/tests/checks/apic_version_md5_check/infraWiNode_apic1.json b/tests/checks/apic_version_md5_check/infraWiNode_apic1.json new file mode 100644 index 0000000..b6626d0 --- /dev/null +++ b/tests/checks/apic_version_md5_check/infraWiNode_apic1.json @@ -0,0 +1,62 @@ +[ + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.1", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": "topology/pod-1/node-1/av/node-1", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "1", + "mbSn": "FCH1234ABCD", + "name": "", + "nodeName": "apic1", + "operSt": "available", + "podId": "0", + "targetMbSn": "" + } + } + }, + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.2", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": "topology/pod-1/node-1/av/node-2", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "2", + "mbSn": "FCH1235ABCD", + "name": "", + "nodeName": "apic2", + "operSt": "available", + "podId": "0", + "targetMbSn": "" + } + } + }, + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.3", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": 
"topology/pod-1/node-1/av/node-3", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "3", + "mbSn": "FCH1236ABCD", + "name": "", + "nodeName": "apic3", + "operSt": "available", + "podId": "1", + "targetMbSn": "" + } + } + } +] diff --git a/tests/apic_version_md5_check/test_apic_version_md5_check.py b/tests/checks/apic_version_md5_check/test_apic_version_md5_check.py similarity index 56% rename from tests/apic_version_md5_check/test_apic_version_md5_check.py rename to tests/checks/apic_version_md5_check/test_apic_version_md5_check.py index 19ea263..32605ad 100644 --- a/tests/apic_version_md5_check/test_apic_version_md5_check.py +++ b/tests/checks/apic_version_md5_check/test_apic_version_md5_check.py @@ -9,14 +9,16 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "apic_version_md5_check" + + api_firmware = "fwrepo/fw-aci-apic-dk9.6.0.5h.json" -api_topSystem = "topSystem.json" +api_infraWiNode = "topology/pod-1/node-1/infraWiNode.json" -topSystems = read_data(dir, "topSystem.json") apic_ips = [ - mo["topSystem"]["attributes"]["address"] - for mo in topSystems["imdata"] - if mo["topSystem"]["attributes"]["role"] == "controller" + node["fabricNode"]["attributes"]["address"] + for node in read_data(dir, "fabricNode.json") + if node["fabricNode"]["attributes"]["role"] == "controller" ] ls_cmd = "ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin" @@ -49,38 +51,47 @@ @pytest.mark.parametrize( - "icurl_outputs, conn_failure, conn_cmds, tversion, expected_result", + "icurl_outputs, conn_failure, conn_cmds, tversion, fabric_nodes, expected_result, expected_data", [ # tversion missing - ({}, False, [], None, script.MANUAL), + ({}, False, [], None, read_data(dir, "fabricNode.json"), script.MANUAL, []), # Image signing failure shown in firmwareFirmware ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h_image_sign_fail.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: 
read_data(dir, "firmwareFirmware_6.0.5h_image_sign_fail.json")}, False, [], "6.0(5h)", + read_data(dir, "fabricNode.json"), script.FAIL_UF, + [["All", "-", "6.0(5h)", "d5afca58fce2018495d068c000000000", "Target image is corrupted"]], + ), + # No fabricNode for APICs + ( + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, + False, + [], + "6.0(5h)", + read_data(dir, "fabricNode_no_apic.json"), + script.ERROR, + [], ), # Exception failure at the very first connection() ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, True, [], "6.0(5h)", + read_data(dir, "fabricNode.json"), script.ERROR, + [ + ["1", "apic1", "-", "-", "Simulated exception at connect()"], + ["2", "apic2", "-", "-", "Simulated exception at connect()"], + ["3", "apic3", "-", "-", "Simulated exception at connect()"], + ], ), # Exception failure at the ls command ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, False, { apic_ip: [ @@ -93,14 +104,17 @@ for apic_ip in apic_ips }, "6.0(5h)", + read_data(dir, "fabricNode.json"), script.ERROR, + [ + ["1", "apic1", "-", "-", "ls command via ssh failed due to:Simulated exception at `ls` command"], + ["2", "apic2", "-", "-", "ls command via ssh failed due to:Simulated exception at `ls` command"], + ["3", "apic3", "-", "-", "ls command via ssh failed due to:Simulated exception at `ls` command"], + ], ), # No such file output from the ls command ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, False, { apic_ip: [ @@ -113,14 +127,17 @@ for apic_ip in apic_ips }, "6.0(5h)", + read_data(dir, "fabricNode.json"), script.FAIL_UF, + [ + 
["1", "apic1", "6.0(5h)", "-", "image not found"], + ["2", "apic2", "6.0(5h)", "-", "image not found"], + ["3", "apic3", "6.0(5h)", "-", "image not found"], + ], ), # Exception failure at the cat command ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, False, { apic_ip: [ @@ -138,14 +155,17 @@ for apic_ip in apic_ips }, "6.0(5h)", + read_data(dir, "fabricNode.json"), script.ERROR, + [ + ["1", "apic1", "6.0(5h)", "-", "failed to check md5sum via ssh due to:Simulated exception at `cat` command"], + ["2", "apic2", "6.0(5h)", "-", "failed to check md5sum via ssh due to:Simulated exception at `cat` command"], + ["3", "apic3", "6.0(5h)", "-", "failed to check md5sum via ssh due to:Simulated exception at `cat` command"], + ], ), # No such file output from the cat command ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, False, { apic_ip: [ @@ -163,14 +183,17 @@ for apic_ip in apic_ips }, "6.0(5h)", + read_data(dir, "fabricNode.json"), script.FAIL_UF, + [ + ["1", "apic1", "6.0(5h)", "-", "md5sum file not found"], + ["2", "apic2", "6.0(5h)", "-", "md5sum file not found"], + ["3", "apic3", "6.0(5h)", "-", "md5sum file not found"], + ], ), # Unexpected output from the cat command ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, False, { apic_ip: [ @@ -188,14 +211,17 @@ for apic_ip in apic_ips }, "6.0(5h)", + read_data(dir, "fabricNode.json"), script.ERROR, + [ + ["1", "apic1", "6.0(5h)", "-", "unexpected output when checking md5sum file"], + ["2", "apic2", "6.0(5h)", "-", "unexpected output when checking md5sum file"], + ["3", "apic3", "6.0(5h)", "-", 
"unexpected output when checking md5sum file"], + ], ), # Failure because md5sum on each APIC do not match ( - { - api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), - }, + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, False, { apic_ips[0]: [ @@ -236,13 +262,43 @@ ], }, "6.0(5h)", + read_data(dir, "fabricNode.json"), script.FAIL_UF, + [ + ["1", "apic1", "6.0(5h)", "d5afca58fce2018495d068c44eb4a547", "md5sum do not match on all APICs"], + ["2", "apic2", "6.0(5h)", "d5afca58fce2018495d068c000000000", "md5sum do not match on all APICs"], + ["3", "apic3", "6.0(5h)", "d5afca58fce2018495d068c44eb4a547", "md5sum do not match on all APICs"], + ], ), # Pass + ( + {api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json")}, + False, + { + apic_ip: [ + { + "cmd": ls_cmd, + "output": "\n".join([ls_cmd, ls_output]), + "exception": None, + }, + { + "cmd": cat_cmd, + "output": "\n".join([cat_cmd, cat_output]), + "exception": None, + }, + ] + for apic_ip in apic_ips + }, + "6.0(5h)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + # Pass (pre-4.0 with infraWiNode) ( { api_firmware: read_data(dir, "firmwareFirmware_6.0.5h.json"), - api_topSystem: read_data(dir, "topSystem.json"), + api_infraWiNode: read_data(dir, "infraWiNode_apic1.json"), }, False, { @@ -261,11 +317,18 @@ for apic_ip in apic_ips }, "6.0(5h)", + read_data(dir, "fabricNode_old.json"), script.PASS, + [], ), ], ) -def test_logic(mock_icurl, mock_conn, tversion, expected_result): - tver = script.AciVersion(tversion) if tversion else None - result = script.apic_version_md5_check(1, 1, tver, "fake_username", "fake_password") - assert result == expected_result +def test_logic(run_check, mock_icurl, mock_conn, tversion, fabric_nodes, expected_result, expected_data): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, + username="fake_username", + password="fake_password", + 
fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/ave_eol_check/test_ave_eol_check.py b/tests/checks/ave_eol_check/test_ave_eol_check.py similarity index 81% rename from tests/ave_eol_check/test_ave_eol_check.py rename to tests/checks/ave_eol_check/test_ave_eol_check.py index 38db438..0bf67c0 100644 --- a/tests/ave_eol_check/test_ave_eol_check.py +++ b/tests/checks/ave_eol_check/test_ave_eol_check.py @@ -9,12 +9,13 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "ave_eol_check" # icurl queries - ave_api = 'vmmDomP.json' ave_api += '?query-target-filter=eq(vmmDomP.enableAVE,"true")' + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ @@ -44,6 +45,8 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.ave_eol_check(1, 1, script.AciVersion(tversion) if tversion else None) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, + ) + assert result.result == expected_result diff --git a/tests/ave_eol_check/vmmDomP_POS.json b/tests/checks/ave_eol_check/vmmDomP_POS.json similarity index 100% rename from tests/ave_eol_check/vmmDomP_POS.json rename to tests/checks/ave_eol_check/vmmDomP_POS.json diff --git a/tests/bgp_golf_route_target_type_check/fvCtx_pos.json b/tests/checks/bgp_golf_route_target_type_check/fvCtx_pos.json similarity index 100% rename from tests/bgp_golf_route_target_type_check/fvCtx_pos.json rename to tests/checks/bgp_golf_route_target_type_check/fvCtx_pos.json diff --git a/tests/bgp_golf_route_target_type_check/test_bgp_golf_route_target_type_check.py b/tests/checks/bgp_golf_route_target_type_check/test_bgp_golf_route_target_type_check.py similarity index 83% rename from 
tests/bgp_golf_route_target_type_check/test_bgp_golf_route_target_type_check.py rename to tests/checks/bgp_golf_route_target_type_check/test_bgp_golf_route_target_type_check.py index 2fc2e58..56e7145 100644 --- a/tests/bgp_golf_route_target_type_check/test_bgp_golf_route_target_type_check.py +++ b/tests/checks/bgp_golf_route_target_type_check/test_bgp_golf_route_target_type_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "bgp_golf_route_target_type_check" # icurl queries fvCtxs = 'fvCtx.json?rsp-subtree=full&rsp-subtree-class=l3extGlobalCtxName,bgpRtTarget&rsp-subtree-include=required' @@ -61,11 +62,9 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.bgp_golf_route_target_type_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_neg.json b/tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_neg.json similarity index 100% rename from tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_neg.json rename to tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_neg.json diff --git a/tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos.json b/tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos.json similarity index 100% rename from tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos.json rename to tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos.json diff --git a/tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos1.json b/tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos1.json similarity index 100% rename from 
tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos1.json rename to tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos1.json diff --git a/tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos2.json b/tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos2.json similarity index 100% rename from tests/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos2.json rename to tests/checks/bgp_peer_loopback_check/l3extRsNodeL3OutAtt_pos2.json diff --git a/tests/bgp_peer_loopback_check/test_bgp_peer_loopback_check.py b/tests/checks/bgp_peer_loopback_check/test_bgp_peer_loopback_check.py similarity index 87% rename from tests/bgp_peer_loopback_check/test_bgp_peer_loopback_check.py rename to tests/checks/bgp_peer_loopback_check/test_bgp_peer_loopback_check.py index a9e5d36..d08522e 100644 --- a/tests/bgp_peer_loopback_check/test_bgp_peer_loopback_check.py +++ b/tests/checks/bgp_peer_loopback_check/test_bgp_peer_loopback_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "bgp_peer_loopback_check" # icurl queries l3extLNodePs = "l3extLNodeP.json?rsp-subtree=full&rsp-subtree-class=bgpPeerP,l3extRsNodeL3OutAtt,l3extLoopBackIfP" @@ -39,6 +40,6 @@ ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.bgp_peer_loopback_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/cimc_compatibilty_check/compatRsSuppHw_605_L2.json b/tests/checks/cimc_compatibilty_check/compatRsSuppHw_605_L2.json similarity index 100% rename from tests/cimc_compatibilty_check/compatRsSuppHw_605_L2.json rename to tests/checks/cimc_compatibilty_check/compatRsSuppHw_605_L2.json diff --git a/tests/cimc_compatibilty_check/compatRsSuppHw_605_M1.json b/tests/checks/cimc_compatibilty_check/compatRsSuppHw_605_M1.json similarity index 100% rename from 
tests/cimc_compatibilty_check/compatRsSuppHw_605_M1.json rename to tests/checks/cimc_compatibilty_check/compatRsSuppHw_605_M1.json diff --git a/tests/cimc_compatibilty_check/compatRsSuppHw_empty.json b/tests/checks/cimc_compatibilty_check/compatRsSuppHw_empty.json similarity index 100% rename from tests/cimc_compatibilty_check/compatRsSuppHw_empty.json rename to tests/checks/cimc_compatibilty_check/compatRsSuppHw_empty.json diff --git a/tests/cimc_compatibilty_check/eqptCh_newver.json b/tests/checks/cimc_compatibilty_check/eqptCh_newver.json similarity index 100% rename from tests/cimc_compatibilty_check/eqptCh_newver.json rename to tests/checks/cimc_compatibilty_check/eqptCh_newver.json diff --git a/tests/cimc_compatibilty_check/eqptCh_oldver.json b/tests/checks/cimc_compatibilty_check/eqptCh_oldver.json similarity index 100% rename from tests/cimc_compatibilty_check/eqptCh_oldver.json rename to tests/checks/cimc_compatibilty_check/eqptCh_oldver.json diff --git a/tests/cimc_compatibilty_check/eqptCh_reallyoldver.json b/tests/checks/cimc_compatibilty_check/eqptCh_reallyoldver.json similarity index 100% rename from tests/cimc_compatibilty_check/eqptCh_reallyoldver.json rename to tests/checks/cimc_compatibilty_check/eqptCh_reallyoldver.json diff --git a/tests/cimc_compatibilty_check/test_cimc_compatibilty_check.py b/tests/checks/cimc_compatibilty_check/test_cimc_compatibilty_check.py similarity index 60% rename from tests/cimc_compatibilty_check/test_cimc_compatibilty_check.py rename to tests/checks/cimc_compatibilty_check/test_cimc_compatibilty_check.py index 9ce8af6..cb587fb 100644 --- a/tests/cimc_compatibilty_check/test_cimc_compatibilty_check.py +++ b/tests/checks/cimc_compatibilty_check/test_cimc_compatibilty_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "cimc_compatibilty_check" # icurl queries eqptCh_api = 'eqptCh.json?query-target-filter=wcard(eqptCh.descr,"APIC")' @@ -16,40 
+17,41 @@ compatRsSuppHwL2_api = 'uni/fabric/compcat-default/ctlrfw-apic-6.0(5)/rssuppHw-[uni/fabric/compcat-default/ctlrhw-apicl2].json' compatRsSuppHwM1_api = 'uni/fabric/compcat-default/ctlrfw-apic-6.0(5)/rssuppHw-[uni/fabric/compcat-default/ctlrhw-apicm1].json' + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ ( {eqptCh_api: read_data(dir, "eqptCh_reallyoldver.json"), - compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), - compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_605_M1.json")}, + compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), + compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_605_M1.json")}, "6.0(5a)", script.FAIL_UF, ), ( {eqptCh_api: read_data(dir, "eqptCh_oldver.json"), - compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), - compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_605_M1.json")}, + compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), + compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_605_M1.json")}, "6.0(5a)", script.FAIL_UF, ), ( {eqptCh_api: read_data(dir, "eqptCh_newver.json"), - compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), - compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_605_M1.json")}, + compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), + compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_605_M1.json")}, "6.0(5a)", script.PASS, ), # Seen in QA testing where version + model does not have catalog entry ( {eqptCh_api: read_data(dir, "eqptCh_newver.json"), - compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), - compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_empty.json")}, + compatRsSuppHwL2_api: read_data(dir, "compatRsSuppHw_605_L2.json"), + compatRsSuppHwM1_api: read_data(dir, "compatRsSuppHw_empty.json")}, "6.0(5a)", script.MANUAL, ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.cimc_compatibilty_check(1, 1, 
script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/clock_signal_component_failure_check/eqptFC_NEG.json b/tests/checks/clock_signal_component_failure_check/eqptFC_NEG.json similarity index 100% rename from tests/clock_signal_component_failure_check/eqptFC_NEG.json rename to tests/checks/clock_signal_component_failure_check/eqptFC_NEG.json diff --git a/tests/clock_signal_component_failure_check/eqptFC_POS.json b/tests/checks/clock_signal_component_failure_check/eqptFC_POS.json similarity index 100% rename from tests/clock_signal_component_failure_check/eqptFC_POS.json rename to tests/checks/clock_signal_component_failure_check/eqptFC_POS.json diff --git a/tests/clock_signal_component_failure_check/eqptLC_NEG.json b/tests/checks/clock_signal_component_failure_check/eqptLC_NEG.json similarity index 100% rename from tests/clock_signal_component_failure_check/eqptLC_NEG.json rename to tests/checks/clock_signal_component_failure_check/eqptLC_NEG.json diff --git a/tests/clock_signal_component_failure_check/eqptLC_POS.json b/tests/checks/clock_signal_component_failure_check/eqptLC_POS.json similarity index 100% rename from tests/clock_signal_component_failure_check/eqptLC_POS.json rename to tests/checks/clock_signal_component_failure_check/eqptLC_POS.json diff --git a/tests/clock_signal_component_failure_check/test_clock_signal_component_failure_check.py b/tests/checks/clock_signal_component_failure_check/test_clock_signal_component_failure_check.py similarity index 88% rename from tests/clock_signal_component_failure_check/test_clock_signal_component_failure_check.py rename to tests/checks/clock_signal_component_failure_check/test_clock_signal_component_failure_check.py index bf1b668..a1b017e 100644 --- 
a/tests/clock_signal_component_failure_check/test_clock_signal_component_failure_check.py +++ b/tests/checks/clock_signal_component_failure_check/test_clock_signal_component_failure_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "clock_signal_component_failure_check" eqptFC_api = 'eqptFC.json' eqptFC_api += '?query-target-filter=or(eq(eqptFC.model,"N9K-C9504-FM-E"),eq(eqptFC.model,"N9K-C9508-FM-E"))' @@ -52,6 +53,6 @@ ) ], ) -def test_logic(mock_icurl,expected_result): - result = script.clock_signal_component_failure_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_err.json b/tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_err.json similarity index 100% rename from tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_err.json rename to tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_err.json diff --git a/tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_neg.json b/tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_neg.json similarity index 100% rename from tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_neg.json rename to tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_neg.json diff --git a/tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos.json b/tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos.json similarity index 100% rename from tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos.json rename to tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos.json diff --git a/tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos2.json b/tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos2.json similarity index 100% rename from 
tests/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos2.json rename to tests/checks/cloudsec_encryption_depr_check/cloudsecPreSharedKey_pos2.json diff --git a/tests/cloudsec_encryption_depr_check/test_cloudsec_encryption_depr_check.py b/tests/checks/cloudsec_encryption_depr_check/test_cloudsec_encryption_depr_check.py similarity index 89% rename from tests/cloudsec_encryption_depr_check/test_cloudsec_encryption_depr_check.py rename to tests/checks/cloudsec_encryption_depr_check/test_cloudsec_encryption_depr_check.py index 1378914..16678e3 100644 --- a/tests/cloudsec_encryption_depr_check/test_cloudsec_encryption_depr_check.py +++ b/tests/checks/cloudsec_encryption_depr_check/test_cloudsec_encryption_depr_check.py @@ -10,6 +10,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "cloudsec_encryption_depr_check" # icurl queries cloudsecPreSharedKey = 'cloudsecPreSharedKey.json' @@ -56,6 +57,6 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.cloudsec_encryption_depr_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/configpush_shard_check/configpushShardCont_pos.json b/tests/checks/configpush_shard_check/configpushShardCont_pos.json similarity index 100% rename from tests/configpush_shard_check/configpushShardCont_pos.json rename to tests/checks/configpush_shard_check/configpushShardCont_pos.json diff --git a/tests/configpush_shard_check/test_configpush_shard_check.py b/tests/checks/configpush_shard_check/test_configpush_shard_check.py similarity index 86% rename from tests/configpush_shard_check/test_configpush_shard_check.py rename to tests/checks/configpush_shard_check/test_configpush_shard_check.py index 341f566..3e48224 100644 --- 
a/tests/configpush_shard_check/test_configpush_shard_check.py +++ b/tests/checks/configpush_shard_check/test_configpush_shard_check.py @@ -9,10 +9,13 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "configpush_shard_check" + # icurl queries configpushShardCont_api = 'configpushShardCont.json' configpushShardCont_api += '?query-target-filter=and(eq(configpushShardCont.tailTx,"0"),ne(configpushShardCont.headTx,"0"))' + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ @@ -50,7 +53,8 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - tversion = script.AciVersion(tversion) if tversion else None - result = script.configpush_shard_check(1, 1, tversion) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, + ) + assert result.result == expected_result diff --git a/tests/checks/conftest.py b/tests/checks/conftest.py new file mode 100644 index 0000000..01cd887 --- /dev/null +++ b/tests/checks/conftest.py @@ -0,0 +1,137 @@ +import pytest +import logging +import importlib +from subprocess import CalledProcessError + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger() + + +@pytest.fixture +def run_check(request): + def _run_check(**kwargs): + test_function = getattr(request.module, "test_function") + cm = script.CheckManager(debug_function=test_function, monitor_interval=0.01) + cm.initialize_checks() + + err = "Unable to find test_function ({}) in CheckManager".format(test_function) + assert test_function in cm.check_ids, err + + cm.run_checks(common_data=kwargs) + + result = cm.get_check_result(test_function) + assert isinstance(result, script.Result) + + return result + + return _run_check + + +@pytest.fixture +def conn_failure(): + return False + + +@pytest.fixture +def conn_cmds(): + ''' + Set of test 
parameters for mocked `Connection.cmd()`. + + ex) + ``` + { + <apic_ip>: [{ + "cmd": "ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin", + "output": """\ + ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin + 6.1G -rwxr-xr-x 1 root root 6.1G Apr 3 16:36 /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin + f2-apic1# + """, + "exception": None + }] + } + ``` + + The real output from `Connection.cmd()` (i.e. `Connection.output`) contains many ANSI characters. In this fixture, those characters are not considered. + ''' + return {} + + +class MockConnection(script.Connection): + conn_failure = False + conn_cmds = None + + def connect(self): + """ + `Connection.connect()` is just instantiating `pexepect.spawn()` which does not + initiate the SSH connection yet. Not exception likely happens here. + """ + if self.conn_failure: + raise Exception("Simulated exception at connect()") + + def cmd(self, command, **kargs): + """ + `Connection.cmd()` initiates the SSH connection (if not done yet) and sends the command. + Each check typically has multiple `cmd()` with different commands. + To cover that, this mock func uses a dictionary `conn_cmds` as the test data. + """ + _conn_cmds = self.conn_cmds[self.hostname] + for conn_cmd in _conn_cmds: + if command == conn_cmd["cmd"]: + if conn_cmd["exception"]: + raise conn_cmd["exception"] + self.output = conn_cmd["output"] + break + else: + log.error("Command `%s` not found in test data `conn_cmds`", command) + raise Exception("FAILURE IN PYTEST") + + +@pytest.fixture +def mock_conn(monkeypatch, conn_failure, conn_cmds): + MockConnection.conn_failure = conn_failure + MockConnection.conn_cmds = conn_cmds + monkeypatch.setattr(script, "Connection", MockConnection) + + +@pytest.fixture +def cmd_outputs(): + """ + Mocked output for `run_cmd` function. + This is used to avoid executing real commands in tests. 
+ """ + return { + "ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin": { + "splitlines": False, + "output": "6.1G -rwxr-xr-x 1 root root 6.1G Apr 3 16:36 /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin\napic1#", + } + } + + +@pytest.fixture +def mock_run_cmd(monkeypatch, cmd_outputs): + """ + Mock the `run_cmd` function to avoid executing real commands. + This is useful for tests that do not require actual command execution. + """ + def _mock_run_cmd(cmd, splitlines=False): + details = cmd_outputs.get(cmd) + if details is None: + log.error("Command `%s` not found in test data", cmd) + return "" + if details.get("CalledProcessError"): + raise CalledProcessError(127, cmd) + + splitlines = details.get("splitlines", False) + output = details.get("output") + if output is None: + log.error("Output for cmd `%s` not found in test data", cmd) + output = "" + + log.debug("Mocked run_cmd called with args: %s, kwargs: %s", cmd, splitlines) + if splitlines: + return output.splitlines() + else: + return output + monkeypatch.setattr(script, "run_cmd", _mock_run_cmd) diff --git a/tests/consumer_vzany_shared_services_check/epg_epg2_unmatched.json b/tests/checks/consumer_vzany_shared_services_check/epg_epg2_unmatched.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/epg_epg2_unmatched.json rename to tests/checks/consumer_vzany_shared_services_check/epg_epg2_unmatched.json diff --git a/tests/consumer_vzany_shared_services_check/esg_esg2.json b/tests/checks/consumer_vzany_shared_services_check/esg_esg2.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/esg_esg2.json rename to tests/checks/consumer_vzany_shared_services_check/esg_esg2.json diff --git a/tests/consumer_vzany_shared_services_check/fvCtx_consumer_same_vrf.json b/tests/checks/consumer_vzany_shared_services_check/fvCtx_consumer_same_vrf.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/fvCtx_consumer_same_vrf.json rename 
to tests/checks/consumer_vzany_shared_services_check/fvCtx_consumer_same_vrf.json diff --git a/tests/consumer_vzany_shared_services_check/fvCtx_consumer_shared.json b/tests/checks/consumer_vzany_shared_services_check/fvCtx_consumer_shared.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/fvCtx_consumer_shared.json rename to tests/checks/consumer_vzany_shared_services_check/fvCtx_consumer_shared.json diff --git a/tests/consumer_vzany_shared_services_check/fvCtx_no_consumers.json b/tests/checks/consumer_vzany_shared_services_check/fvCtx_no_consumers.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/fvCtx_no_consumers.json rename to tests/checks/consumer_vzany_shared_services_check/fvCtx_no_consumers.json diff --git a/tests/consumer_vzany_shared_services_check/global_contracts_epg_only.json b/tests/checks/consumer_vzany_shared_services_check/global_contracts_epg_only.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/global_contracts_epg_only.json rename to tests/checks/consumer_vzany_shared_services_check/global_contracts_epg_only.json diff --git a/tests/consumer_vzany_shared_services_check/global_contracts_esg_only.json b/tests/checks/consumer_vzany_shared_services_check/global_contracts_esg_only.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/global_contracts_esg_only.json rename to tests/checks/consumer_vzany_shared_services_check/global_contracts_esg_only.json diff --git a/tests/consumer_vzany_shared_services_check/global_contracts_same_vrf.json b/tests/checks/consumer_vzany_shared_services_check/global_contracts_same_vrf.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/global_contracts_same_vrf.json rename to tests/checks/consumer_vzany_shared_services_check/global_contracts_same_vrf.json diff --git a/tests/consumer_vzany_shared_services_check/global_contracts_shared.json 
b/tests/checks/consumer_vzany_shared_services_check/global_contracts_shared.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/global_contracts_shared.json rename to tests/checks/consumer_vzany_shared_services_check/global_contracts_shared.json diff --git a/tests/consumer_vzany_shared_services_check/instp_l3instp2.json b/tests/checks/consumer_vzany_shared_services_check/instp_l3instp2.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/instp_l3instp2.json rename to tests/checks/consumer_vzany_shared_services_check/instp_l3instp2.json diff --git a/tests/consumer_vzany_shared_services_check/test_consumer_vzany_shared_services_check.py b/tests/checks/consumer_vzany_shared_services_check/test_consumer_vzany_shared_services_check.py similarity index 97% rename from tests/consumer_vzany_shared_services_check/test_consumer_vzany_shared_services_check.py rename to tests/checks/consumer_vzany_shared_services_check/test_consumer_vzany_shared_services_check.py index 92019d2..628dff9 100644 --- a/tests/consumer_vzany_shared_services_check/test_consumer_vzany_shared_services_check.py +++ b/tests/checks/consumer_vzany_shared_services_check/test_consumer_vzany_shared_services_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "consumer_vzany_shared_services_check" # icurl queries fvCtx_query = "fvCtx.json?rsp-subtree=full&rsp-subtree-class=vzRsAnyToCons" @@ -262,11 +263,9 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.consumer_vzany_shared_services_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert 
result.result == expected_result diff --git a/tests/consumer_vzany_shared_services_check/vnsGraphInst_redirect.json b/tests/checks/consumer_vzany_shared_services_check/vnsGraphInst_redirect.json similarity index 100% rename from tests/consumer_vzany_shared_services_check/vnsGraphInst_redirect.json rename to tests/checks/consumer_vzany_shared_services_check/vnsGraphInst_redirect.json diff --git a/tests/contract_22_defect_check/test_contract_22_defect_check.py b/tests/checks/contract_22_defect_check/test_contract_22_defect_check.py similarity index 65% rename from tests/contract_22_defect_check/test_contract_22_defect_check.py rename to tests/checks/contract_22_defect_check/test_contract_22_defect_check.py index 1c9e174..26b036c 100644 --- a/tests/contract_22_defect_check/test_contract_22_defect_check.py +++ b/tests/checks/contract_22_defect_check/test_contract_22_defect_check.py @@ -8,6 +8,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "contract_22_defect_check" + @pytest.mark.parametrize( "cversion, tversion, expected_result", @@ -20,11 +22,9 @@ ("5.2(1a)", None, script.MANUAL), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.contract_22_defect_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/docker0_subent_overlap_check/apContainerPol_10_0_0_1__16.json b/tests/checks/docker0_subent_overlap_check/apContainerPol_10_0_0_1__16.json similarity index 100% rename from tests/docker0_subent_overlap_check/apContainerPol_10_0_0_1__16.json rename to tests/checks/docker0_subent_overlap_check/apContainerPol_10_0_0_1__16.json diff --git 
a/tests/docker0_subent_overlap_check/apContainerPol_172_16_0_1__15.json b/tests/checks/docker0_subent_overlap_check/apContainerPol_172_16_0_1__15.json similarity index 100% rename from tests/docker0_subent_overlap_check/apContainerPol_172_16_0_1__15.json rename to tests/checks/docker0_subent_overlap_check/apContainerPol_172_16_0_1__15.json diff --git a/tests/docker0_subent_overlap_check/apContainerPol_172_17_0_10__16.json b/tests/checks/docker0_subent_overlap_check/apContainerPol_172_17_0_10__16.json similarity index 100% rename from tests/docker0_subent_overlap_check/apContainerPol_172_17_0_10__16.json rename to tests/checks/docker0_subent_overlap_check/apContainerPol_172_17_0_10__16.json diff --git a/tests/docker0_subent_overlap_check/apContainerPol_172_17_0_1__16.json b/tests/checks/docker0_subent_overlap_check/apContainerPol_172_17_0_1__16.json similarity index 100% rename from tests/docker0_subent_overlap_check/apContainerPol_172_17_0_1__16.json rename to tests/checks/docker0_subent_overlap_check/apContainerPol_172_17_0_1__16.json diff --git a/tests/docker0_subent_overlap_check/apContainerPol_172_17_0_1__17.json b/tests/checks/docker0_subent_overlap_check/apContainerPol_172_17_0_1__17.json similarity index 100% rename from tests/docker0_subent_overlap_check/apContainerPol_172_17_0_1__17.json rename to tests/checks/docker0_subent_overlap_check/apContainerPol_172_17_0_1__17.json diff --git a/tests/docker0_subent_overlap_check/apContainerPol_172_18_0_1__16.json b/tests/checks/docker0_subent_overlap_check/apContainerPol_172_18_0_1__16.json similarity index 100% rename from tests/docker0_subent_overlap_check/apContainerPol_172_18_0_1__16.json rename to tests/checks/docker0_subent_overlap_check/apContainerPol_172_18_0_1__16.json diff --git a/tests/docker0_subent_overlap_check/infraWiNode_10_0_0_0__16.json b/tests/checks/docker0_subent_overlap_check/infraWiNode_10_0_0_0__16.json similarity index 100% rename from 
tests/docker0_subent_overlap_check/infraWiNode_10_0_0_0__16.json rename to tests/checks/docker0_subent_overlap_check/infraWiNode_10_0_0_0__16.json diff --git a/tests/docker0_subent_overlap_check/infraWiNode_10_0_x_0__24_remote_apic.json b/tests/checks/docker0_subent_overlap_check/infraWiNode_10_0_x_0__24_remote_apic.json similarity index 100% rename from tests/docker0_subent_overlap_check/infraWiNode_10_0_x_0__24_remote_apic.json rename to tests/checks/docker0_subent_overlap_check/infraWiNode_10_0_x_0__24_remote_apic.json diff --git a/tests/docker0_subent_overlap_check/infraWiNode_172_17_0_0__16.json b/tests/checks/docker0_subent_overlap_check/infraWiNode_172_17_0_0__16.json similarity index 100% rename from tests/docker0_subent_overlap_check/infraWiNode_172_17_0_0__16.json rename to tests/checks/docker0_subent_overlap_check/infraWiNode_172_17_0_0__16.json diff --git a/tests/docker0_subent_overlap_check/infraWiNode_172_17_x_0__24_remote_apic.json b/tests/checks/docker0_subent_overlap_check/infraWiNode_172_17_x_0__24_remote_apic.json similarity index 100% rename from tests/docker0_subent_overlap_check/infraWiNode_172_17_x_0__24_remote_apic.json rename to tests/checks/docker0_subent_overlap_check/infraWiNode_172_17_x_0__24_remote_apic.json diff --git a/tests/docker0_subent_overlap_check/test_docker0_subent_overlap_check.py b/tests/checks/docker0_subent_overlap_check/test_docker0_subent_overlap_check.py similarity index 84% rename from tests/docker0_subent_overlap_check/test_docker0_subent_overlap_check.py rename to tests/checks/docker0_subent_overlap_check/test_docker0_subent_overlap_check.py index 89cd737..be655c6 100644 --- a/tests/docker0_subent_overlap_check/test_docker0_subent_overlap_check.py +++ b/tests/checks/docker0_subent_overlap_check/test_docker0_subent_overlap_check.py @@ -3,11 +3,13 @@ import logging import importlib from helpers.utils import read_data + script = importlib.import_module("aci-preupgrade-validation-script") log = 
logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "docker0_subnet_overlap_check" # icurl queries infraWiNode = "infraWiNode.json" @@ -15,13 +17,22 @@ @pytest.mark.parametrize( - "icurl_outputs, expected_result", + "icurl_outputs, cversion, expected_result", [ ( { infraWiNode: read_data(dir, "infraWiNode_10_0_0_0__16.json"), apContainerPol: [], }, + "6.1(2b)", + script.NA, + ), + ( + { + infraWiNode: read_data(dir, "infraWiNode_10_0_0_0__16.json"), + apContainerPol: [], + }, + "5.3(2b)", script.PASS, ), ( @@ -29,6 +40,7 @@ infraWiNode: read_data(dir, "infraWiNode_10_0_0_0__16.json"), apContainerPol: read_data(dir, "apContainerPol_172_17_0_1__16.json"), }, + "5.3(2b)", script.PASS, ), ( @@ -36,6 +48,7 @@ infraWiNode: read_data(dir, "infraWiNode_10_0_0_0__16.json"), apContainerPol: read_data(dir, "apContainerPol_10_0_0_1__16.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -43,6 +56,7 @@ infraWiNode: read_data(dir, "infraWiNode_10_0_x_0__24_remote_apic.json"), apContainerPol: [], }, + "5.3(2b)", script.PASS, ), ( @@ -50,6 +64,7 @@ infraWiNode: read_data(dir, "infraWiNode_10_0_x_0__24_remote_apic.json"), apContainerPol: read_data(dir, "apContainerPol_172_17_0_1__16.json"), }, + "5.3(2b)", script.PASS, ), ( @@ -57,6 +72,7 @@ infraWiNode: read_data(dir, "infraWiNode_10_0_x_0__24_remote_apic.json"), apContainerPol: read_data(dir, "apContainerPol_10_0_0_1__16.json"), }, + "5.3(2b)", script.FAIL_UF, ), # This scenario is the most likely one where, prior to the upgrade, @@ -68,6 +84,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_0_0__16.json"), apContainerPol: [], }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -75,6 +92,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_0_0__16.json"), apContainerPol: read_data(dir, "apContainerPol_172_17_0_1__16.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -82,6 +100,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_0_0__16.json"), apContainerPol: read_data(dir, 
"apContainerPol_172_17_0_10__16.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -89,6 +108,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_0_0__16.json"), apContainerPol: read_data(dir, "apContainerPol_172_17_0_1__17.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -96,6 +116,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_0_0__16.json"), apContainerPol: read_data(dir, "apContainerPol_172_16_0_1__15.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -103,6 +124,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_0_0__16.json"), apContainerPol: read_data(dir, "apContainerPol_172_18_0_1__16.json"), }, + "5.3(2b)", script.PASS, ), ( @@ -110,6 +132,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_x_0__24_remote_apic.json"), apContainerPol: [], }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -117,6 +140,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_x_0__24_remote_apic.json"), apContainerPol: read_data(dir, "apContainerPol_172_17_0_1__16.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -124,6 +148,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_x_0__24_remote_apic.json"), apContainerPol: read_data(dir, "apContainerPol_172_17_0_10__16.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -131,6 +156,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_x_0__24_remote_apic.json"), apContainerPol: read_data(dir, "apContainerPol_172_17_0_1__17.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -138,6 +164,7 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_x_0__24_remote_apic.json"), apContainerPol: read_data(dir, "apContainerPol_172_16_0_1__15.json"), }, + "5.3(2b)", script.FAIL_UF, ), ( @@ -145,10 +172,13 @@ infraWiNode: read_data(dir, "infraWiNode_172_17_x_0__24_remote_apic.json"), apContainerPol: read_data(dir, "apContainerPol_172_18_0_1__16.json"), }, + "5.3(2b)", script.PASS, ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.docker0_subnet_overlap_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, cversion, 
expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + ) + assert result.result == expected_result diff --git a/tests/eecdh_cipher_check/commCipher_neg.json b/tests/checks/eecdh_cipher_check/commCipher_neg.json similarity index 100% rename from tests/eecdh_cipher_check/commCipher_neg.json rename to tests/checks/eecdh_cipher_check/commCipher_neg.json diff --git a/tests/eecdh_cipher_check/commCipher_neg2.json b/tests/checks/eecdh_cipher_check/commCipher_neg2.json similarity index 100% rename from tests/eecdh_cipher_check/commCipher_neg2.json rename to tests/checks/eecdh_cipher_check/commCipher_neg2.json diff --git a/tests/eecdh_cipher_check/commCipher_pos.json b/tests/checks/eecdh_cipher_check/commCipher_pos.json similarity index 100% rename from tests/eecdh_cipher_check/commCipher_pos.json rename to tests/checks/eecdh_cipher_check/commCipher_pos.json diff --git a/tests/eecdh_cipher_check/test_eecdh_cipher_check.py b/tests/checks/eecdh_cipher_check/test_eecdh_cipher_check.py similarity index 80% rename from tests/eecdh_cipher_check/test_eecdh_cipher_check.py rename to tests/checks/eecdh_cipher_check/test_eecdh_cipher_check.py index 297d3c9..f6a1986 100644 --- a/tests/eecdh_cipher_check/test_eecdh_cipher_check.py +++ b/tests/checks/eecdh_cipher_check/test_eecdh_cipher_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "eecdh_cipher_check" # icurl queries commCiphers = 'commCipher.json' @@ -39,10 +40,8 @@ ), ], ) -def test_logic(mock_icurl, cversion, expected_result): - result = script.eecdh_cipher_check( - 1, - 1, - script.AciVersion(cversion), +def test_logic(run_check, mock_icurl, cversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), ) - assert result == expected_result \ No newline at end of file + assert result.result == expected_result diff --git a/tests/encap_already_in_use_check/faultInst-encap-pos.json 
b/tests/checks/encap_already_in_use_check/faultInst-encap-pos.json similarity index 100% rename from tests/encap_already_in_use_check/faultInst-encap-pos.json rename to tests/checks/encap_already_in_use_check/faultInst-encap-pos.json diff --git a/tests/encap_already_in_use_check/faultInst-new-version.json b/tests/checks/encap_already_in_use_check/faultInst-new-version.json similarity index 100% rename from tests/encap_already_in_use_check/faultInst-new-version.json rename to tests/checks/encap_already_in_use_check/faultInst-new-version.json diff --git a/tests/encap_already_in_use_check/fvIfConn.json b/tests/checks/encap_already_in_use_check/fvIfConn.json similarity index 100% rename from tests/encap_already_in_use_check/fvIfConn.json rename to tests/checks/encap_already_in_use_check/fvIfConn.json diff --git a/tests/encap_already_in_use_check/test_encap_already_in_use_check.py b/tests/checks/encap_already_in_use_check/test_encap_already_in_use_check.py similarity index 86% rename from tests/encap_already_in_use_check/test_encap_already_in_use_check.py rename to tests/checks/encap_already_in_use_check/test_encap_already_in_use_check.py index 426ff4c..e55645f 100644 --- a/tests/encap_already_in_use_check/test_encap_already_in_use_check.py +++ b/tests/checks/encap_already_in_use_check/test_encap_already_in_use_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "encap_already_in_use_check" # icurl queries faultInsts = ( @@ -43,6 +44,6 @@ ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.encap_already_in_use_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/equipment_disk_limits_exceeded/faultInst_neg.json b/tests/checks/equipment_disk_limits_exceeded/faultInst_neg.json similarity index 100% rename from 
tests/equipment_disk_limits_exceeded/faultInst_neg.json rename to tests/checks/equipment_disk_limits_exceeded/faultInst_neg.json diff --git a/tests/equipment_disk_limits_exceeded/faultInst_pos.json b/tests/checks/equipment_disk_limits_exceeded/faultInst_pos.json similarity index 100% rename from tests/equipment_disk_limits_exceeded/faultInst_pos.json rename to tests/checks/equipment_disk_limits_exceeded/faultInst_pos.json diff --git a/tests/equipment_disk_limits_exceeded/test_equipment_disk_limits_exceeded.py b/tests/checks/equipment_disk_limits_exceeded/test_equipment_disk_limits_exceeded.py similarity index 79% rename from tests/equipment_disk_limits_exceeded/test_equipment_disk_limits_exceeded.py rename to tests/checks/equipment_disk_limits_exceeded/test_equipment_disk_limits_exceeded.py index 9399c70..72fd0aa 100644 --- a/tests/equipment_disk_limits_exceeded/test_equipment_disk_limits_exceeded.py +++ b/tests/checks/equipment_disk_limits_exceeded/test_equipment_disk_limits_exceeded.py @@ -9,9 +9,12 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "equipment_disk_limits_exceeded" + f182x_api = 'faultInst.json' f182x_api += '?query-target-filter=or(eq(faultInst.code,"F1820"),eq(faultInst.code,"F1821"),eq(faultInst.code,"F1822"))' + @pytest.mark.parametrize( "icurl_outputs, expected_result", [ @@ -25,6 +28,6 @@ ) ], ) -def test_logic(mock_icurl, expected_result): - result = script.equipment_disk_limits_exceeded(1, 1) - assert result == expected_result \ No newline at end of file +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/eventmgr_db_defect_check/test_eventmgr_db_defect_check.py b/tests/checks/eventmgr_db_defect_check/test_eventmgr_db_defect_check.py similarity index 77% rename from tests/eventmgr_db_defect_check/test_eventmgr_db_defect_check.py rename to 
tests/checks/eventmgr_db_defect_check/test_eventmgr_db_defect_check.py index 4524d60..c4f98dd 100644 --- a/tests/eventmgr_db_defect_check/test_eventmgr_db_defect_check.py +++ b/tests/checks/eventmgr_db_defect_check/test_eventmgr_db_defect_check.py @@ -8,6 +8,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "eventmgr_db_defect_check" + @pytest.mark.parametrize( "cversion, expected_result", @@ -27,8 +29,8 @@ ("5.0(1l)", script.PASS), ], ) -def test_logic(mock_icurl, cversion, expected_result): - result = script.eventmgr_db_defect_check( - 1, 1, script.AciVersion(cversion) +def test_logic(run_check, mock_icurl, cversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/fabricPathEP_target_check/fabricRsOosPath_neg.json b/tests/checks/fabricPathEP_target_check/fabricRsOosPath_neg.json similarity index 100% rename from tests/fabricPathEP_target_check/fabricRsOosPath_neg.json rename to tests/checks/fabricPathEP_target_check/fabricRsOosPath_neg.json diff --git a/tests/fabricPathEP_target_check/fabricRsOosPath_pos1.json b/tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos1.json similarity index 100% rename from tests/fabricPathEP_target_check/fabricRsOosPath_pos1.json rename to tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos1.json diff --git a/tests/fabricPathEP_target_check/fabricRsOosPath_pos2.json b/tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos2.json similarity index 100% rename from tests/fabricPathEP_target_check/fabricRsOosPath_pos2.json rename to tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos2.json diff --git a/tests/fabricPathEP_target_check/fabricRsOosPath_pos3.json b/tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos3.json similarity index 100% rename from tests/fabricPathEP_target_check/fabricRsOosPath_pos3.json rename to 
tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos3.json diff --git a/tests/fabricPathEP_target_check/fabricRsOosPath_pos4.json b/tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos4.json similarity index 100% rename from tests/fabricPathEP_target_check/fabricRsOosPath_pos4.json rename to tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos4.json diff --git a/tests/fabricPathEP_target_check/fabricRsOosPath_pos5.json b/tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos5.json similarity index 100% rename from tests/fabricPathEP_target_check/fabricRsOosPath_pos5.json rename to tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos5.json diff --git a/tests/fabricPathEP_target_check/fabricRsOosPath_pos6.json b/tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos6.json similarity index 100% rename from tests/fabricPathEP_target_check/fabricRsOosPath_pos6.json rename to tests/checks/fabricPathEP_target_check/fabricRsOosPath_pos6.json diff --git a/tests/fabricPathEP_target_check/infraRsHPathAtt_neg.json b/tests/checks/fabricPathEP_target_check/infraRsHPathAtt_neg.json similarity index 100% rename from tests/fabricPathEP_target_check/infraRsHPathAtt_neg.json rename to tests/checks/fabricPathEP_target_check/infraRsHPathAtt_neg.json diff --git a/tests/fabricPathEP_target_check/infraRsHPathAtt_pos1.json b/tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos1.json similarity index 100% rename from tests/fabricPathEP_target_check/infraRsHPathAtt_pos1.json rename to tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos1.json diff --git a/tests/fabricPathEP_target_check/infraRsHPathAtt_pos2.json b/tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos2.json similarity index 100% rename from tests/fabricPathEP_target_check/infraRsHPathAtt_pos2.json rename to tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos2.json diff --git a/tests/fabricPathEP_target_check/infraRsHPathAtt_pos3.json 
b/tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos3.json similarity index 100% rename from tests/fabricPathEP_target_check/infraRsHPathAtt_pos3.json rename to tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos3.json diff --git a/tests/fabricPathEP_target_check/infraRsHPathAtt_pos4.json b/tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos4.json similarity index 100% rename from tests/fabricPathEP_target_check/infraRsHPathAtt_pos4.json rename to tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos4.json diff --git a/tests/fabricPathEP_target_check/infraRsHPathAtt_pos5.json b/tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos5.json similarity index 100% rename from tests/fabricPathEP_target_check/infraRsHPathAtt_pos5.json rename to tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos5.json diff --git a/tests/fabricPathEP_target_check/infraRsHPathAtt_pos6.json b/tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos6.json similarity index 100% rename from tests/fabricPathEP_target_check/infraRsHPathAtt_pos6.json rename to tests/checks/fabricPathEP_target_check/infraRsHPathAtt_pos6.json diff --git a/tests/fabricPathEP_target_check/test_fabricPathEP_target_check.py b/tests/checks/fabricPathEP_target_check/test_fabricPathEP_target_check.py similarity index 89% rename from tests/fabricPathEP_target_check/test_fabricPathEP_target_check.py rename to tests/checks/fabricPathEP_target_check/test_fabricPathEP_target_check.py index b136d7c..98fdd4e 100644 --- a/tests/fabricPathEP_target_check/test_fabricPathEP_target_check.py +++ b/tests/checks/fabricPathEP_target_check/test_fabricPathEP_target_check.py @@ -9,10 +9,12 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "fabricPathEp_target_check" # icurl queries -hpath_api = 'infraRsHPathAtt.json' -oosPorts_api = 'fabricRsOosPath.json' +hpath_api = 'infraRsHPathAtt.json' +oosPorts_api = 'fabricRsOosPath.json' + 
@pytest.mark.parametrize( "icurl_outputs, expected_result", @@ -62,6 +64,6 @@ ], ) -def test_logic(mock_icurl, expected_result): - result = script.fabricPathEp_target_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/fabric_dpp_check/lbpPol_NEG.json b/tests/checks/fabric_dpp_check/lbpPol_NEG.json similarity index 100% rename from tests/fabric_dpp_check/lbpPol_NEG.json rename to tests/checks/fabric_dpp_check/lbpPol_NEG.json diff --git a/tests/fabric_dpp_check/lbpPol_POS.json b/tests/checks/fabric_dpp_check/lbpPol_POS.json similarity index 100% rename from tests/fabric_dpp_check/lbpPol_POS.json rename to tests/checks/fabric_dpp_check/lbpPol_POS.json diff --git a/tests/fabric_dpp_check/test_fabric_dpp_check.py b/tests/checks/fabric_dpp_check/test_fabric_dpp_check.py similarity index 85% rename from tests/fabric_dpp_check/test_fabric_dpp_check.py rename to tests/checks/fabric_dpp_check/test_fabric_dpp_check.py index 9eacfb7..6e2fddd 100644 --- a/tests/fabric_dpp_check/test_fabric_dpp_check.py +++ b/tests/checks/fabric_dpp_check/test_fabric_dpp_check.py @@ -9,11 +9,13 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "fabric_dpp_check" # icurl queries -lbpPol = 'lbpPol.json' +lbpPol = 'lbpPol.json' lbpPol += '?query-target-filter=eq(lbpPol.pri,"on")' + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ @@ -62,8 +64,8 @@ ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.fabric_dpp_check( - 1, 1, script.AciVersion(tversion) +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check( + tversion=script.AciVersion(tversion), ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/checks/fabric_link_redundancy_check/fabricNode.json 
b/tests/checks/fabric_link_redundancy_check/fabricNode.json new file mode 100644 index 0000000..3dffa26 --- /dev/null +++ b/tests/checks/fabric_link_redundancy_check/fabricNode.json @@ -0,0 +1,122 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "name": "apic1", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "name": "LF101", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "name": "LF102", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-103", + "fabricSt": "active", + "id": "103", + "name": "LF103", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-111", + "fabricSt": "active", + "id": "111", + "name": "T2_LF111", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-112", + "fabricSt": "active", + "id": "112", + "name": "T2_LF112", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-121", + "fabricSt": "active", + "id": "121", + "name": "RL_LF121", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-122", + "fabricSt": "active", + "id": "122", + "name": "RL_LF122", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "name": "SP1001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + 
"attributes": { + "dn": "topology/pod-1/node-1002", + "fabricSt": "active", + "id": "1002", + "name": "SP1002", + "role": "spine", + "nodeType": "unspecified" + } + } + } +] diff --git a/tests/fabric_link_redundancy_check/lldpAdjEp_neg.json b/tests/checks/fabric_link_redundancy_check/lldpAdjEp_neg.json similarity index 100% rename from tests/fabric_link_redundancy_check/lldpAdjEp_neg.json rename to tests/checks/fabric_link_redundancy_check/lldpAdjEp_neg.json diff --git a/tests/fabric_link_redundancy_check/lldpAdjEp_pos_spine_only.json b/tests/checks/fabric_link_redundancy_check/lldpAdjEp_pos_spine_only.json similarity index 100% rename from tests/fabric_link_redundancy_check/lldpAdjEp_pos_spine_only.json rename to tests/checks/fabric_link_redundancy_check/lldpAdjEp_pos_spine_only.json diff --git a/tests/fabric_link_redundancy_check/lldpAdjEp_pos_spine_t1.json b/tests/checks/fabric_link_redundancy_check/lldpAdjEp_pos_spine_t1.json similarity index 100% rename from tests/fabric_link_redundancy_check/lldpAdjEp_pos_spine_t1.json rename to tests/checks/fabric_link_redundancy_check/lldpAdjEp_pos_spine_t1.json diff --git a/tests/fabric_link_redundancy_check/lldpAdjEp_pos_t1_only.json b/tests/checks/fabric_link_redundancy_check/lldpAdjEp_pos_t1_only.json similarity index 100% rename from tests/fabric_link_redundancy_check/lldpAdjEp_pos_t1_only.json rename to tests/checks/fabric_link_redundancy_check/lldpAdjEp_pos_t1_only.json diff --git a/tests/checks/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py b/tests/checks/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py new file mode 100644 index 0000000..b31f8fe --- /dev/null +++ b/tests/checks/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py @@ -0,0 +1,67 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = 
os.path.dirname(os.path.abspath(__file__)) + +test_function = "fabric_link_redundancy_check" + +# icurl queries +lldp_adj_api = "lldpAdjEp.json" +lldp_adj_api += '?query-target-filter=wcard(lldpAdjEp.sysDesc,"topology/pod")' + + +@pytest.mark.parametrize( + "icurl_outputs, fabric_nodes, expected_result, expected_data", + [ + # FAILING = T1 leaf101 single-homed, T1 leaf102 none, T1 leaf103 multi-homed + ( + {lldp_adj_api: read_data(dir, "lldpAdjEp_pos_spine_only.json")}, + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [ + ["LF101", "SP1001", "Only one spine adjacency"], + ["LF102", "", "No spine adjacency"], + ] + ), + # FAILING = T1 leafs multi-homed, T2 leaf111 single-homed, T2 leaf112 multi-homed + ( + {lldp_adj_api: read_data(dir, "lldpAdjEp_pos_t1_only.json")}, + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [ + ["T2_LF111", "LF102", "Only one tier 1 leaf adjacency"], + ] + ), + # FAILING = T1 leaf101 single-homed, T1 leaf102 none, T1 leaf103 multi-homed + # T2 leaf111 single-homed, T2 leaf112 multi-homed + ( + {lldp_adj_api: read_data(dir, "lldpAdjEp_pos_spine_t1.json")}, + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [ + ["LF101", "SP1001", "Only one spine adjacency"], + ["LF102", "", "No spine adjacency"], + ["T2_LF111", "LF102", "Only one tier 1 leaf adjacency"], + ] + ), + # PASSING = ALL LEAF SWITCHES ARE MULTI-HOMED except for RL + ( + {lldp_adj_api: read_data(dir, "lldpAdjEp_neg.json")}, + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, fabric_nodes, expected_result, expected_data): + result = run_check( + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert sorted(result.data) == sorted(expected_data) diff --git a/tests/fabric_port_down_check/faultInst_pos.json b/tests/checks/fabric_port_down_check/faultInst_pos.json similarity index 100% rename from tests/fabric_port_down_check/faultInst_pos.json rename to 
tests/checks/fabric_port_down_check/faultInst_pos.json diff --git a/tests/fabric_port_down_check/test_fabric_port_down_check.py b/tests/checks/fabric_port_down_check/test_fabric_port_down_check.py similarity index 78% rename from tests/fabric_port_down_check/test_fabric_port_down_check.py rename to tests/checks/fabric_port_down_check/test_fabric_port_down_check.py index 034e343..4b13233 100644 --- a/tests/fabric_port_down_check/test_fabric_port_down_check.py +++ b/tests/checks/fabric_port_down_check/test_fabric_port_down_check.py @@ -9,9 +9,10 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "fabric_port_down_check" # icurl queries -faultInsts = 'faultInst.json' +faultInsts = 'faultInst.json' faultInsts += '?&query-target-filter=and(eq(faultInst.code,"F1394")' faultInsts += ',eq(faultInst.rule,"ethpm-if-port-down-fabric"))' @@ -33,6 +34,6 @@ ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.fabric_port_down_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/checks/fabricdomain_name_check/fabricNode.json b/tests/checks/fabricdomain_name_check/fabricNode.json new file mode 100644 index 0000000..80f5c86 --- /dev/null +++ b/tests/checks/fabricdomain_name_check/fabricNode.json @@ -0,0 +1,108 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": 
"controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93108TC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.103", + "dn": "topology/pod-1/node-103", + "fabricSt": "active", + "id": "103", + "model": "N9K-C93180YC-FX3", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf103", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] + diff --git a/tests/checks/fabricdomain_name_check/fabricNode_no_apic1.json b/tests/checks/fabricdomain_name_check/fabricNode_no_apic1.json new file mode 100644 index 0000000..fcc7640 --- /dev/null +++ b/tests/checks/fabricdomain_name_check/fabricNode_no_apic1.json @@ -0,0 +1,93 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + 
"name": "apic2", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93108TC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.103", + "dn": "topology/pod-1/node-103", + "fabricSt": "active", + "id": "103", + "model": "N9K-C93180YC-FX3", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf103", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] + diff --git a/tests/checks/fabricdomain_name_check/test_fabricdomain_name_check.py b/tests/checks/fabricdomain_name_check/test_fabricdomain_name_check.py new file mode 100644 index 0000000..c2881bb --- /dev/null +++ b/tests/checks/fabricdomain_name_check/test_fabricdomain_name_check.py @@ -0,0 +1,126 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = 
importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "fabricdomain_name_check" + +# icurl queries +topSystem = 'topology/pod-1/node-1/sys.json' + + +@pytest.mark.parametrize( + "icurl_outputs, cversion, tversion, fabric_nodes, expected_result, expected_data", + [ + # tversion missing + ( + {topSystem: read_data(dir, "topSystem_1POS.json")}, + "5.2(3g)", + None, + read_data(dir, "fabricNode.json"), + script.MANUAL, + [], + ), + # APIC 1 missing in fabric_nodes (fabricNode) + ( + {topSystem: read_data(dir, "topSystem_1POS.json")}, + "5.2(3g)", + "6.0(2h)", + read_data(dir, "fabricNode_no_apic1.json"), + script.ERROR, + [], + ), + # APIC 1 missing in topSystem + ( + {topSystem: []}, + "5.2(3g)", + "6.0(2h)", + read_data(dir, "fabricNode.json"), + script.ERROR, + [], + ), + # `;` char test + ( + {topSystem: read_data(dir, "topSystem_1POS.json")}, + "5.2(3g)", + "6.0(2h)", + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [["fabric;4", "Contains a special character"]] + ), + ( + {topSystem: read_data(dir, "topSystem_1POS.json")}, + "6.0(3a)", + "6.0(2h)", + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [["fabric;4", "Contains a special character"]] + ), + # `#` char test + ( + {topSystem: read_data(dir, "topSystem_2POS.json")}, + "5.2(3g)", + "6.0(2h)", + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [["fabric#4", "Contains a special character"]] + ), + ( + {topSystem: read_data(dir, "topSystem_2POS.json")}, + "6.0(3a)", + "6.0(2h)", + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [["fabric#4", "Contains a special character"]] + ), + # Neither ; or # in fabricDomain + ( + {topSystem: read_data(dir, "topSystem_NEG.json")}, + "5.2(3g)", + "6.0(2h)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + # only affected 6.0(2h), regardless of special chars + ( + {topSystem: read_data(dir, "topSystem_1POS.json")}, + 
"5.2(3g)", + "6.0(1j)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + # Eventual 6.0(3) has fix + ( + {topSystem: read_data(dir, "topSystem_1POS.json")}, + "5.2(3g)", + "6.0(3a)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + ( + {topSystem: read_data(dir, "topSystem_1POS.json")}, + "6.0(3a)", + "6.0(4a)", + read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, cversion, tversion, fabric_nodes, expected_result, expected_data): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/checks/fabricdomain_name_check/topSystem_1POS.json b/tests/checks/fabricdomain_name_check/topSystem_1POS.json new file mode 100644 index 0000000..3bd73df --- /dev/null +++ b/tests/checks/fabricdomain_name_check/topSystem_1POS.json @@ -0,0 +1,13 @@ +[ + { + "topSystem": { + "attributes": { + "address": "10.0.0.1", + "fabricId": "1", + "id": "1", + "fabricDomain": "fabric;4", + "role": "controller" + } + } + } +] diff --git a/tests/checks/fabricdomain_name_check/topSystem_2POS.json b/tests/checks/fabricdomain_name_check/topSystem_2POS.json new file mode 100644 index 0000000..175bdd8 --- /dev/null +++ b/tests/checks/fabricdomain_name_check/topSystem_2POS.json @@ -0,0 +1,13 @@ +[ + { + "topSystem": { + "attributes": { + "address": "10.0.0.1", + "fabricId": "1", + "id": "1", + "fabricDomain": "fabric#4", + "role": "controller" + } + } + } +] diff --git a/tests/checks/fabricdomain_name_check/topSystem_NEG.json b/tests/checks/fabricdomain_name_check/topSystem_NEG.json new file mode 100644 index 0000000..8e6d3ba --- /dev/null +++ b/tests/checks/fabricdomain_name_check/topSystem_NEG.json @@ -0,0 +1,13 @@ +[ + { + "topSystem": { + "attributes": { + "address": "10.0.0.1", + "fabricId": "1", + "id": "1", + 
"fabricDomain": "fabric4", + "role": "controller" + } + } + } +] diff --git a/tests/checks/fc_ex_model_check/fabricNode_NEG.json b/tests/checks/fc_ex_model_check/fabricNode_NEG.json new file mode 100644 index 0000000..9b11d6b --- /dev/null +++ b/tests/checks/fc_ex_model_check/fabricNode_NEG.json @@ -0,0 +1,107 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93108TC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.103", + "dn": "topology/pod-1/node-103", + "fabricSt": "active", + "id": "103", + "model": "N9K-C93180YC-FX3", + "monPolDn": 
"uni/fabric/monfab-default", + "name": "leaf103", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git a/tests/checks/fc_ex_model_check/fabricNode_POS.json b/tests/checks/fc_ex_model_check/fabricNode_POS.json new file mode 100644 index 0000000..d0b3192 --- /dev/null +++ b/tests/checks/fc_ex_model_check/fabricNode_POS.json @@ -0,0 +1,122 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-EX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": 
"N9K-C93108TC-EX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.103", + "dn": "topology/pod-1/node-103", + "fabricSt": "active", + "id": "103", + "model": "N9K-C93108LC-EX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf103", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.104", + "dn": "topology/pod-1/node-104", + "fabricSt": "active", + "id": "104", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf104", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git a/tests/checks/fc_ex_model_check/fcEntity_101_102.json b/tests/checks/fc_ex_model_check/fcEntity_101_102.json new file mode 100644 index 0000000..8f052ce --- /dev/null +++ b/tests/checks/fc_ex_model_check/fcEntity_101_102.json @@ -0,0 +1,18 @@ +[ + { + "fcEntity": { + "attributes": { + "adminSt": "enabled", + "dn": "topology/pod-1/node-102/sys/fc" + } + } + }, + { + "fcEntity": { + "attributes": { + "adminSt": "enabled", + "dn": "topology/pod-1/node-101/sys/fc" + } + } + } +] diff --git a/tests/checks/fc_ex_model_check/fcEntity_101_102_103.json b/tests/checks/fc_ex_model_check/fcEntity_101_102_103.json new file mode 100644 index 0000000..9da4663 --- /dev/null +++ b/tests/checks/fc_ex_model_check/fcEntity_101_102_103.json @@ -0,0 +1,26 @@ +[ + { + "fcEntity": { + "attributes": { + "adminSt": "enabled", + "dn": "topology/pod-1/node-102/sys/fc" + } + } + }, + { + "fcEntity": { + "attributes": { + "adminSt": "enabled", + "dn": 
"topology/pod-1/node-101/sys/fc" + } + } + }, + { + "fcEntity": { + "attributes": { + "adminSt": "enabled", + "dn": "topology/pod-1/node-103/sys/fc" + } + } + } +] diff --git a/tests/checks/fc_ex_model_check/fcEntity_104.json b/tests/checks/fc_ex_model_check/fcEntity_104.json new file mode 100644 index 0000000..eed7168 --- /dev/null +++ b/tests/checks/fc_ex_model_check/fcEntity_104.json @@ -0,0 +1,10 @@ +[ + { + "fcEntity": { + "attributes": { + "adminSt": "enabled", + "dn": "topology/pod-1/node-104/sys/fc" + } + } + } +] diff --git a/tests/checks/fc_ex_model_check/test_fc_ex_model_check.py b/tests/checks/fc_ex_model_check/test_fc_ex_model_check.py new file mode 100644 index 0000000..88e421c --- /dev/null +++ b/tests/checks/fc_ex_model_check/test_fc_ex_model_check.py @@ -0,0 +1,140 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "fc_ex_model_check" + +# icurl queries +fcEntity_api = "fcEntity.json" + + +@pytest.mark.parametrize( + "icurl_outputs, tversion, fabric_nodes, expected_result, expected_data", + [ + # TVERSION MISSING + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102_103.json")}, + None, + read_data(dir, "fabricNode_POS.json"), + script.MANUAL, + [], + ), + # TVERSION NOT AFFECTED + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102_103.json")}, + "6.0(1f)", + read_data(dir, "fabricNode_POS.json"), + script.PASS, + [], + ), + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102_103.json")}, + "6.0(9f)", + read_data(dir, "fabricNode_POS.json"), + script.PASS, + [], + ), + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102_103.json")}, + "6.1(4h)", + read_data(dir, "fabricNode_POS.json"), + script.PASS, + [], + ), + # FABRIC HAS EX NODES and ALL OF THEM HAVE FC/FCOE CONFIG + ( + {fcEntity_api: read_data(dir, 
"fcEntity_101_102_103.json")}, + "6.1(1f)", + read_data(dir, "fabricNode_POS.json"), + script.FAIL_O, + [ + ["topology/pod-1/node-101", "N9K-C93180YC-EX"], + ["topology/pod-1/node-102", "N9K-C93108TC-EX"], + ["topology/pod-1/node-103", "N9K-C93108LC-EX"], + ], + ), + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102_103.json")}, + "6.0(7e)", + read_data(dir, "fabricNode_POS.json"), + script.FAIL_O, + [ + ["topology/pod-1/node-101", "N9K-C93180YC-EX"], + ["topology/pod-1/node-102", "N9K-C93108TC-EX"], + ["topology/pod-1/node-103", "N9K-C93108LC-EX"], + ], + ), + # FABRIC HAS EX NODES and SOME OF THEM HAVE FC/FCOE CONFIG + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102.json")}, + "6.1(1f)", + read_data(dir, "fabricNode_POS.json"), + script.FAIL_O, + [ + ["topology/pod-1/node-101", "N9K-C93180YC-EX"], + ["topology/pod-1/node-102", "N9K-C93108TC-EX"], + ], + ), + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102.json")}, + "6.0(7e)", + read_data(dir, "fabricNode_POS.json"), + script.FAIL_O, + [ + ["topology/pod-1/node-101", "N9K-C93180YC-EX"], + ["topology/pod-1/node-102", "N9K-C93108TC-EX"], + ], + ), + # FABRIC HAS EX NODES and NONE OF THEM HAVE FC/FCOE CONFIG + ( + {fcEntity_api: []}, + "6.0(7e)", + read_data(dir, "fabricNode_POS.json"), + script.PASS, + [], + ), + ( + {fcEntity_api: read_data(dir, "fcEntity_104.json")}, + "6.0(7e)", + read_data(dir, "fabricNode_POS.json"), + script.PASS, + [], + ), + # FABRIC DOES NOT HAVE EX NODES + ( + {fcEntity_api: []}, + "6.0(7e)", + read_data(dir, "fabricNode_NEG.json"), + script.PASS, + [], + ), + ( + {fcEntity_api: read_data(dir, "fcEntity_101_102_103.json")}, + "6.0(7e)", + read_data(dir, "fabricNode_NEG.json"), + script.PASS, + [], + ), + ( + {fcEntity_api: read_data(dir, "fcEntity_104.json")}, + "6.0(7e)", + read_data(dir, "fabricNode_NEG.json"), + script.PASS, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, tversion, fabric_nodes, expected_result, expected_data): + result = run_check( + 
tversion=script.AciVersion(tversion) if tversion else None, + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/checks/gen1_switch_compatibility_check/fabricNode_no_gen1.json b/tests/checks/gen1_switch_compatibility_check/fabricNode_no_gen1.json new file mode 100644 index 0000000..d266e8a --- /dev/null +++ b/tests/checks/gen1_switch_compatibility_check/fabricNode_no_gen1.json @@ -0,0 +1,30 @@ +[ + { + "fabricNode": { + "attributes": { + "adSt": "on", + "dn": "topology/pod-2/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C93180YC-FX", + "name": "RL201", + "nodeType": "remote-leaf-wan", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "adSt": "on", + "dn": "topology/pod-2/node-202", + "fabricSt": "active", + "id": "202", + "model": "N9K-C93180YC-FX", + "name": "RL202", + "nodeType": "remote-leaf-wan", + "role": "leaf" + } + } + } +] diff --git a/tests/checks/gen1_switch_compatibility_check/fabricNode_with_gen1.json b/tests/checks/gen1_switch_compatibility_check/fabricNode_with_gen1.json new file mode 100644 index 0000000..e01273b --- /dev/null +++ b/tests/checks/gen1_switch_compatibility_check/fabricNode_with_gen1.json @@ -0,0 +1,44 @@ +[ + { + "fabricNode": { + "attributes": { + "adSt": "on", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C9372TX-E", + "name": "Leaf-101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "adSt": "on", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C9372TX-E", + "name": "Leaf-102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "adSt": "on", + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "model": "N9K-C9332PQ", + "name": "Spine-1001", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] 
diff --git a/tests/checks/gen1_switch_compatibility_check/test_gen1_switch_compatibility_check.py b/tests/checks/gen1_switch_compatibility_check/test_gen1_switch_compatibility_check.py new file mode 100644 index 0000000..4f076c5 --- /dev/null +++ b/tests/checks/gen1_switch_compatibility_check/test_gen1_switch_compatibility_check.py @@ -0,0 +1,42 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +script = importlib.import_module("aci-preupgrade-validation-script") +AciVersion = script.AciVersion + +test_function = "gen1_switch_compatibility_check" + + +@pytest.mark.parametrize( + "tversion, fabric_nodes, expected_result, expected_data", + [ + # FAIL - gen1 HW does not support t_ver + ( + "5.2(3b)", + read_data(dir, "fabricNode_with_gen1.json"), + script.FAIL_UF, + [ + ["5.2(3b)", "101", "N9K-C9372TX-E", "Not supported on 5.x+"], + ["5.2(3b)", "102", "N9K-C9372TX-E", "Not supported on 5.x+"], + ["5.2(3b)", "1001", "N9K-C9332PQ", "Not supported on 5.x+"], + ], + ), + # PASS - gen1 HW supports t_ver + ("4.2(7r)", read_data(dir, "fabricNode_with_gen1.json"), script.PASS, []), + # PASS - no gen1 hw found + ("5.2(3b)", read_data(dir, "fabricNode_no_gen1.json"), script.PASS, []), + ], +) +def test_logic(run_check, tversion, fabric_nodes, expected_result, expected_data): + result = run_check( + tversion=AciVersion(tversion) if tversion else None, + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/helpers/__init__.py b/tests/checks/helpers/__init__.py similarity index 100% rename from tests/helpers/__init__.py rename to tests/checks/helpers/__init__.py diff --git a/tests/helpers/utils.py b/tests/checks/helpers/utils.py similarity index 68% rename from tests/helpers/utils.py rename to tests/checks/helpers/utils.py index 98fa913..10236f1 100644 --- 
a/tests/helpers/utils.py +++ b/tests/checks/helpers/utils.py @@ -3,7 +3,7 @@ def read_data(dir, json_file): - data_path = os.path.join("tests", dir, json_file) + data_path = os.path.join("tests", "checks", dir, json_file) with open(data_path, "r") as file: data = json.load(file) return data diff --git a/tests/https_throttle_rate_check/commHttps_neg1.json b/tests/checks/https_throttle_rate_check/commHttps_neg1.json similarity index 100% rename from tests/https_throttle_rate_check/commHttps_neg1.json rename to tests/checks/https_throttle_rate_check/commHttps_neg1.json diff --git a/tests/https_throttle_rate_check/commHttps_neg2.json b/tests/checks/https_throttle_rate_check/commHttps_neg2.json similarity index 100% rename from tests/https_throttle_rate_check/commHttps_neg2.json rename to tests/checks/https_throttle_rate_check/commHttps_neg2.json diff --git a/tests/https_throttle_rate_check/commHttps_pos.json b/tests/checks/https_throttle_rate_check/commHttps_pos.json similarity index 100% rename from tests/https_throttle_rate_check/commHttps_pos.json rename to tests/checks/https_throttle_rate_check/commHttps_pos.json diff --git a/tests/https_throttle_rate_check/test_https_throttle_rate_check.py b/tests/checks/https_throttle_rate_check/test_https_throttle_rate_check.py similarity index 88% rename from tests/https_throttle_rate_check/test_https_throttle_rate_check.py rename to tests/checks/https_throttle_rate_check/test_https_throttle_rate_check.py index f79edf2..7f2f9a6 100644 --- a/tests/https_throttle_rate_check/test_https_throttle_rate_check.py +++ b/tests/checks/https_throttle_rate_check/test_https_throttle_rate_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "https_throttle_rate_check" # icurl queries commHttps = "commHttps.json" @@ -75,8 +76,9 @@ ), ], ) -def test_logic(mock_icurl, cver, tver, expected_result): - cversion = script.AciVersion(cver) - tversion = script.AciVersion(tver) if 
tver else None - result = script.https_throttle_rate_check(1, 1, cversion, tversion) - assert result == expected_result +def test_logic(run_check, mock_icurl, cver, tver, expected_result): + result = run_check( + cversion=script.AciVersion(cver), + tversion=script.AciVersion(tver) if tver else None, + ) + assert result.result == expected_result diff --git a/tests/internal_vlanpool_check/fvnsVlanInstP_neg.json b/tests/checks/internal_vlanpool_check/fvnsVlanInstP_neg.json similarity index 100% rename from tests/internal_vlanpool_check/fvnsVlanInstP_neg.json rename to tests/checks/internal_vlanpool_check/fvnsVlanInstP_neg.json diff --git a/tests/internal_vlanpool_check/fvnsVlanInstP_pos.json b/tests/checks/internal_vlanpool_check/fvnsVlanInstP_pos.json similarity index 100% rename from tests/internal_vlanpool_check/fvnsVlanInstP_pos.json rename to tests/checks/internal_vlanpool_check/fvnsVlanInstP_pos.json diff --git a/tests/internal_vlanpool_check/test_internal_vlanpool_check.py b/tests/checks/internal_vlanpool_check/test_internal_vlanpool_check.py similarity index 93% rename from tests/internal_vlanpool_check/test_internal_vlanpool_check.py rename to tests/checks/internal_vlanpool_check/test_internal_vlanpool_check.py index 6d16279..704b335 100644 --- a/tests/internal_vlanpool_check/test_internal_vlanpool_check.py +++ b/tests/checks/internal_vlanpool_check/test_internal_vlanpool_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "internal_vlanpool_check" # icurl queries fvnsVlanInstPs = "fvnsVlanInstP.json?rsp-subtree=children&rsp-subtree-class=fvnsRtVlanNs,fvnsEncapBlk&rsp-subtree-include=required" @@ -100,6 +101,6 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.internal_vlanpool_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = 
run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/internal_vlanpool_check/vmmDomP_neg.json b/tests/checks/internal_vlanpool_check/vmmDomP_neg.json similarity index 100% rename from tests/internal_vlanpool_check/vmmDomP_neg.json rename to tests/checks/internal_vlanpool_check/vmmDomP_neg.json diff --git a/tests/internal_vlanpool_check/vmmDomP_pos.json b/tests/checks/internal_vlanpool_check/vmmDomP_pos.json similarity index 100% rename from tests/internal_vlanpool_check/vmmDomP_pos.json rename to tests/checks/internal_vlanpool_check/vmmDomP_pos.json diff --git a/tests/isis_database_byte_check/isisDTEp_NEG.json b/tests/checks/isis_database_byte_check/isisDTEp_NEG.json similarity index 100% rename from tests/isis_database_byte_check/isisDTEp_NEG.json rename to tests/checks/isis_database_byte_check/isisDTEp_NEG.json diff --git a/tests/isis_database_byte_check/isisDTEp_POS.json b/tests/checks/isis_database_byte_check/isisDTEp_POS.json similarity index 100% rename from tests/isis_database_byte_check/isisDTEp_POS.json rename to tests/checks/isis_database_byte_check/isisDTEp_POS.json diff --git a/tests/isis_database_byte_check/test_isis_database_byte_check.py b/tests/checks/isis_database_byte_check/test_isis_database_byte_check.py similarity index 88% rename from tests/isis_database_byte_check/test_isis_database_byte_check.py rename to tests/checks/isis_database_byte_check/test_isis_database_byte_check.py index 39003ba..538e132 100644 --- a/tests/isis_database_byte_check/test_isis_database_byte_check.py +++ b/tests/checks/isis_database_byte_check/test_isis_database_byte_check.py @@ -9,10 +9,13 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "isis_database_byte_check" + # icurl queries isisDTEp_api = 'isisDTEp.json' isisDTEp_api += '?query-target-filter=eq(isisDTEp.role,"spine")' + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ @@ -75,7 
+78,8 @@ ) ] ) -def test_logic(mock_icurl, tversion, expected_result): - tversion = script.AciVersion(tversion) if tversion else None - result = script.isis_database_byte_check(1, 1, tversion) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, + ) + assert result.result == expected_result diff --git a/tests/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos1.json b/tests/checks/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos1.json similarity index 100% rename from tests/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos1.json rename to tests/checks/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos1.json diff --git a/tests/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos2.json b/tests/checks/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos2.json similarity index 100% rename from tests/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos2.json rename to tests/checks/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos2.json diff --git a/tests/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos3.json b/tests/checks/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos3.json similarity index 100% rename from tests/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos3.json rename to tests/checks/isis_redis_metric_mpod_msite_check/fvFabricExtConnP_pos3.json diff --git a/tests/isis_redis_metric_mpod_msite_check/isisDomP-default_missing.json b/tests/checks/isis_redis_metric_mpod_msite_check/isisDomP-default_missing.json similarity index 100% rename from tests/isis_redis_metric_mpod_msite_check/isisDomP-default_missing.json rename to tests/checks/isis_redis_metric_mpod_msite_check/isisDomP-default_missing.json diff --git a/tests/isis_redis_metric_mpod_msite_check/isisDomP-default_neg.json b/tests/checks/isis_redis_metric_mpod_msite_check/isisDomP-default_neg.json similarity index 100% 
rename from tests/isis_redis_metric_mpod_msite_check/isisDomP-default_neg.json rename to tests/checks/isis_redis_metric_mpod_msite_check/isisDomP-default_neg.json diff --git a/tests/isis_redis_metric_mpod_msite_check/isisDomP-default_pos.json b/tests/checks/isis_redis_metric_mpod_msite_check/isisDomP-default_pos.json similarity index 100% rename from tests/isis_redis_metric_mpod_msite_check/isisDomP-default_pos.json rename to tests/checks/isis_redis_metric_mpod_msite_check/isisDomP-default_pos.json diff --git a/tests/isis_redis_metric_mpod_msite_check/test_isis_redis_metric_mpod_msite_check.py b/tests/checks/isis_redis_metric_mpod_msite_check/test_isis_redis_metric_mpod_msite_check.py similarity index 90% rename from tests/isis_redis_metric_mpod_msite_check/test_isis_redis_metric_mpod_msite_check.py rename to tests/checks/isis_redis_metric_mpod_msite_check/test_isis_redis_metric_mpod_msite_check.py index 153c248..a159de8 100644 --- a/tests/isis_redis_metric_mpod_msite_check/test_isis_redis_metric_mpod_msite_check.py +++ b/tests/checks/isis_redis_metric_mpod_msite_check/test_isis_redis_metric_mpod_msite_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "isis_redis_metric_mpod_msite_check" # icurl queries isisDomPs = "uni/fabric/isisDomP-default.json" @@ -55,6 +56,6 @@ ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.isis_redis_metric_mpod_msite_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/l3out_mtu_check/l2pol-default.json b/tests/checks/l3out_mtu_check/l2pol-default.json similarity index 100% rename from tests/l3out_mtu_check/l2pol-default.json rename to tests/checks/l3out_mtu_check/l2pol-default.json diff --git a/tests/l3out_mtu_check/l3extRsPathL3OutAtt.json b/tests/checks/l3out_mtu_check/l3extRsPathL3OutAtt.json similarity 
index 100% rename from tests/l3out_mtu_check/l3extRsPathL3OutAtt.json rename to tests/checks/l3out_mtu_check/l3extRsPathL3OutAtt.json diff --git a/tests/l3out_mtu_check/l3extVirtualLIfP.json b/tests/checks/l3out_mtu_check/l3extVirtualLIfP.json similarity index 100% rename from tests/l3out_mtu_check/l3extVirtualLIfP.json rename to tests/checks/l3out_mtu_check/l3extVirtualLIfP.json diff --git a/tests/l3out_mtu_check/l3extVirtualLIfP_unresolved.json b/tests/checks/l3out_mtu_check/l3extVirtualLIfP_unresolved.json similarity index 100% rename from tests/l3out_mtu_check/l3extVirtualLIfP_unresolved.json rename to tests/checks/l3out_mtu_check/l3extVirtualLIfP_unresolved.json diff --git a/tests/l3out_mtu_check/test_l3out_mtu_check.py b/tests/checks/l3out_mtu_check/test_l3out_mtu_check.py similarity index 92% rename from tests/l3out_mtu_check/test_l3out_mtu_check.py rename to tests/checks/l3out_mtu_check/test_l3out_mtu_check.py index ee74eb3..50986ac 100644 --- a/tests/l3out_mtu_check/test_l3out_mtu_check.py +++ b/tests/checks/l3out_mtu_check/test_l3out_mtu_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "l3out_mtu_check" # icurl queries regular_api = "l3extRsPathL3OutAtt.json" @@ -47,6 +48,6 @@ ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.l3out_mtu_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/l3out_overlapping_loopback_check/diff_l3out_loopback.json b/tests/checks/l3out_overlapping_loopback_check/diff_l3out_loopback.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/diff_l3out_loopback.json rename to tests/checks/l3out_overlapping_loopback_check/diff_l3out_loopback.json diff --git a/tests/l3out_overlapping_loopback_check/diff_l3out_loopback_and_rtrId.json 
b/tests/checks/l3out_overlapping_loopback_check/diff_l3out_loopback_and_rtrId.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/diff_l3out_loopback_and_rtrId.json rename to tests/checks/l3out_overlapping_loopback_check/diff_l3out_loopback_and_rtrId.json diff --git a/tests/l3out_overlapping_loopback_check/diff_l3out_rtrId.json b/tests/checks/l3out_overlapping_loopback_check/diff_l3out_rtrId.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/diff_l3out_rtrId.json rename to tests/checks/l3out_overlapping_loopback_check/diff_l3out_rtrId.json diff --git a/tests/l3out_overlapping_loopback_check/no_overlap.json b/tests/checks/l3out_overlapping_loopback_check/no_overlap.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/no_overlap.json rename to tests/checks/l3out_overlapping_loopback_check/no_overlap.json diff --git a/tests/l3out_overlapping_loopback_check/overlap_on_diff_nodes.json b/tests/checks/l3out_overlapping_loopback_check/overlap_on_diff_nodes.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/overlap_on_diff_nodes.json rename to tests/checks/l3out_overlapping_loopback_check/overlap_on_diff_nodes.json diff --git a/tests/l3out_overlapping_loopback_check/same_l3out_loopback.json b/tests/checks/l3out_overlapping_loopback_check/same_l3out_loopback.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/same_l3out_loopback.json rename to tests/checks/l3out_overlapping_loopback_check/same_l3out_loopback.json diff --git a/tests/l3out_overlapping_loopback_check/same_l3out_loopback_and_rtrId.json b/tests/checks/l3out_overlapping_loopback_check/same_l3out_loopback_and_rtrId.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/same_l3out_loopback_and_rtrId.json rename to tests/checks/l3out_overlapping_loopback_check/same_l3out_loopback_and_rtrId.json diff --git 
a/tests/l3out_overlapping_loopback_check/same_l3out_loopback_with_subnet_mask.json b/tests/checks/l3out_overlapping_loopback_check/same_l3out_loopback_with_subnet_mask.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/same_l3out_loopback_with_subnet_mask.json rename to tests/checks/l3out_overlapping_loopback_check/same_l3out_loopback_with_subnet_mask.json diff --git a/tests/l3out_overlapping_loopback_check/same_l3out_rtrId.json b/tests/checks/l3out_overlapping_loopback_check/same_l3out_rtrId.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/same_l3out_rtrId.json rename to tests/checks/l3out_overlapping_loopback_check/same_l3out_rtrId.json diff --git a/tests/l3out_overlapping_loopback_check/same_l3out_rtrId_non_vpc.json b/tests/checks/l3out_overlapping_loopback_check/same_l3out_rtrId_non_vpc.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/same_l3out_rtrId_non_vpc.json rename to tests/checks/l3out_overlapping_loopback_check/same_l3out_rtrId_non_vpc.json diff --git a/tests/l3out_overlapping_loopback_check/same_l3out_two_loopbacks.json b/tests/checks/l3out_overlapping_loopback_check/same_l3out_two_loopbacks.json similarity index 100% rename from tests/l3out_overlapping_loopback_check/same_l3out_two_loopbacks.json rename to tests/checks/l3out_overlapping_loopback_check/same_l3out_two_loopbacks.json diff --git a/tests/l3out_overlapping_loopback_check/test_l3out_overlapping_loopback_check.py b/tests/checks/l3out_overlapping_loopback_check/test_l3out_overlapping_loopback_check.py similarity index 92% rename from tests/l3out_overlapping_loopback_check/test_l3out_overlapping_loopback_check.py rename to tests/checks/l3out_overlapping_loopback_check/test_l3out_overlapping_loopback_check.py index b91cfff..0235de8 100644 --- a/tests/l3out_overlapping_loopback_check/test_l3out_overlapping_loopback_check.py +++ 
b/tests/checks/l3out_overlapping_loopback_check/test_l3out_overlapping_loopback_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "l3out_overlapping_loopback_check" # icurl queries api = 'l3extOut.json' @@ -43,6 +44,6 @@ ({api: read_data(dir, "diff_l3out_loopback_and_rtrId.json")}, script.FAIL_O), ], ) -def test_logic(mock_icurl, expected_result): - result = script.l3out_overlapping_loopback_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/l3out_route_map_missing_target_check/rtctrlProfile_missing_target.json b/tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_missing_target.json similarity index 100% rename from tests/l3out_route_map_missing_target_check/rtctrlProfile_missing_target.json rename to tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_missing_target.json diff --git a/tests/l3out_route_map_missing_target_check/rtctrlProfile_multiple_l3out_multiple_missing_target.json b/tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_multiple_l3out_multiple_missing_target.json similarity index 100% rename from tests/l3out_route_map_missing_target_check/rtctrlProfile_multiple_l3out_multiple_missing_target.json rename to tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_multiple_l3out_multiple_missing_target.json diff --git a/tests/l3out_route_map_missing_target_check/rtctrlProfile_multiple_missing_target.json b/tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_multiple_missing_target.json similarity index 100% rename from tests/l3out_route_map_missing_target_check/rtctrlProfile_multiple_missing_target.json rename to tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_multiple_missing_target.json diff --git 
a/tests/l3out_route_map_missing_target_check/rtctrlProfile_no_missing_target.json b/tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_no_missing_target.json similarity index 100% rename from tests/l3out_route_map_missing_target_check/rtctrlProfile_no_missing_target.json rename to tests/checks/l3out_route_map_missing_target_check/rtctrlProfile_no_missing_target.json diff --git a/tests/l3out_route_map_missing_target_check/test_l3out_route_map_missing_target_check.py b/tests/checks/l3out_route_map_missing_target_check/test_l3out_route_map_missing_target_check.py similarity index 86% rename from tests/l3out_route_map_missing_target_check/test_l3out_route_map_missing_target_check.py rename to tests/checks/l3out_route_map_missing_target_check/test_l3out_route_map_missing_target_check.py index 045d9aa..8abce06 100644 --- a/tests/l3out_route_map_missing_target_check/test_l3out_route_map_missing_target_check.py +++ b/tests/checks/l3out_route_map_missing_target_check/test_l3out_route_map_missing_target_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "l3out_route_map_missing_target_check" # icurl queries profiles = 'rtctrlProfile.json' @@ -63,6 +64,9 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.l3out_route_map_missing_target_check(1, 1, script.AciVersion(cversion), script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion), + ) + assert result.result == expected_result diff --git a/tests/lldp_custom_int_description_defect_check/fvRsDomAtt_neg.json b/tests/checks/lldp_custom_int_description_defect_check/fvRsDomAtt_neg.json similarity index 100% rename from tests/lldp_custom_int_description_defect_check/fvRsDomAtt_neg.json rename to 
tests/checks/lldp_custom_int_description_defect_check/fvRsDomAtt_neg.json diff --git a/tests/lldp_custom_int_description_defect_check/fvRsDomAtt_pos.json b/tests/checks/lldp_custom_int_description_defect_check/fvRsDomAtt_pos.json similarity index 100% rename from tests/lldp_custom_int_description_defect_check/fvRsDomAtt_pos.json rename to tests/checks/lldp_custom_int_description_defect_check/fvRsDomAtt_pos.json diff --git a/tests/lldp_custom_int_description_defect_check/infraPortBlk_neg.json b/tests/checks/lldp_custom_int_description_defect_check/infraPortBlk_neg.json similarity index 100% rename from tests/lldp_custom_int_description_defect_check/infraPortBlk_neg.json rename to tests/checks/lldp_custom_int_description_defect_check/infraPortBlk_neg.json diff --git a/tests/lldp_custom_int_description_defect_check/infraPortBlk_pos.json b/tests/checks/lldp_custom_int_description_defect_check/infraPortBlk_pos.json similarity index 100% rename from tests/lldp_custom_int_description_defect_check/infraPortBlk_pos.json rename to tests/checks/lldp_custom_int_description_defect_check/infraPortBlk_pos.json diff --git a/tests/lldp_custom_int_description_defect_check/test_lldp_custom_int_description_defect_check.py b/tests/checks/lldp_custom_int_description_defect_check/test_lldp_custom_int_description_defect_check.py similarity index 91% rename from tests/lldp_custom_int_description_defect_check/test_lldp_custom_int_description_defect_check.py rename to tests/checks/lldp_custom_int_description_defect_check/test_lldp_custom_int_description_defect_check.py index 20f86ac..72a4fb8 100644 --- a/tests/lldp_custom_int_description_defect_check/test_lldp_custom_int_description_defect_check.py +++ b/tests/checks/lldp_custom_int_description_defect_check/test_lldp_custom_int_description_defect_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "lldp_custom_int_description_defect_check" # icurl queries infraPortBlks 
= 'infraPortBlk.json?query-target-filter=ne(infraPortBlk.descr,"")&rsp-subtree-include=count' @@ -84,6 +85,6 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.lldp_custom_int_description_defect_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/llfc_susceptibility_check/ethpmFcot.json b/tests/checks/llfc_susceptibility_check/ethpmFcot.json similarity index 100% rename from tests/llfc_susceptibility_check/ethpmFcot.json rename to tests/checks/llfc_susceptibility_check/ethpmFcot.json diff --git a/tests/llfc_susceptibility_check/test_llfc_susceptibility_check.py b/tests/checks/llfc_susceptibility_check/test_llfc_susceptibility_check.py similarity index 86% rename from tests/llfc_susceptibility_check/test_llfc_susceptibility_check.py rename to tests/checks/llfc_susceptibility_check/test_llfc_susceptibility_check.py index c86b745..eb90593 100644 --- a/tests/llfc_susceptibility_check/test_llfc_susceptibility_check.py +++ b/tests/checks/llfc_susceptibility_check/test_llfc_susceptibility_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "llfc_susceptibility_check" # icurl queries ethpmFcots = 'ethpmFcot.json?query-target-filter=and(eq(ethpmFcot.type,"sfp"),eq(ethpmFcot.state,"inserted"))' @@ -75,12 +76,10 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, vpc_node_ids, expected_result): - result = script.llfc_susceptibility_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, cversion, tversion, vpc_node_ids, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, 
vpc_node_ids=vpc_node_ids, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/checks/mini_aci_6_0_2_check/fabricNode_all_phys_apic.json b/tests/checks/mini_aci_6_0_2_check/fabricNode_all_phys_apic.json new file mode 100644 index 0000000..f276f09 --- /dev/null +++ b/tests/checks/mini_aci_6_0_2_check/fabricNode_all_phys_apic.json @@ -0,0 +1,92 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": 
"active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git a/tests/checks/mini_aci_6_0_2_check/fabricNode_mini_aci.json b/tests/checks/mini_aci_6_0_2_check/fabricNode_mini_aci.json new file mode 100644 index 0000000..de57905 --- /dev/null +++ b/tests/checks/mini_aci_6_0_2_check/fabricNode_mini_aci.json @@ -0,0 +1,92 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "virtual", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "virtual", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": 
"topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git a/tests/checks/mini_aci_6_0_2_check/test_mini_aci_6_0_2_check.py b/tests/checks/mini_aci_6_0_2_check/test_mini_aci_6_0_2_check.py new file mode 100644 index 0000000..fe5bb27 --- /dev/null +++ b/tests/checks/mini_aci_6_0_2_check/test_mini_aci_6_0_2_check.py @@ -0,0 +1,75 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "mini_aci_6_0_2_check" + + +@pytest.mark.parametrize( + "cversion, tversion, fabric_nodes, expected_result, expected_data", + [ + # tversion missing + ( + "5.2(3a)", + None, + read_data(dir, "fabricNode_mini_aci.json"), + script.MANUAL, + [], + ), + # Version Not Affected (not crossing 6.0.2) + ( + "3.2(1a)", + "5.2(6a)", + read_data(dir, "fabricNode_mini_aci.json"), + script.NA, + [], + ), + # Version Not Affected (not crossing 6.0.2) + ( + "6.0(2e)", + "6.0(5d)", + read_data(dir, "fabricNode_mini_aci.json"), + script.NA, + [], + ), + # Version Affected, Not mini ACI + ( + "5.2(3a)", + "6.0(3d)", + read_data(dir, "fabricNode_all_phys_apic.json"), + script.PASS, + [], + ), + # Version Affected, mini ACI + ( + "4.2(2a)", + "6.0(2c)", + read_data(dir, "fabricNode_mini_aci.json"), + script.FAIL_UF, + [["2", "apic2", "virtual"], ["3", "apic3", "virtual"]], + ), + # Version Affected, mini ACI + ( + "6.0(1a)", + "6.0(2c)", + read_data(dir, "fabricNode_mini_aci.json"), + script.FAIL_UF, + [["2", "apic2", "virtual"], ["3", "apic3", "virtual"]], + ), + ], +) +def test_logic(run_check, cversion, tversion, fabric_nodes, expected_result, expected_data): + result = run_check( + cversion=script.AciVersion(cversion), 
+ tversion=script.AciVersion(tversion) if tversion else None, + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/mini_aci_6_0_2/topSystem_controller_neg.json b/tests/checks/mini_aci_6_0_2_check/topSystem_controller_neg.json similarity index 100% rename from tests/mini_aci_6_0_2/topSystem_controller_neg.json rename to tests/checks/mini_aci_6_0_2_check/topSystem_controller_neg.json diff --git a/tests/mini_aci_6_0_2/topSystem_controller_pos.json b/tests/checks/mini_aci_6_0_2_check/topSystem_controller_pos.json similarity index 100% rename from tests/mini_aci_6_0_2/topSystem_controller_pos.json rename to tests/checks/mini_aci_6_0_2_check/topSystem_controller_pos.json diff --git a/tests/n9408_model_check/eqptCh_NEG.json b/tests/checks/n9408_model_check/eqptCh_NEG.json similarity index 100% rename from tests/n9408_model_check/eqptCh_NEG.json rename to tests/checks/n9408_model_check/eqptCh_NEG.json diff --git a/tests/n9408_model_check/eqptCh_POS.json b/tests/checks/n9408_model_check/eqptCh_POS.json similarity index 100% rename from tests/n9408_model_check/eqptCh_POS.json rename to tests/checks/n9408_model_check/eqptCh_POS.json diff --git a/tests/n9408_model_check/test_n9408_model_check.py b/tests/checks/n9408_model_check/test_n9408_model_check.py similarity index 82% rename from tests/n9408_model_check/test_n9408_model_check.py rename to tests/checks/n9408_model_check/test_n9408_model_check.py index 86d01b4..3076de1 100644 --- a/tests/n9408_model_check/test_n9408_model_check.py +++ b/tests/checks/n9408_model_check/test_n9408_model_check.py @@ -9,12 +9,14 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "n9408_model_check" # icurl queries eqptCh_api = 'eqptCh.json' eqptCh_api += '?query-target-filter=eq(eqptCh.model,"N9K-C9400-SW-GX2A")' + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ @@ -38,6 +40,6 @@ ), ], ) -def 
test_logic(mock_icurl, tversion, expected_result): - result = script.n9408_model_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json new file mode 100644 index 0000000..d974762 --- /dev/null +++ b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json @@ -0,0 +1,77 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.113", + "dn": "topology/pod-2/node-113", + "fabricSt": "active", + "id": "113", + "model": "N9K-C93108TC-FX3H", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf113", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.114", + "dn": "topology/pod-2/node-114", + "fabricSt": "active", + "id": "114", + "model": "N9K-C93108TC-FX3H", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf114", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + 
"monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git a/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json new file mode 100644 index 0000000..e31c409 --- /dev/null +++ b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json @@ -0,0 +1,77 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.113", + "dn": "topology/pod-2/node-113", + "fabricSt": "active", + "id": "113", + "model": "N9K-C93108TC-FX3P", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf113", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.114", + "dn": "topology/pod-2/node-114", + "fabricSt": "active", + "id": "114", + "model": "N9K-C93108TC-FX3P", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf114", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git 
a/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json new file mode 100644 index 0000000..05a5ffa --- /dev/null +++ b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json @@ -0,0 +1,77 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.113", + "dn": "topology/pod-2/node-113", + "fabricSt": "active", + "id": "113", + "model": "N9K-C93108TC-FX3P", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf113", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.114", + "dn": "topology/pod-2/node-114", + "fabricSt": "active", + "id": "114", + "model": "N9K-C93108TC-FX3H", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf114", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git a/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_no_FX3P3H.json b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_no_FX3P3H.json new file mode 100644 index 
0000000..2d42772 --- /dev/null +++ b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/fabricNode_no_FX3P3H.json @@ -0,0 +1,47 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-1/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine201", + "nodeType": "unspecified", + "role": "spine" + } + } + } +] diff --git a/tests/checks/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py new file mode 100644 index 0000000..e23529c --- /dev/null +++ b/tests/checks/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py @@ -0,0 +1,93 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "n9k_c93108tc_fx3p_interface_down_check" + + +@pytest.mark.parametrize( + "tversion, fabric_nodes, expected_result, expected_data", + [ + # Version not supplied + (None, read_data(dir, "fabricNode_FX3P3H.json"), script.MANUAL, []), + # Version not affected + ("5.2(8h)", read_data(dir, "fabricNode_FX3P3H.json"), 
script.PASS, []), + ("5.3(2b)", read_data(dir, "fabricNode_FX3P3H.json"), script.PASS, []), + ("6.0(4c)", read_data(dir, "fabricNode_FX3P3H.json"), script.PASS, []), + # Affected version, no FX3P or FX3H + ("5.2(8g)", read_data(dir, "fabricNode_no_FX3P3H.json"), script.PASS, []), + ("5.3(1d)", read_data(dir, "fabricNode_no_FX3P3H.json"), script.PASS, []), + ("6.0(2h)", read_data(dir, "fabricNode_no_FX3P3H.json"), script.PASS, []), + # Affected version, FX3P + ( + "5.2(8g)", + read_data(dir, "fabricNode_FX3P.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3P"], ["114", "leaf114", "N9K-C93108TC-FX3P"]], + ), + ( + "5.3(1d)", + read_data(dir, "fabricNode_FX3P.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3P"], ["114", "leaf114", "N9K-C93108TC-FX3P"]], + ), + ( + "6.0(2h)", + read_data(dir, "fabricNode_FX3P.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3P"], ["114", "leaf114", "N9K-C93108TC-FX3P"]], + ), + # Affected version, FX3H + ( + "5.2(8g)", + read_data(dir, "fabricNode_FX3H.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3H"], ["114", "leaf114", "N9K-C93108TC-FX3H"]], + ), + ( + "5.3(1d)", + read_data(dir, "fabricNode_FX3H.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3H"], ["114", "leaf114", "N9K-C93108TC-FX3H"]], + ), + ( + "6.0(2h)", + read_data(dir, "fabricNode_FX3H.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3H"], ["114", "leaf114", "N9K-C93108TC-FX3H"]], + ), + # Affected version, FX3P and FX3H + ( + "5.2(8g)", + read_data(dir, "fabricNode_FX3P3H.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3P"], ["114", "leaf114", "N9K-C93108TC-FX3H"]], + ), + ( + "5.3(1d)", + read_data(dir, "fabricNode_FX3P3H.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3P"], ["114", "leaf114", "N9K-C93108TC-FX3H"]], + ), + ( + "6.0(2h)", + read_data(dir, "fabricNode_FX3P3H.json"), + script.FAIL_O, + [["113", "leaf113", "N9K-C93108TC-FX3P"], ["114", 
"leaf114", "N9K-C93108TC-FX3H"]], + ), + ], +) +def test_logic(run_check, tversion, fabric_nodes, expected_result, expected_data): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/checks/ntp_status_check/NEG_datetimeClkPol.json b/tests/checks/ntp_status_check/NEG_datetimeClkPol.json new file mode 100644 index 0000000..437efa3 --- /dev/null +++ b/tests/checks/ntp_status_check/NEG_datetimeClkPol.json @@ -0,0 +1,46 @@ +{ + "imdata": [ + { + "datetimeClkPol": { + "attributes": { + "StratumValue": "8", + "adminSt": "enabled", + "authSt": "disabled", + "childAction": "", + "clock": "2022-12-07T18:22:33.715-05:00", + "clockRaw": "13206872637755334329", + "descr": "", + "dn": "topology/pod-1/node-201/sys/time", + "flags": "synced", + "lcOwn": "local", + "leap": "0", + "masterMode": "disabled", + "modTs": "2022-01-02T22:36:04.148-05:00", + "monPolDn": "uni/fabric/monfab-Spine-Mon", + "name": "default", + "nameAlias": "", + "ntpdCfgFailedBmp": "", + "ntpdCfgFailedTs": "00:00:00:00.000", + "ntpdCfgState": "0", + "ownerKey": "", + "ownerTag": "", + "peer": "258740908", + "polDn": "uni/fabric/time-default", + "poll": "6", + "precision": "-20", + "refId": "172.18.108.15", + "refName": "172.18.108.15", + "refTime": "2022-12-07T18:21:19.436-05:00", + "refTimeRaw": "8059221958312304239", + "rootDelay": "49", + "rootDispersion": "2315255808", + "serverState": "disabled", + "srvStatus": "synced_remote_server", + "status": "", + "stratum": "2" + } + } + } + ], + "totalCount": "1" +} diff --git a/tests/checks/ntp_status_check/NEG_datetimeNtpq.json b/tests/checks/ntp_status_check/NEG_datetimeNtpq.json new file mode 100644 index 0000000..0b676fe --- /dev/null +++ b/tests/checks/ntp_status_check/NEG_datetimeNtpq.json @@ -0,0 +1,29 @@ +{ + "imdata": [ + { + "datetimeNtpq": { + "attributes": { + "auth": "none", + 
"childAction": "", + "delay": "0.875", + "dn": "topology/pod-1/node-1/sys/ntpq-calo-timeserver-1.cisco.com", + "jitter": "0.027", + "lcOwn": "local", + "modTs": "2022-12-07T18:20:27.571-05:00", + "monPolDn": "uni/fabric/monfab-default", + "offset": "0.004", + "poll": "64", + "reach": "377", + "refid": ".GPS.", + "remote": "calo-timeserver-1.cisco.com", + "status": "", + "stratum": "1", + "t": "u", + "tally": "*", + "when": "16" + } + } + } + ], + "totalCount": "1" +} diff --git a/tests/checks/ntp_status_check/POS_datetimeClkPol.json b/tests/checks/ntp_status_check/POS_datetimeClkPol.json new file mode 100644 index 0000000..5c83dbb --- /dev/null +++ b/tests/checks/ntp_status_check/POS_datetimeClkPol.json @@ -0,0 +1,46 @@ +{ + "imdata": [ + { + "datetimeClkPol": { + "attributes": { + "StratumValue": "8", + "adminSt": "enabled", + "authSt": "disabled", + "childAction": "", + "clock": "2022-12-07T18:22:33.715-05:00", + "clockRaw": "13206872637755334329", + "descr": "", + "dn": "topology/pod-1/node-201/sys/time", + "flags": "synced", + "lcOwn": "local", + "leap": "0", + "masterMode": "disabled", + "modTs": "2022-01-02T22:36:04.148-05:00", + "monPolDn": "uni/fabric/monfab-Spine-Mon", + "name": "default", + "nameAlias": "", + "ntpdCfgFailedBmp": "", + "ntpdCfgFailedTs": "00:00:00:00.000", + "ntpdCfgState": "0", + "ownerKey": "", + "ownerTag": "", + "peer": "258740908", + "polDn": "uni/fabric/time-default", + "poll": "6", + "precision": "-20", + "refId": "172.18.108.15", + "refName": "172.18.108.15", + "refTime": "2022-12-07T18:21:19.436-05:00", + "refTimeRaw": "8059221958312304239", + "rootDelay": "49", + "rootDispersion": "2315255808", + "serverState": "disabled", + "srvStatus": "", + "status": "", + "stratum": "2" + } + } + } + ], + "totalCount": "1" +} diff --git a/tests/checks/ntp_status_check/POS_datetimeNtpq.json b/tests/checks/ntp_status_check/POS_datetimeNtpq.json new file mode 100644 index 0000000..698e516 --- /dev/null +++ 
b/tests/checks/ntp_status_check/POS_datetimeNtpq.json @@ -0,0 +1,29 @@ +{ + "imdata": [ + { + "datetimeNtpq": { + "attributes": { + "auth": "none", + "childAction": "", + "delay": "0.875", + "dn": "topology/pod-1/node-1/sys/ntpq-calo-timeserver-1.cisco.com", + "jitter": "0.027", + "lcOwn": "local", + "modTs": "2022-12-07T18:20:27.571-05:00", + "monPolDn": "uni/fabric/monfab-default", + "offset": "0.004", + "poll": "64", + "reach": "377", + "refid": ".GPS.", + "remote": "calo-timeserver-1.cisco.com", + "status": "", + "stratum": "1", + "t": "u", + "tally": "", + "when": "16" + } + } + } + ], + "totalCount": "1" +} diff --git a/tests/checks/ntp_status_check/fabricNode.json b/tests/checks/ntp_status_check/fabricNode.json new file mode 100644 index 0000000..ce488e6 --- /dev/null +++ b/tests/checks/ntp_status_check/fabricNode.json @@ -0,0 +1,64 @@ +[ + { + "fabricNode": { + "attributes": { + "adSt": "on", + "address": "10.0.0.1", + "annotation": "", + "apicType": "apic", + "childAction": "", + "delayedHeartbeat": "no", + "dn": "topology/pod-1/node-1", + "extMngdBy": "", + "fabricSt": "unknown", + "id": "1", + "lastStateModTs": "2022-11-14T13:36:15.401-05:00", + "lcOwn": "local", + "modTs": "2022-11-14T13:36:16.119-05:00", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "fab3-apic1", + "nameAlias": "", + "nodeType": "unspecified", + "role": "controller", + "serial": "FOX1234ABCE", + "status": "", + "uid": "0", + "userdom": "all", + "vendor": "Cisco Systems, Inc", + "version": "5.2(7f)" + } + } + }, + { + "fabricNode": { + "attributes": { + "adSt": "on", + "address": "10.0.128.65", + "annotation": "", + "apicType": "apic", + "childAction": "", + "delayedHeartbeat": "no", + "dn": "topology/pod-1/node-201", + "extMngdBy": "", + "fabricSt": "active", + "id": "201", + "lastStateModTs": "2022-03-30T11:34:22.529-05:00", + "lcOwn": "local", + "modTs": "2022-03-30T11:34:34.616-05:00", + "model": "N9K-C9504", + "monPolDn": 
"uni/fabric/monfab-default", + "name": "fab3-spine201", + "nameAlias": "", + "nodeType": "unspecified", + "role": "spine", + "serial": "FOX1234ABCD", + "status": "", + "uid": "0", + "userdom": "all", + "vendor": "Cisco Systems, Inc", + "version": "n9000-14.2(6d)" + } + } + } +] diff --git a/tests/checks/ntp_status_check/test_ntp_status_check.py b/tests/checks/ntp_status_check/test_ntp_status_check.py new file mode 100644 index 0000000..3fff822 --- /dev/null +++ b/tests/checks/ntp_status_check/test_ntp_status_check.py @@ -0,0 +1,65 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "ntp_status_check" + +# icurl queries +apic_ntp = "datetimeNtpq.json" +switch_ntp = "datetimeClkPol.json" + + +@pytest.mark.parametrize( + "icurl_outputs, expected_result, expected_data", + [ + # FAIL - APIC is not Synced + ( + { + apic_ntp: read_data(dir, "POS_datetimeNtpq.json"), + switch_ntp: read_data(dir, "NEG_datetimeClkPol.json"), + }, + script.FAIL_UF, + [["1", "1"]], + ), + # FAIL - Switch is not Synced + ( + { + apic_ntp: read_data(dir, "NEG_datetimeNtpq.json"), + switch_ntp: read_data(dir, "POS_datetimeClkPol.json"), + }, + script.FAIL_UF, + [["1", "201"]], + ), + # FAIL - APIC and Switch are not Synced + ( + { + apic_ntp: read_data(dir, "POS_datetimeNtpq.json"), + switch_ntp: read_data(dir, "POS_datetimeClkPol.json"), + }, + script.FAIL_UF, + [["1", "1"], ["1", "201"]], + ), + # PASS - Both are synced + ( + { + apic_ntp: read_data(dir, "NEG_datetimeNtpq.json"), + switch_ntp: read_data(dir, "NEG_datetimeClkPol.json"), + }, + script.PASS, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, expected_result, expected_data): + result = run_check( + fabric_nodes=read_data(dir, "fabricNode.json"), + ) + assert result.result == expected_result + assert 
result.data == expected_data diff --git a/tests/checks/observer_db_size_check/fabricNode.json b/tests/checks/observer_db_size_check/fabricNode.json new file mode 100644 index 0000000..21eed6a --- /dev/null +++ b/tests/checks/observer_db_size_check/fabricNode.json @@ -0,0 +1,46 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "id": "1", + "name": "apic1", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "id": "2", + "name": "apic2", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-1/node-3", + "id": "3", + "name": "apic3", + "role": "controller" + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "id": "101", + "name": "leaf1", + "role": "leaf" + } + } + } +] diff --git a/tests/checks/observer_db_size_check/fabricNode_no_apic.json b/tests/checks/observer_db_size_check/fabricNode_no_apic.json new file mode 100644 index 0000000..b82c912 --- /dev/null +++ b/tests/checks/observer_db_size_check/fabricNode_no_apic.json @@ -0,0 +1,13 @@ +[ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "id": "101", + "name": "fab5-leaf1", + "role": "leaf" + } + } + } +] diff --git a/tests/checks/observer_db_size_check/fabricNode_old.json b/tests/checks/observer_db_size_check/fabricNode_old.json new file mode 100644 index 0000000..f71fb9f --- /dev/null +++ b/tests/checks/observer_db_size_check/fabricNode_old.json @@ -0,0 +1,62 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "1", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic1", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-2", 
+ "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "2", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic2", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-3", + "fabricSt": "unknown", + "nodeType": "unspecified", + "id": "3", + "version": "A", + "role": "controller", + "adSt": "on", + "name": "apic3", + "model": "APIC-SERVER-M1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "nodeType": "unspecified", + "id": "101", + "version": "", + "role": "leaf", + "adSt": "on", + "name": "leaf1", + "model": "N9K-C9396PX" + } + } + } +] diff --git a/tests/checks/observer_db_size_check/infraWiNode_apic1.json b/tests/checks/observer_db_size_check/infraWiNode_apic1.json new file mode 100644 index 0000000..b6626d0 --- /dev/null +++ b/tests/checks/observer_db_size_check/infraWiNode_apic1.json @@ -0,0 +1,62 @@ +[ + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.1", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": "topology/pod-1/node-1/av/node-1", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "1", + "mbSn": "FCH1234ABCD", + "name": "", + "nodeName": "apic1", + "operSt": "available", + "podId": "0", + "targetMbSn": "" + } + } + }, + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.2", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": "topology/pod-1/node-1/av/node-2", + "failoverStatus": "idle", + "health": "fully-fit", + "id": "2", + "mbSn": "FCH1235ABCD", + "name": "", + "nodeName": "apic2", + "operSt": "available", + "podId": "0", + "targetMbSn": "" + } + } + }, + { + "infraWiNode": { + "attributes": { + "addr": "10.0.0.3", + "adminSt": "in-service", + "apicMode": "active", + "cntrlSbstState": "approved", + "dn": "topology/pod-1/node-1/av/node-3", + "failoverStatus": "idle", + "health": "fully-fit", + 
"id": "3", + "mbSn": "FCH1236ABCD", + "name": "", + "nodeName": "apic3", + "operSt": "available", + "podId": "1", + "targetMbSn": "" + } + } + } +] diff --git a/tests/checks/observer_db_size_check/test_observer_db_size_check.py b/tests/checks/observer_db_size_check/test_observer_db_size_check.py new file mode 100644 index 0000000..b70f791 --- /dev/null +++ b/tests/checks/observer_db_size_check/test_observer_db_size_check.py @@ -0,0 +1,185 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "observer_db_size_check" + +infraWiNode = "topology/pod-1/node-1/infraWiNode.json" + +fabricNodes = read_data(dir, "fabricNode.json") +apic_ips = [ + mo["fabricNode"]["attributes"]["address"] + for mo in fabricNodes + if mo["fabricNode"]["attributes"]["role"] == "controller" +] + +ls_cmd = "ls -lh /data2/dbstats | awk '{print $5, $9}'" +ls_output_neg = """\ + +11M observer_8.db +11M observer_9.db +11M observer_10.db +11M observer_template.db +apic1# +""" +ls_output_pos = """\ + +1.0G observer_8.db +12G observer_9.db +999M observer_10.db +11M observer_template.db +apic1# +""" +ls_output_no_such_file = """\ +ls: cannot access /data2/dbstats: No such file or directory +apic1# +""" + + +@pytest.mark.parametrize( + "icurl_outputs, fabric_nodes, conn_failure, conn_cmds, expected_result, expected_data", + [ + # Connection failure + ( + {}, + fabricNodes, + True, + [], + script.ERROR, + [ + ["1", "apic1", "-", "Simulated exception at connect()"], + ["2", "apic2", "-", "Simulated exception at connect()"], + ["3", "apic3", "-", "Simulated exception at connect()"], + ], + ), + # Simulatated exception at `ls` command + ( + {}, + fabricNodes, + False, + { + apic_ip: [ + { + "cmd": ls_cmd, + "output": "", + "exception": Exception("Simulated exception at `ls` command"), + 
} + ] + for apic_ip in apic_ips + }, + script.ERROR, + [ + ["1", "apic1", "-", "Simulated exception at `ls` command"], + ["2", "apic2", "-", "Simulated exception at `ls` command"], + ["3", "apic3", "-", "Simulated exception at `ls` command"], + ], + ), + # dbstats dir not found/not accessible + ( + {}, + fabricNodes, + False, + { + apic_ip: [ + { + "cmd": ls_cmd, + "output": "\n".join([ls_cmd, ls_output_no_such_file]), + "exception": None, + } + ] + for apic_ip in apic_ips + }, + script.ERROR, + [ + ["1", "apic1", "/data2/dbstats/ not found", "Check user permissions or retry as 'apic#fallback\\\\admin'"], + ["2", "apic2", "/data2/dbstats/ not found", "Check user permissions or retry as 'apic#fallback\\\\admin'"], + ["3", "apic3", "/data2/dbstats/ not found", "Check user permissions or retry as 'apic#fallback\\\\admin'"], + ], + ), + # dbstats dir found, all DBs under 1G + ( + {}, + fabricNodes, + False, + { + apic_ip: [ + { + "cmd": ls_cmd, + "output": "\n".join([ls_cmd, ls_output_neg]), + "exception": None, + } + ] + for apic_ip in apic_ips + }, + script.PASS, + [], + ), + # dbstats dir found, all DBs under 1G (pre-4.0 with infraWiNode) + ( + {infraWiNode: read_data(dir, "infraWiNode_apic1.json")}, + read_data(dir, "fabricNode_old.json"), + False, + { + apic_ip: [ + { + "cmd": ls_cmd, + "output": "\n".join([ls_cmd, ls_output_neg]), + "exception": None, + } + ] + for apic_ip in apic_ips + }, + script.PASS, + [], + ), + # dbstats dir found, found DBs over 1G + ( + {}, + fabricNodes, + False, + { + apic_ip: [ + { + "cmd": ls_cmd, + "output": "\n".join([ls_cmd, ls_output_pos]), + "exception": None, + } + ] + for apic_ip in apic_ips + }, + script.FAIL_UF, + [ + ["1", "apic1", "/data2/dbstats/observer_8.db", "1.0G"], + ["1", "apic1", "/data2/dbstats/observer_9.db", "12G"], + ["2", "apic2", "/data2/dbstats/observer_8.db", "1.0G"], + ["2", "apic2", "/data2/dbstats/observer_9.db", "12G"], + ["3", "apic3", "/data2/dbstats/observer_8.db", "1.0G"], + ["3", "apic3", 
"/data2/dbstats/observer_9.db", "12G"], + ], + ), + # ERROR, fabricNode failure + ( + {}, + read_data(dir, "fabricNode_no_apic.json"), + False, + [], + script.ERROR, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, fabric_nodes, mock_conn, expected_result, expected_data): + result = run_check( + username="fake_username", + password="fake_password", + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/oob_mgmt_security_check/mgmtInstP.json b/tests/checks/oob_mgmt_security_check/mgmtInstP.json similarity index 100% rename from tests/oob_mgmt_security_check/mgmtInstP.json rename to tests/checks/oob_mgmt_security_check/mgmtInstP.json diff --git a/tests/oob_mgmt_security_check/mgmtInstP_no_contracts.json b/tests/checks/oob_mgmt_security_check/mgmtInstP_no_contracts.json similarity index 100% rename from tests/oob_mgmt_security_check/mgmtInstP_no_contracts.json rename to tests/checks/oob_mgmt_security_check/mgmtInstP_no_contracts.json diff --git a/tests/oob_mgmt_security_check/mgmtInstP_no_subnets.json b/tests/checks/oob_mgmt_security_check/mgmtInstP_no_subnets.json similarity index 100% rename from tests/oob_mgmt_security_check/mgmtInstP_no_subnets.json rename to tests/checks/oob_mgmt_security_check/mgmtInstP_no_subnets.json diff --git a/tests/oob_mgmt_security_check/mgmtOoB.json b/tests/checks/oob_mgmt_security_check/mgmtOoB.json similarity index 100% rename from tests/oob_mgmt_security_check/mgmtOoB.json rename to tests/checks/oob_mgmt_security_check/mgmtOoB.json diff --git a/tests/oob_mgmt_security_check/mgmtOoB_no_contracts.json b/tests/checks/oob_mgmt_security_check/mgmtOoB_no_contracts.json similarity index 100% rename from tests/oob_mgmt_security_check/mgmtOoB_no_contracts.json rename to tests/checks/oob_mgmt_security_check/mgmtOoB_no_contracts.json diff --git a/tests/oob_mgmt_security_check/test_oob_mgmt_security_check.py 
b/tests/checks/oob_mgmt_security_check/test_oob_mgmt_security_check.py similarity index 92% rename from tests/oob_mgmt_security_check/test_oob_mgmt_security_check.py rename to tests/checks/oob_mgmt_security_check/test_oob_mgmt_security_check.py index 95b33b1..86e093a 100644 --- a/tests/oob_mgmt_security_check/test_oob_mgmt_security_check.py +++ b/tests/checks/oob_mgmt_security_check/test_oob_mgmt_security_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "oob_mgmt_security_check" # icurl queries mgmtOoB = "mgmtOoB.json?rsp-subtree=children" @@ -129,8 +130,9 @@ ), ], ) -def test_logic(mock_icurl, cver, tver, expected_result): - cversion = script.AciVersion(cver) - tversion = script.AciVersion(tver) if tver else None - result = script.oob_mgmt_security_check(1, 1, cversion, tversion) - assert result == expected_result +def test_logic(run_check, mock_icurl, cver, tver, expected_result): + result = run_check( + cversion=script.AciVersion(cver), + tversion=script.AciVersion(tver) if tver else None, + ) + assert result.result == expected_result diff --git a/tests/out_of_service_ports_check/ethpmPhysIf-neg.json b/tests/checks/out_of_service_ports_check/ethpmPhysIf-neg.json similarity index 100% rename from tests/out_of_service_ports_check/ethpmPhysIf-neg.json rename to tests/checks/out_of_service_ports_check/ethpmPhysIf-neg.json diff --git a/tests/out_of_service_ports_check/ethpmPhysIf-pos.json b/tests/checks/out_of_service_ports_check/ethpmPhysIf-pos.json similarity index 100% rename from tests/out_of_service_ports_check/ethpmPhysIf-pos.json rename to tests/checks/out_of_service_ports_check/ethpmPhysIf-pos.json diff --git a/tests/out_of_service_ports_check/test_out_of_service_ports_check.py b/tests/checks/out_of_service_ports_check/test_out_of_service_ports_check.py similarity index 75% rename from tests/out_of_service_ports_check/test_out_of_service_ports_check.py rename to 
tests/checks/out_of_service_ports_check/test_out_of_service_ports_check.py index 069677e..4296eee 100644 --- a/tests/out_of_service_ports_check/test_out_of_service_ports_check.py +++ b/tests/checks/out_of_service_ports_check/test_out_of_service_ports_check.py @@ -9,6 +9,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "out_of_service_ports_check" + # operst: '1' = 'up' # usage: '32' = 'blacklist', '2' = 'epg'. '34'= 'blacklist,epg' ethpmPhysIf_api = 'ethpmPhysIf.json' @@ -19,17 +21,17 @@ "icurl_outputs, expected_result", [ ( - ## Two 'up' ports flagged with 'blacklist,epg' + # Two 'up' ports flagged with 'blacklist,epg' {ethpmPhysIf_api: read_data(dir, "ethpmPhysIf-pos.json")}, script.FAIL_O, ), ( - ## 0 ports returned + # 0 ports returned {ethpmPhysIf_api: read_data(dir, "ethpmPhysIf-neg.json")}, script.PASS, ) ], ) -def test_logic(mock_icurl, expected_result): - result = script.out_of_service_ports_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/overlapping_vlan_pools_check/access_policy.json b/tests/checks/overlapping_vlan_pools_check/access_policy.json similarity index 100% rename from tests/overlapping_vlan_pools_check/access_policy.json rename to tests/checks/overlapping_vlan_pools_check/access_policy.json diff --git a/tests/overlapping_vlan_pools_check/fvAEPg.json b/tests/checks/overlapping_vlan_pools_check/fvAEPg.json similarity index 100% rename from tests/overlapping_vlan_pools_check/fvAEPg.json rename to tests/checks/overlapping_vlan_pools_check/fvAEPg.json diff --git a/tests/overlapping_vlan_pools_check/fvIfConn.json b/tests/checks/overlapping_vlan_pools_check/fvIfConn.json similarity index 100% rename from tests/overlapping_vlan_pools_check/fvIfConn.json rename to tests/checks/overlapping_vlan_pools_check/fvIfConn.json diff --git 
a/tests/overlapping_vlan_pools_check/infraSetPol_no.json b/tests/checks/overlapping_vlan_pools_check/infraSetPol_no.json similarity index 100% rename from tests/overlapping_vlan_pools_check/infraSetPol_no.json rename to tests/checks/overlapping_vlan_pools_check/infraSetPol_no.json diff --git a/tests/overlapping_vlan_pools_check/infraSetPol_yes.json b/tests/checks/overlapping_vlan_pools_check/infraSetPol_yes.json similarity index 100% rename from tests/overlapping_vlan_pools_check/infraSetPol_yes.json rename to tests/checks/overlapping_vlan_pools_check/infraSetPol_yes.json diff --git a/tests/overlapping_vlan_pools_check/templates/access_policy.j2 b/tests/checks/overlapping_vlan_pools_check/templates/access_policy.j2 similarity index 100% rename from tests/overlapping_vlan_pools_check/templates/access_policy.j2 rename to tests/checks/overlapping_vlan_pools_check/templates/access_policy.j2 diff --git a/tests/overlapping_vlan_pools_check/templates/fvAEPg.j2 b/tests/checks/overlapping_vlan_pools_check/templates/fvAEPg.j2 similarity index 100% rename from tests/overlapping_vlan_pools_check/templates/fvAEPg.j2 rename to tests/checks/overlapping_vlan_pools_check/templates/fvAEPg.j2 diff --git a/tests/overlapping_vlan_pools_check/templates/fvIfConn.j2 b/tests/checks/overlapping_vlan_pools_check/templates/fvIfConn.j2 similarity index 100% rename from tests/overlapping_vlan_pools_check/templates/fvIfConn.j2 rename to tests/checks/overlapping_vlan_pools_check/templates/fvIfConn.j2 diff --git a/tests/overlapping_vlan_pools_check/templates/macros.j2 b/tests/checks/overlapping_vlan_pools_check/templates/macros.j2 similarity index 100% rename from tests/overlapping_vlan_pools_check/templates/macros.j2 rename to tests/checks/overlapping_vlan_pools_check/templates/macros.j2 diff --git a/tests/overlapping_vlan_pools_check/test_overlapping_vlan_pools_check.py b/tests/checks/overlapping_vlan_pools_check/test_overlapping_vlan_pools_check.py similarity index 99% rename from 
tests/overlapping_vlan_pools_check/test_overlapping_vlan_pools_check.py rename to tests/checks/overlapping_vlan_pools_check/test_overlapping_vlan_pools_check.py index cf6f6ee..24a31f0 100644 --- a/tests/overlapping_vlan_pools_check/test_overlapping_vlan_pools_check.py +++ b/tests/checks/overlapping_vlan_pools_check/test_overlapping_vlan_pools_check.py @@ -11,6 +11,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "overlapping_vlan_pools_check" + j2_env = Environment(loader=FileSystemLoader("/".join([dir, "templates"]))) tmpl = { "infra": j2_env.get_template("access_policy.j2"), @@ -2384,15 +2386,7 @@ def input(param): + [input(param) for param in params], ids=["validation_yes", "validation_no"] + [param["id"] for param in params], ) -def test_logic(capsys, mock_icurl, expected_result, expected_num_bad_ports): - result = script.overlapping_vlan_pools_check(1, 1) - assert result == expected_result - - captured = capsys.readouterr() - log.debug(captured.out) - lines = [ - x - for x in captured.out.split("\n") - if x.endswith("Outage") or x.endswith("Flood Scope") - ] - assert len(lines) == expected_num_bad_ports +def test_logic(run_check, mock_icurl, expected_result, expected_num_bad_ports): + result = run_check() + assert result.result == expected_result + assert len(result.data) == expected_num_bad_ports diff --git a/tests/pbr_high_scale_check/test_pbr_high_scale_check.py b/tests/checks/pbr_high_scale_check/test_pbr_high_scale_check.py similarity index 90% rename from tests/pbr_high_scale_check/test_pbr_high_scale_check.py rename to tests/checks/pbr_high_scale_check/test_pbr_high_scale_check.py index 5a498f4..9516bea 100644 --- a/tests/pbr_high_scale_check/test_pbr_high_scale_check.py +++ b/tests/checks/pbr_high_scale_check/test_pbr_high_scale_check.py @@ -9,12 +9,14 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "pbr_high_scale_check" # icurl queries 
vnsAdjacencyDefCont_api = 'vnsAdjacencyDefCont.json' vnsSvcRedirEcmpBucketCons_api = 'vnsSvcRedirEcmpBucketCons.json' count_filter = '?rsp-subtree-include=count' + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ @@ -65,10 +67,8 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.pbr_high_scale_check( - 1, - 1, - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/pbr_high_scale_check/vnsAdjacencyDefCont_HIGH.json b/tests/checks/pbr_high_scale_check/vnsAdjacencyDefCont_HIGH.json similarity index 100% rename from tests/pbr_high_scale_check/vnsAdjacencyDefCont_HIGH.json rename to tests/checks/pbr_high_scale_check/vnsAdjacencyDefCont_HIGH.json diff --git a/tests/pbr_high_scale_check/vnsAdjacencyDefCont_LOW.json b/tests/checks/pbr_high_scale_check/vnsAdjacencyDefCont_LOW.json similarity index 100% rename from tests/pbr_high_scale_check/vnsAdjacencyDefCont_LOW.json rename to tests/checks/pbr_high_scale_check/vnsAdjacencyDefCont_LOW.json diff --git a/tests/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_HIGH.json b/tests/checks/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_HIGH.json similarity index 100% rename from tests/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_HIGH.json rename to tests/checks/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_HIGH.json diff --git a/tests/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_LOW.json b/tests/checks/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_LOW.json similarity index 100% rename from tests/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_LOW.json rename to tests/checks/pbr_high_scale_check/vnsSvcRedirEcmpBucketCons_LOW.json diff --git a/tests/post_upgrade_cb_check/moCount_0.json 
b/tests/checks/post_upgrade_cb_check/moCount_0.json similarity index 100% rename from tests/post_upgrade_cb_check/moCount_0.json rename to tests/checks/post_upgrade_cb_check/moCount_0.json diff --git a/tests/post_upgrade_cb_check/moCount_10.json b/tests/checks/post_upgrade_cb_check/moCount_10.json similarity index 100% rename from tests/post_upgrade_cb_check/moCount_10.json rename to tests/checks/post_upgrade_cb_check/moCount_10.json diff --git a/tests/post_upgrade_cb_check/moCount_8.json b/tests/checks/post_upgrade_cb_check/moCount_8.json similarity index 100% rename from tests/post_upgrade_cb_check/moCount_8.json rename to tests/checks/post_upgrade_cb_check/moCount_8.json diff --git a/tests/post_upgrade_cb_check/test_post_upgrade_cb_check.py b/tests/checks/post_upgrade_cb_check/test_post_upgrade_cb_check.py similarity index 92% rename from tests/post_upgrade_cb_check/test_post_upgrade_cb_check.py rename to tests/checks/post_upgrade_cb_check/test_post_upgrade_cb_check.py index c964ad9..fb14efc 100644 --- a/tests/post_upgrade_cb_check/test_post_upgrade_cb_check.py +++ b/tests/checks/post_upgrade_cb_check/test_post_upgrade_cb_check.py @@ -9,6 +9,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "post_upgrade_cb_check" + # icurl queries mo1_new = "infraRsToImplicitSetPol.json?rsp-subtree-include=count" @@ -29,7 +31,6 @@ mo6_new = 'compatSwitchHw.json?rsp-subtree-include=count&query-target-filter=eq(compatSwitchHw.suppBit,"32")' - # icurl output sets mo_count_pass = { mo1_new: read_data(dir, "moCount_10.json"), @@ -96,11 +97,9 @@ (mo_count_fail, "6.0(3e)", "6.0(3e)", script.FAIL_O), ] ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.post_upgrade_cb_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + 
cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_new.json b/tests/checks/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_new.json similarity index 100% rename from tests/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_new.json rename to tests/checks/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_new.json diff --git a/tests/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_old.json b/tests/checks/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_old.json similarity index 100% rename from tests/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_old.json rename to tests/checks/prefix_already_in_use_check/faultInst_F0467_prefix-entry-already-in-use_old.json diff --git a/tests/prefix_already_in_use_check/fvCtx.json b/tests/checks/prefix_already_in_use_check/fvCtx.json similarity index 100% rename from tests/prefix_already_in_use_check/fvCtx.json rename to tests/checks/prefix_already_in_use_check/fvCtx.json diff --git a/tests/prefix_already_in_use_check/l3extRsEctx.json b/tests/checks/prefix_already_in_use_check/l3extRsEctx.json similarity index 100% rename from tests/prefix_already_in_use_check/l3extRsEctx.json rename to tests/checks/prefix_already_in_use_check/l3extRsEctx.json diff --git a/tests/prefix_already_in_use_check/l3extSubnet_no_overlap.json b/tests/checks/prefix_already_in_use_check/l3extSubnet_no_overlap.json similarity index 100% rename from tests/prefix_already_in_use_check/l3extSubnet_no_overlap.json rename to tests/checks/prefix_already_in_use_check/l3extSubnet_no_overlap.json diff --git a/tests/prefix_already_in_use_check/l3extSubnet_overlap.json 
b/tests/checks/prefix_already_in_use_check/l3extSubnet_overlap.json similarity index 100% rename from tests/prefix_already_in_use_check/l3extSubnet_overlap.json rename to tests/checks/prefix_already_in_use_check/l3extSubnet_overlap.json diff --git a/tests/prefix_already_in_use_check/test_prefix_already_in_use_check.py b/tests/checks/prefix_already_in_use_check/test_prefix_already_in_use_check.py similarity index 91% rename from tests/prefix_already_in_use_check/test_prefix_already_in_use_check.py rename to tests/checks/prefix_already_in_use_check/test_prefix_already_in_use_check.py index 8871eb7..10fe53b 100644 --- a/tests/prefix_already_in_use_check/test_prefix_already_in_use_check.py +++ b/tests/checks/prefix_already_in_use_check/test_prefix_already_in_use_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "prefix_already_in_use_check" # icurl queries faultInst = 'faultInst.json?query-target-filter=and(wcard(faultInst.changeSet,"prefix-entry-already-in-use"),wcard(faultInst.dn,"uni/epp/rtd"))' @@ -55,6 +56,6 @@ ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.prefix_already_in_use_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/checks/r_leaf_compatibility_check/fabricNode_no_RL.json b/tests/checks/r_leaf_compatibility_check/fabricNode_no_RL.json new file mode 100644 index 0000000..025af75 --- /dev/null +++ b/tests/checks/r_leaf_compatibility_check/fabricNode_no_RL.json @@ -0,0 +1,123 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "id": "1", + "name": "apic1", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-2", + "id": "2", + "name": "apic2", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + 
"fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-3", + "id": "3", + "name": "apic3", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "id": "101", + "name": "LF101", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-102", + "id": "102", + "name": "LF102", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-103", + "id": "103", + "name": "LF103", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-104", + "id": "104", + "name": "LF104", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-111", + "id": "111", + "name": "T2_LF111", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-112", + "id": "112", + "name": "T2_LF112", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1001", + "id": "1001", + "name": "SP1001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1002", + "id": "1002", + "name": "SP1002", + "role": "spine", + "nodeType": "unspecified" + } + } + } +] diff --git a/tests/checks/r_leaf_compatibility_check/fabricNode_with_RL.json b/tests/checks/r_leaf_compatibility_check/fabricNode_with_RL.json new file mode 100644 index 0000000..73e7d5b --- /dev/null +++ b/tests/checks/r_leaf_compatibility_check/fabricNode_with_RL.json @@ -0,0 +1,145 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "id": "1", + "name": "apic1", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + 
"fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-2", + "id": "2", + "name": "apic2", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-3", + "id": "3", + "name": "apic3", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "id": "101", + "name": "LF101", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-102", + "id": "102", + "name": "LF102", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-103", + "id": "103", + "name": "LF103", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-104", + "id": "104", + "name": "LF104", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-111", + "id": "111", + "name": "T2_LF111", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-112", + "id": "112", + "name": "T2_LF112", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-121", + "id": "121", + "name": "RL_LF121", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-122", + "id": "122", + "name": "RL_LF122", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1001", + "id": "1001", + "name": "SP1001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1002", + "id": "1002", + "name": "SP1002", + "role": 
"spine", + "nodeType": "unspecified" + } + } + } +] diff --git a/tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_disabled.json b/tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_disabled.json new file mode 100644 index 0000000..fd88e04 --- /dev/null +++ b/tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_disabled.json @@ -0,0 +1,13 @@ +{ + "totalCount": "1", + "imdata": [ + { + "infraSetPol": { + "attributes": { + "dn": "uni/infra/settings", + "enableRemoteLeafDirect": "no" + } + } + } + ] +} diff --git a/tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_enabled.json b/tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_enabled.json new file mode 100644 index 0000000..52e0a3e --- /dev/null +++ b/tests/checks/r_leaf_compatibility_check/infraSetPol_DTF_enabled.json @@ -0,0 +1,13 @@ +{ + "totalCount": "1", + "imdata": [ + { + "infraSetPol": { + "attributes": { + "dn": "uni/infra/settings", + "enableRemoteLeafDirect": "yes" + } + } + } + ] +} diff --git a/tests/checks/r_leaf_compatibility_check/infraSetPol_no_DTF.json b/tests/checks/r_leaf_compatibility_check/infraSetPol_no_DTF.json new file mode 100644 index 0000000..0b6f56b --- /dev/null +++ b/tests/checks/r_leaf_compatibility_check/infraSetPol_no_DTF.json @@ -0,0 +1,12 @@ +{ + "totalCount": "1", + "imdata": [ + { + "infraSetPol": { + "attributes": { + "dn": "uni/infra/settings" + } + } + } + ] +} diff --git a/tests/checks/r_leaf_compatibility_check/test_r_leaf_compatibility_check.py b/tests/checks/r_leaf_compatibility_check/test_r_leaf_compatibility_check.py new file mode 100644 index 0000000..b5d53ab --- /dev/null +++ b/tests/checks/r_leaf_compatibility_check/test_r_leaf_compatibility_check.py @@ -0,0 +1,103 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +script = importlib.import_module("aci-preupgrade-validation-script") +AciVersion = 
script.AciVersion + +test_function = "r_leaf_compatibility_check" + +# icurl queries +infraSetPol = "uni/infra/settings.json" + + +@pytest.mark.parametrize( + "icurl_outputs, fabric_nodes, cversion, tversion, expected_result, expected_data", + [ + # MANUAL - No TVER + ( + {infraSetPol: read_data(dir, "infraSetPol_no_DTF.json")}, + read_data(dir, "fabricNode_with_RL.json"), + "4.1(1a)", + None, + script.MANUAL, + [], + ), + # FAIL - pre DTF version, yes RL + ( + {infraSetPol: read_data(dir, "infraSetPol_no_DTF.json")}, + read_data(dir, "fabricNode_with_RL.json"), + "4.1(1a)", + "5.2(1a)", + script.FAIL_O, + [["5.2(1a)", "Present", "Not Supported"]], + ), + # PASS - pre DTF version, no RL + ( + {infraSetPol: read_data(dir, "infraSetPol_no_DTF.json")}, + read_data(dir, "fabricNode_no_RL.json"), + "4.1(1a)", + "5.2(1a)", + script.NA, + [], + ), + # FAIL - bug version upgrade, yes RL + ( + {infraSetPol: read_data(dir, "infraSetPol_DTF_enabled.json")}, + read_data(dir, "fabricNode_with_RL.json"), + "4.1(2a)", + "4.2(2a)", + script.FAIL_O, + [["4.2(2a)", "Present", True]], + ), + # PASS - bug version upgrade, no RL + ( + {infraSetPol: read_data(dir, "infraSetPol_DTF_enabled.json")}, + read_data(dir, "fabricNode_no_RL.json"), + "4.1(2a)", + "4.2(2a)", + script.NA, + [], + ), + # PASS - Fix ver to 5.x, yes RL, DTF enabled + ( + {infraSetPol: read_data(dir, "infraSetPol_DTF_enabled.json")}, + read_data(dir, "fabricNode_with_RL.json"), + "4.2(3a)", + "5.2(3a)", + script.PASS, + [], + ), + # FAIL - Fix ver to 5.x, yes RL, DTF disabled + ( + {infraSetPol: read_data(dir, "infraSetPol_DTF_disabled.json")}, + read_data(dir, "fabricNode_with_RL.json"), + "4.2(3a)", + "5.2(3a)", + script.FAIL_O, + [["5.2(3a)", "Present", False]], + ), + # PASS - Fix ver to 5.x, no RL + ( + {infraSetPol: read_data(dir, "infraSetPol_DTF_disabled.json")}, + read_data(dir, "fabricNode_no_RL.json"), + "4.2(3a)", + "5.2(3a)", + script.NA, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, 
fabric_nodes, cversion, tversion, expected_result, expected_data): + result = run_check( + cversion=AciVersion(cversion), + tversion=AciVersion(tversion) if tversion else None, + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/rtmap_comm_match_defect_check/rtctrlCtxP_NEG.json b/tests/checks/rtmap_comm_match_defect_check/rtctrlCtxP_NEG.json similarity index 100% rename from tests/rtmap_comm_match_defect_check/rtctrlCtxP_NEG.json rename to tests/checks/rtmap_comm_match_defect_check/rtctrlCtxP_NEG.json diff --git a/tests/rtmap_comm_match_defect_check/rtctrlCtxP_POS.json b/tests/checks/rtmap_comm_match_defect_check/rtctrlCtxP_POS.json similarity index 100% rename from tests/rtmap_comm_match_defect_check/rtctrlCtxP_POS.json rename to tests/checks/rtmap_comm_match_defect_check/rtctrlCtxP_POS.json diff --git a/tests/rtmap_comm_match_defect_check/rtctrlSubjP_NEG.json b/tests/checks/rtmap_comm_match_defect_check/rtctrlSubjP_NEG.json similarity index 100% rename from tests/rtmap_comm_match_defect_check/rtctrlSubjP_NEG.json rename to tests/checks/rtmap_comm_match_defect_check/rtctrlSubjP_NEG.json diff --git a/tests/rtmap_comm_match_defect_check/rtctrlSubjP_POS.json b/tests/checks/rtmap_comm_match_defect_check/rtctrlSubjP_POS.json similarity index 100% rename from tests/rtmap_comm_match_defect_check/rtctrlSubjP_POS.json rename to tests/checks/rtmap_comm_match_defect_check/rtctrlSubjP_POS.json diff --git a/tests/rtmap_comm_match_defect_check/test_rtmap_comm_match_defect_check.py b/tests/checks/rtmap_comm_match_defect_check/test_rtmap_comm_match_defect_check.py similarity index 88% rename from tests/rtmap_comm_match_defect_check/test_rtmap_comm_match_defect_check.py rename to tests/checks/rtmap_comm_match_defect_check/test_rtmap_comm_match_defect_check.py index c090d50..2dc9000 100644 --- a/tests/rtmap_comm_match_defect_check/test_rtmap_comm_match_defect_check.py +++ 
b/tests/checks/rtmap_comm_match_defect_check/test_rtmap_comm_match_defect_check.py @@ -9,11 +9,13 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "rtmap_comm_match_defect_check" # icurl queries rtctrlSubjPs = "rtctrlSubjP.json?rsp-subtree=full&rsp-subtree-class=rtctrlMatchCommFactor,rtctrlMatchRtDest&rsp-subtree-include=required" rtctrlCtxPs = "rtctrlCtxP.json?rsp-subtree=full&rsp-subtree-class=rtctrlRsCtxPToSubjP,rtctrlRsScopeToAttrP&rsp-subtree-include=required" + @pytest.mark.parametrize( "icurl_outputs, tversion, expected_result", [ @@ -67,10 +69,8 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.rtmap_comm_match_defect_check( - 1, - 1, - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/service_bd_forceful_routing_check/fvRtEPpInfoToBD.json b/tests/checks/service_bd_forceful_routing_check/fvRtEPpInfoToBD.json similarity index 100% rename from tests/service_bd_forceful_routing_check/fvRtEPpInfoToBD.json rename to tests/checks/service_bd_forceful_routing_check/fvRtEPpInfoToBD.json diff --git a/tests/service_bd_forceful_routing_check/test_service_bd_forceful_routing_check.py b/tests/checks/service_bd_forceful_routing_check/test_service_bd_forceful_routing_check.py similarity index 81% rename from tests/service_bd_forceful_routing_check/test_service_bd_forceful_routing_check.py rename to tests/checks/service_bd_forceful_routing_check/test_service_bd_forceful_routing_check.py index 86038b3..5867c88 100644 --- a/tests/service_bd_forceful_routing_check/test_service_bd_forceful_routing_check.py +++ b/tests/checks/service_bd_forceful_routing_check/test_service_bd_forceful_routing_check.py @@ -9,6 +9,7 @@ log = 
logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "service_bd_forceful_routing_check" # icurl queries fvRtEPpInfoToBD = "fvRtEPpInfoToBD.json" @@ -54,8 +55,9 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - cver = script.AciVersion(cversion) - tver = script.AciVersion(tversion) if tversion else None - result = script.service_bd_forceful_routing_check(1, 1, cver, tver) - assert result == expected_result +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, + ) + assert result.result == expected_result diff --git a/tests/checks/stale_decomissioned_spine_check/fabricNode.json b/tests/checks/stale_decomissioned_spine_check/fabricNode.json new file mode 100644 index 0000000..7a79d17 --- /dev/null +++ b/tests/checks/stale_decomissioned_spine_check/fabricNode.json @@ -0,0 +1,46 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "id": "101", + "fabricSt": "active", + "role": "leaf", + "name": "leaf1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-201", + "id": "201", + "fabricSt": "active", + "role": "spine", + "name": "spine1" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-106", + "id": "106", + "fabricSt": "active", + "role": "spine", + "name": "spine2" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-107", + "id": "107", + "fabricSt": "disabled", + "role": "spine", + "name": "spine3" + } + } + } +] diff --git a/tests/stale_decomissioned_spine_check/fabricRsDecommissionNode_POS.json b/tests/checks/stale_decomissioned_spine_check/fabricRsDecommissionNode_POS.json similarity index 100% rename from tests/stale_decomissioned_spine_check/fabricRsDecommissionNode_POS.json rename to 
tests/checks/stale_decomissioned_spine_check/fabricRsDecommissionNode_POS.json diff --git a/tests/checks/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py b/tests/checks/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py new file mode 100644 index 0000000..7d9c6f9 --- /dev/null +++ b/tests/checks/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py @@ -0,0 +1,57 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "stale_decomissioned_spine_check" + +# icurl queries +decomissioned_api = "fabricRsDecommissionNode.json" + + +@pytest.mark.parametrize( + "icurl_outputs, tversion, expected_result, expected_data", + [ + # TVERSION not supplied + ( + {decomissioned_api: read_data(dir, "fabricRsDecommissionNode_POS.json")}, + None, + script.MANUAL, + [], + ), + # No decom objects + ( + {decomissioned_api: []}, + "5.2(5e)", + script.PASS, + [], + ), + # Spine has stale decom object, and going to affected version + ( + {decomissioned_api: read_data(dir, "fabricRsDecommissionNode_POS.json")}, + "5.2(6a)", + script.FAIL_O, + [["106", "spine2", "active"]], + ), + # Fixed Target Version + ( + {decomissioned_api: read_data(dir, "fabricRsDecommissionNode_POS.json")}, + "6.0(4a)", + script.PASS, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, tversion, expected_result, expected_data): + result = run_check( + tversion=script.AciVersion(tversion) if tversion else None, + fabric_nodes=read_data(dir, "fabricNode.json"), + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/standby_sup_sync_check/eqptSupC_NEG.json b/tests/checks/standby_sup_sync_check/eqptSupC_NEG.json similarity index 100% rename from 
tests/standby_sup_sync_check/eqptSupC_NEG.json rename to tests/checks/standby_sup_sync_check/eqptSupC_NEG.json diff --git a/tests/standby_sup_sync_check/eqptSupC_POS.json b/tests/checks/standby_sup_sync_check/eqptSupC_POS.json similarity index 100% rename from tests/standby_sup_sync_check/eqptSupC_POS.json rename to tests/checks/standby_sup_sync_check/eqptSupC_POS.json diff --git a/tests/checks/standby_sup_sync_check/test_standby_sup_sync_check.py b/tests/checks/standby_sup_sync_check/test_standby_sup_sync_check.py new file mode 100644 index 0000000..7268aff --- /dev/null +++ b/tests/checks/standby_sup_sync_check/test_standby_sup_sync_check.py @@ -0,0 +1,153 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "standby_sup_sync_check" + +# icurl queries +eqptSupC_api = "eqptSupC.json" +eqptSupC_api += '?query-target-filter=eq(eqptSupC.rdSt,"standby")' + +""" +Bug cversion/tversion matrix based on image size + +4.2(7t)+ - fixed versions LT 2 Gigs: 4.2(7t)+ +5.2(5d)+ - fixed versions LT 2 Gigs: 5.2(7f)+ +5.3(1d)+ - fixed versions LT 2 Gigs: 5.3(1d)+ +6.0(1g)+ - fixed versions LT 2 Gigs: 6.0(1g), 6.0(1j). 32-bit only: 6.0(2h), 6.0(2j). 
64-bit: NONE +6.1(1f)+ - fixed versions LT 2 Gigs: NONE +""" + + +@pytest.mark.parametrize( + "icurl_outputs, cversion, tversion, expected_result", + [ + # NO TVERSION - MANUAL + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "5.2(1a)", + None, + script.MANUAL, + ), + # CVERSION 4.2 + # cversion 4.2 -nofix, tversion 4.2 -fix LT 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7a)", + "4.2(8d)", + script.PASS, + ), + # cversion 4.2 -nofix, tversion 5.2 -fix but over 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7a)", + "5.2(5d)", + script.FAIL_UF, + ), + # cversion 4.2 -nofix, tversion 5.2 -fix and LT 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7a)", + "5.2(7f)", + script.PASS, + ), + # cversion 4.2 -nofix, tversion 5.3 -fix and LT 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7a)", + "5.3(1d)", + script.PASS, + ), + # cversion 4.2 -nofix, tversion 6.0 -fix and LT 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7a)", + "6.0(8d)", + script.FAIL_UF, + ), + # cversion 4.2 -nofix, tversion 6.1 -fix and LT 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7a)", + "6.1(1f)", + script.FAIL_UF, + ), + # cversion 4.2 -fix, tversion 6.0 -fix but over 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7t)", + "6.0(6h)", + script.PASS, + ), + # cversion 4.2 -fix, tversion 6.1 -fix but over 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "4.2(7t)", + "6.1(1f)", + script.PASS, + ), + # CVERSION 5.2 + # cversion 5.2 -nofix, tversion 5.2 -fix but over 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "5.2(4a)", + "5.2(7a)", + script.FAIL_UF, + ), + # cversion 5.2 -nofix, tversion 5.2 -fix LT 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "5.2(4a)", + "5.2(7f)", + script.PASS, + ), + # cversion 5.2 -nofix, tversion 5.3 -fix LT 2G + ( + {eqptSupC_api: read_data(dir, 
"eqptSupC_POS.json")}, + "5.2(4a)", + "5.3(1d)", + script.PASS, + ), + # cversion 5.2 -nofix, tversion 6.0 -fix but over 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "5.2(4a)", + "6.0(8d)", + script.FAIL_UF, + ), + # cversion 5.2 -nofix, tversion 6.1 -fix but over 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "5.2(4a)", + "6.1(1f)", + script.FAIL_UF, + ), + # cversion 5.2 -fix, tversion 6.1 -fix but over 2G + ( + {eqptSupC_api: read_data(dir, "eqptSupC_POS.json")}, + "5.2(5d)", + "6.1(1f)", + script.PASS, + ), + # NO STANDBY SUPS + ( + {eqptSupC_api: read_data(dir, "eqptSupC_NEG.json")}, + "4.2(7a)", + "6.1(1f)", + script.PASS, + ), + ], +) +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None + ) + assert result.result == expected_result diff --git a/tests/static_route_overlap_check/fvRsCtx.json b/tests/checks/static_route_overlap_check/fvRsCtx.json similarity index 100% rename from tests/static_route_overlap_check/fvRsCtx.json rename to tests/checks/static_route_overlap_check/fvRsCtx.json diff --git a/tests/static_route_overlap_check/fvSubnet.json b/tests/checks/static_route_overlap_check/fvSubnet.json similarity index 100% rename from tests/static_route_overlap_check/fvSubnet.json rename to tests/checks/static_route_overlap_check/fvSubnet.json diff --git a/tests/static_route_overlap_check/ipRouteP_empty.json b/tests/checks/static_route_overlap_check/ipRouteP_empty.json similarity index 100% rename from tests/static_route_overlap_check/ipRouteP_empty.json rename to tests/checks/static_route_overlap_check/ipRouteP_empty.json diff --git a/tests/static_route_overlap_check/ipRouteP_neg.json b/tests/checks/static_route_overlap_check/ipRouteP_neg.json similarity index 100% rename from tests/static_route_overlap_check/ipRouteP_neg.json rename to 
tests/checks/static_route_overlap_check/ipRouteP_neg.json diff --git a/tests/static_route_overlap_check/ipRouteP_pos.json b/tests/checks/static_route_overlap_check/ipRouteP_pos.json similarity index 100% rename from tests/static_route_overlap_check/ipRouteP_pos.json rename to tests/checks/static_route_overlap_check/ipRouteP_pos.json diff --git a/tests/static_route_overlap_check/l3extRsEctx.json b/tests/checks/static_route_overlap_check/l3extRsEctx.json similarity index 100% rename from tests/static_route_overlap_check/l3extRsEctx.json rename to tests/checks/static_route_overlap_check/l3extRsEctx.json diff --git a/tests/checks/static_route_overlap_check/test_static_route_overlap_check.py b/tests/checks/static_route_overlap_check/test_static_route_overlap_check.py new file mode 100644 index 0000000..2133313 --- /dev/null +++ b/tests/checks/static_route_overlap_check/test_static_route_overlap_check.py @@ -0,0 +1,91 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "static_route_overlap_check" + +# icurl queries +staticRoutes = 'ipRouteP.json?query-target-filter=and(wcard(ipRouteP.dn,"/32"))' +staticroute_vrf = "l3extRsEctx.json" +bds_in_vrf = "fvRsCtx.json" +subnets_in_bd = "fvSubnet.json" + + +@pytest.mark.parametrize( + "icurl_outputs, cversion, tversion, expected_result", + [ + # FAIL = AFFECTED VERSION + AFFECTED MO + ( + { + staticRoutes: read_data(dir, "ipRouteP_pos.json"), + staticroute_vrf: read_data(dir, "l3extRsEctx.json"), + bds_in_vrf: read_data(dir, "fvRsCtx.json"), + subnets_in_bd: read_data(dir, "fvSubnet.json"), + }, + "4.2(7f)", + "5.2(4d)", + script.FAIL_O, + ), + # FAIL = AFFECTED VERSION + AFFECTED MO + ( + { + staticRoutes: read_data(dir, "ipRouteP_pos.json"), + staticroute_vrf: read_data(dir, "l3extRsEctx.json"), + 
bds_in_vrf: read_data(dir, "fvRsCtx.json"), + subnets_in_bd: read_data(dir, "fvSubnet.json"), + }, + "5.1(1a)", + "5.2(4d)", + script.FAIL_O, + ), + # PASS = AFFECTED VERSION + NON-AFFECTED MO + ( + { + staticRoutes: read_data(dir, "ipRouteP_neg.json"), + staticroute_vrf: read_data(dir, "l3extRsEctx.json"), + bds_in_vrf: read_data(dir, "fvRsCtx.json"), + subnets_in_bd: read_data(dir, "fvSubnet.json"), + }, + "4.2(7f)", + "5.2(4d)", + script.PASS, + ), + # PASS = AFFECTED VERSION + AFFECTED MO NON EXISTING + ( + { + staticRoutes: read_data(dir, "ipRouteP_empty.json"), + staticroute_vrf: read_data(dir, "l3extRsEctx.json"), + bds_in_vrf: read_data(dir, "fvRsCtx.json"), + subnets_in_bd: read_data(dir, "fvSubnet.json"), + }, + "4.2(7f)", + "5.2(4d)", + script.PASS, + ), + # PASS = NON-AFFECTED VERSION + AFFECTED MO + ( + { + staticRoutes: read_data(dir, "ipRouteP_pos.json"), + staticroute_vrf: read_data(dir, "l3extRsEctx.json"), + bds_in_vrf: read_data(dir, "fvRsCtx.json"), + subnets_in_bd: read_data(dir, "fvSubnet.json"), + }, + "4.2(7f)", + "5.2(6e)", + script.PASS, + ), + ], +) +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion), + ) + assert result.result == expected_result diff --git a/tests/subnet_scope_check/fvAEPg_empty.json b/tests/checks/subnet_scope_check/fvAEPg_empty.json similarity index 100% rename from tests/subnet_scope_check/fvAEPg_empty.json rename to tests/checks/subnet_scope_check/fvAEPg_empty.json diff --git a/tests/subnet_scope_check/fvAEPg_neg.json b/tests/checks/subnet_scope_check/fvAEPg_neg.json similarity index 100% rename from tests/subnet_scope_check/fvAEPg_neg.json rename to tests/checks/subnet_scope_check/fvAEPg_neg.json diff --git a/tests/subnet_scope_check/fvAEPg_pos.json b/tests/checks/subnet_scope_check/fvAEPg_pos.json similarity index 100% rename from tests/subnet_scope_check/fvAEPg_pos.json rename to 
tests/checks/subnet_scope_check/fvAEPg_pos.json diff --git a/tests/subnet_scope_check/fvBD.json b/tests/checks/subnet_scope_check/fvBD.json similarity index 100% rename from tests/subnet_scope_check/fvBD.json rename to tests/checks/subnet_scope_check/fvBD.json diff --git a/tests/subnet_scope_check/fvRsBd.json b/tests/checks/subnet_scope_check/fvRsBd.json similarity index 100% rename from tests/subnet_scope_check/fvRsBd.json rename to tests/checks/subnet_scope_check/fvRsBd.json diff --git a/tests/checks/subnet_scope_check/test_subnet_scope_check.py b/tests/checks/subnet_scope_check/test_subnet_scope_check.py new file mode 100644 index 0000000..479d480 --- /dev/null +++ b/tests/checks/subnet_scope_check/test_subnet_scope_check.py @@ -0,0 +1,74 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "subnet_scope_check" + +# icurl queries +bd_api = "fvBD.json" +bd_api += "?rsp-subtree=children&rsp-subtree-class=fvSubnet&rsp-subtree-include=required" + +epg_api = "fvAEPg.json?" 
+epg_api += "rsp-subtree=children&rsp-subtree-class=fvSubnet&rsp-subtree-include=required" + + +@pytest.mark.parametrize( + "icurl_outputs, cversion, expected_result", + [ + ( + { + bd_api: read_data(dir, "fvBD.json"), + epg_api: read_data(dir, "fvAEPg_empty.json"), + "fvRsBd.json": read_data(dir, "fvRsBd.json"), + }, + "4.2(6a)", + script.NA, + ), + ( + { + bd_api: read_data(dir, "fvBD.json"), + epg_api: read_data(dir, "fvAEPg_pos.json"), + "fvRsBd.json": read_data(dir, "fvRsBd.json"), + }, + "4.2(6a)", + script.FAIL_O, + ), + ( + { + bd_api: read_data(dir, "fvBD.json"), + epg_api: read_data(dir, "fvAEPg_pos.json"), + "fvRsBd.json": read_data(dir, "fvRsBd.json"), + }, + "5.1(1a)", + script.FAIL_O, + ), + ( + { + bd_api: read_data(dir, "fvBD.json"), + epg_api: read_data(dir, "fvAEPg_neg.json"), + "fvRsBd.json": read_data(dir, "fvRsBd.json"), + }, + "5.1(1a)", + script.PASS, + ), + ( + { + bd_api: read_data(dir, "fvBD.json"), + epg_api: read_data(dir, "fvAEPg_neg.json"), + "fvRsBd.json": read_data(dir, "fvRsBd.json"), + }, + "5.2(8h)", + script.PASS, + ), + ], +) +def test_logic(run_check, mock_icurl, cversion, expected_result): + result = run_check(cversion=script.AciVersion(cversion)) + assert result.result == expected_result diff --git a/tests/sup_a_high_memory_check/eqptSupC_SUP_A.json b/tests/checks/sup_a_high_memory_check/eqptSupC_SUP_A.json similarity index 100% rename from tests/sup_a_high_memory_check/eqptSupC_SUP_A.json rename to tests/checks/sup_a_high_memory_check/eqptSupC_SUP_A.json diff --git a/tests/sup_a_high_memory_check/eqptSupC_SUP_A_Aplus.json b/tests/checks/sup_a_high_memory_check/eqptSupC_SUP_A_Aplus.json similarity index 100% rename from tests/sup_a_high_memory_check/eqptSupC_SUP_A_Aplus.json rename to tests/checks/sup_a_high_memory_check/eqptSupC_SUP_A_Aplus.json diff --git a/tests/sup_a_high_memory_check/eqptSupC_SUP_Aplus.json b/tests/checks/sup_a_high_memory_check/eqptSupC_SUP_Aplus.json similarity index 100% rename from 
tests/sup_a_high_memory_check/eqptSupC_SUP_Aplus.json rename to tests/checks/sup_a_high_memory_check/eqptSupC_SUP_Aplus.json diff --git a/tests/sup_a_high_memory_check/eqptSupC_no_SUP_A_Aplus.json b/tests/checks/sup_a_high_memory_check/eqptSupC_no_SUP_A_Aplus.json similarity index 100% rename from tests/sup_a_high_memory_check/eqptSupC_no_SUP_A_Aplus.json rename to tests/checks/sup_a_high_memory_check/eqptSupC_no_SUP_A_Aplus.json diff --git a/tests/sup_a_high_memory_check/test_sup_a_high_memory_check.py b/tests/checks/sup_a_high_memory_check/test_sup_a_high_memory_check.py similarity index 87% rename from tests/sup_a_high_memory_check/test_sup_a_high_memory_check.py rename to tests/checks/sup_a_high_memory_check/test_sup_a_high_memory_check.py index 8098235..17ce861 100644 --- a/tests/sup_a_high_memory_check/test_sup_a_high_memory_check.py +++ b/tests/checks/sup_a_high_memory_check/test_sup_a_high_memory_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "sup_a_high_memory_check" # icurl queries eqptSupCs = "eqptSupC.json" @@ -55,6 +56,6 @@ ), ], ) -def test_logic(mock_icurl, tversion, expected_result): - result = script.sup_a_high_memory_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/sup_hwrev_check/eqptSpCmnBlk_NEG.json b/tests/checks/sup_hwrev_check/eqptSpCmnBlk_NEG.json similarity index 100% rename from tests/sup_hwrev_check/eqptSpCmnBlk_NEG.json rename to tests/checks/sup_hwrev_check/eqptSpCmnBlk_NEG.json diff --git a/tests/sup_hwrev_check/eqptSpCmnBlk_POS.json b/tests/checks/sup_hwrev_check/eqptSpCmnBlk_POS.json similarity index 100% rename from tests/sup_hwrev_check/eqptSpCmnBlk_POS.json rename to tests/checks/sup_hwrev_check/eqptSpCmnBlk_POS.json diff --git 
a/tests/sup_hwrev_check/test_sup_hwrev_check.py b/tests/checks/sup_hwrev_check/test_sup_hwrev_check.py similarity index 77% rename from tests/sup_hwrev_check/test_sup_hwrev_check.py rename to tests/checks/sup_hwrev_check/test_sup_hwrev_check.py index 5ad6baa..80ed301 100644 --- a/tests/sup_hwrev_check/test_sup_hwrev_check.py +++ b/tests/checks/sup_hwrev_check/test_sup_hwrev_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "sup_hwrev_check" # icurl queries eqptSpCmnBlk = 'eqptSpCmnBlk.json?&query-target-filter=wcard(eqptSpromSupBlk.dn,"sup")' @@ -17,6 +18,13 @@ @pytest.mark.parametrize( "icurl_outputs, cversion, tversion, expected_result", [ + # Affected versions. No Sups found + ( + {eqptSpCmnBlk: []}, + "5.2(1g)", + "5.2(8e)", + script.MANUAL, + ), # Affected Sups and on 5.2. VRM and FPGA Concern ( {eqptSpCmnBlk: read_data(dir, "eqptSpCmnBlk_POS.json")}, @@ -54,8 +62,9 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.sup_hwrev_check( - 1, 1, script.AciVersion(cversion), script.AciVersion(tversion) +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion), ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/switch_bootflash_usage_check/eqptcapacityFSPartition.json b/tests/checks/switch_bootflash_usage_check/eqptcapacityFSPartition.json similarity index 100% rename from tests/switch_bootflash_usage_check/eqptcapacityFSPartition.json rename to tests/checks/switch_bootflash_usage_check/eqptcapacityFSPartition.json diff --git a/tests/switch_bootflash_usage_check/maintUpgJob_not_downloaded.json b/tests/checks/switch_bootflash_usage_check/maintUpgJob_not_downloaded.json similarity index 100% rename from tests/switch_bootflash_usage_check/maintUpgJob_not_downloaded.json rename to 
tests/checks/switch_bootflash_usage_check/maintUpgJob_not_downloaded.json diff --git a/tests/switch_bootflash_usage_check/maintUpgJob_old_ver_no_prop.json b/tests/checks/switch_bootflash_usage_check/maintUpgJob_old_ver_no_prop.json similarity index 100% rename from tests/switch_bootflash_usage_check/maintUpgJob_old_ver_no_prop.json rename to tests/checks/switch_bootflash_usage_check/maintUpgJob_old_ver_no_prop.json diff --git a/tests/switch_bootflash_usage_check/maintUpgJob_pre_downloaded.json b/tests/checks/switch_bootflash_usage_check/maintUpgJob_pre_downloaded.json similarity index 100% rename from tests/switch_bootflash_usage_check/maintUpgJob_pre_downloaded.json rename to tests/checks/switch_bootflash_usage_check/maintUpgJob_pre_downloaded.json diff --git a/tests/checks/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py b/tests/checks/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py new file mode 100644 index 0000000..18e9c1c --- /dev/null +++ b/tests/checks/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py @@ -0,0 +1,62 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "switch_bootflash_usage_check" + +# icurl queries +partitions = "eqptcapacityFSPartition.json" +partitions += '?query-target-filter=eq(eqptcapacityFSPartition.path,"/bootflash")' + +download_sts = "maintUpgJob.json" +download_sts += '?query-target-filter=and(eq(maintUpgJob.dnldStatus,"downloaded")' +download_sts += ',eq(maintUpgJob.desiredVersion,"n9000-16.0(2h)"))' + + +@pytest.mark.parametrize( + "icurl_outputs, tversion, expected_result", + [ + ( + { + partitions: [], + download_sts: [], + }, + "6.0(2h)", + script.MANUAL, + ), + ( + { + partitions: read_data(dir, "eqptcapacityFSPartition.json"), + download_sts: 
read_data(dir, "maintUpgJob_not_downloaded.json"), + }, + "6.0(2h)", + script.FAIL_UF, + ), + ( + { + partitions: read_data(dir, "eqptcapacityFSPartition.json"), + download_sts: read_data(dir, "maintUpgJob_pre_downloaded.json"), + }, + "6.0(2h)", + script.PASS, + ), + ( + { + partitions: read_data(dir, "eqptcapacityFSPartition.json"), + download_sts: read_data(dir, "maintUpgJob_old_ver_no_prop.json"), + }, + "6.0(2h)", + script.FAIL_UF, + ), + ], +) +def test_logic(run_check, mock_icurl, tversion, expected_result): + result = run_check(tversion=script.AciVersion(tversion)) + assert result.result == expected_result diff --git a/tests/checks/switch_group_guideline_check/bgpRRNodePEp_1001_1002_2001_2002.json b/tests/checks/switch_group_guideline_check/bgpRRNodePEp_1001_1002_2001_2002.json new file mode 100644 index 0000000..dfd90b2 --- /dev/null +++ b/tests/checks/switch_group_guideline_check/bgpRRNodePEp_1001_1002_2001_2002.json @@ -0,0 +1,38 @@ +[ + { + "bgpRRNodePEp": { + "attributes": { + "dn": "uni/fabric/bgpInstP-default/rr/node-1001", + "id": "1001", + "podId": "1" + } + } + }, + { + "bgpRRNodePEp": { + "attributes": { + "dn": "uni/fabric/bgpInstP-default/rr/node-1002", + "id": "1002", + "podId": "1" + } + } + }, + { + "bgpRRNodePEp": { + "attributes": { + "dn": "uni/fabric/bgpInstP-default/rr/node-2001", + "id": "2001", + "podId": "2" + } + } + }, + { + "bgpRRNodePEp": { + "attributes": { + "dn": "uni/fabric/bgpInstP-default/rr/node-2002", + "id": "2002", + "podId": "2" + } + } + } +] diff --git a/tests/checks/switch_group_guideline_check/fabricExplicitGEp.json b/tests/checks/switch_group_guideline_check/fabricExplicitGEp.json new file mode 100644 index 0000000..05cb7bc --- /dev/null +++ b/tests/checks/switch_group_guideline_check/fabricExplicitGEp.json @@ -0,0 +1,92 @@ +[ + { + "fabricExplicitGEp": { + "attributes": { + "dn": "uni/fabric/protpol/expgep-101-102", + "id": "12", + "name": "101-102", + "virtualIp": "10.0.112.64/32" + }, + "children": [ + { + 
"fabricNodePEp": { + "attributes": { + "id": "102", + "podId": "1", + "rn": "nodepep-102" + } + } + }, + { + "fabricNodePEp": { + "attributes": { + "id": "101", + "podId": "1", + "rn": "nodepep-101" + } + } + } + ] + } + }, + { + "fabricExplicitGEp": { + "attributes": { + "dn": "uni/fabric/protpol/expgep-111-112", + "id": "112", + "name": "111-112", + "virtualIp": "10.0.188.64/32" + }, + "children": [ + { + "fabricNodePEp": { + "attributes": { + "id": "111", + "podId": "1", + "rn": "nodepep-111" + } + } + }, + { + "fabricNodePEp": { + "attributes": { + "id": "112", + "podId": "1", + "rn": "nodepep-112" + } + } + } + ] + } + }, + { + "fabricExplicitGEp": { + "attributes": { + "dn": "uni/fabric/protpol/expgep-201-202", + "id": "212", + "name": "201-202", + "virtualIp": "10.0.188.64/32" + }, + "children": [ + { + "fabricNodePEp": { + "attributes": { + "id": "201", + "podId": "2", + "rn": "nodepep-201" + } + } + }, + { + "fabricNodePEp": { + "attributes": { + "id": "202", + "podId": "2", + "rn": "nodepep-202" + } + } + } + ] + } + } +] diff --git a/tests/checks/switch_group_guideline_check/fabricNode.json b/tests/checks/switch_group_guideline_check/fabricNode.json new file mode 100644 index 0000000..09e4c84 --- /dev/null +++ b/tests/checks/switch_group_guideline_check/fabricNode.json @@ -0,0 +1,206 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "name": "apic1", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "name": "apic2", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "name": "apic3", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + 
"fabricSt": "active", + "id": "101", + "name": "LF101", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "name": "LF102", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-201", + "fabricSt": "active", + "id": "201", + "name": "LF201", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-202", + "fabricSt": "active", + "id": "202", + "name": "LF202", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-111", + "fabricSt": "active", + "id": "111", + "name": "T2_LF111", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-112", + "fabricSt": "active", + "id": "112", + "name": "T2_LF112", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-121", + "fabricSt": "active", + "id": "121", + "name": "RL_LF121", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-122", + "fabricSt": "active", + "id": "122", + "name": "RL_LF122", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "name": "SP1001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1002", + "fabricSt": "active", + "id": "1002", + "name": "SP1002", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1003", + "fabricSt": "active", + "id": "1003", + "name": 
"SP1003", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1004", + "fabricSt": "active", + "id": "1004", + "name": "SP1004", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-2001", + "fabricSt": "active", + "id": "2001", + "name": "SP2001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-2/node-2002", + "fabricSt": "active", + "id": "2002", + "name": "SP2002", + "role": "spine", + "nodeType": "unspecified" + } + } + } +] diff --git a/tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1001_1002_2001_2002.json b/tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1001_1002_2001_2002.json new file mode 100644 index 0000000..5ecfc3f --- /dev/null +++ b/tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1001_1002_2001_2002.json @@ -0,0 +1,50 @@ +[ + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_121/rsnodeL3OutAtt-[topology/pod-1/node-121]", + "tDn": "topology/pod-1/node-121" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_122/rsnodeL3OutAtt-[topology/pod-1/node-122]", + "tDn": "topology/pod-1/node-122" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_1001/rsnodeL3OutAtt-[topology/pod-1/node-1001]", + "tDn": "topology/pod-1/node-1001" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_1002/rsnodeL3OutAtt-[topology/pod-1/node-1002]", + "tDn": "topology/pod-1/node-1002" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_2001/rsnodeL3OutAtt-[topology/pod-2/node-2001]", + "tDn": 
"topology/pod-2/node-2001" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-node-2002-profile/rsnodeL3OutAtt-[topology/pod-2/node-2002]", + "tDn": "topology/pod-2/node-2002" + } + } + } +] diff --git a/tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1003_1004_2001_2002.json b/tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1003_1004_2001_2002.json new file mode 100644 index 0000000..f7af8e8 --- /dev/null +++ b/tests/checks/switch_group_guideline_check/l3extRsNodeL3OutAtt_1003_1004_2001_2002.json @@ -0,0 +1,50 @@ +[ + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_121/rsnodeL3OutAtt-[topology/pod-1/node-121]", + "tDn": "topology/pod-1/node-121" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_122/rsnodeL3OutAtt-[topology/pod-1/node-122]", + "tDn": "topology/pod-1/node-122" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_1003/rsnodeL3OutAtt-[topology/pod-1/node-1003]", + "tDn": "topology/pod-1/node-1003" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_1004/rsnodeL3OutAtt-[topology/pod-1/node-1004]", + "tDn": "topology/pod-1/node-1004" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-LNodeP_2001/rsnodeL3OutAtt-[topology/pod-2/node-2001]", + "tDn": "topology/pod-2/node-2001" + } + } + }, + { + "l3extRsNodeL3OutAtt": { + "attributes": { + "dn": "uni/tn-infra/out-multipodL3Out/lnodep-node-2002-profile/rsnodeL3OutAtt-[topology/pod-2/node-2002]", + "tDn": "topology/pod-2/node-2002" + } + } + } +] diff --git a/tests/checks/switch_group_guideline_check/lldpCtrlrAdjEp.json b/tests/checks/switch_group_guideline_check/lldpCtrlrAdjEp.json new file mode 100644 index 0000000..139f5bd 
--- /dev/null +++ b/tests/checks/switch_group_guideline_check/lldpCtrlrAdjEp.json @@ -0,0 +1,62 @@ +[ + { + "lldpCtrlrAdjEp": { + "attributes": { + "apicMode": "active", + "dn": "topology/pod-1/node-101/sys/lldp/inst/if-[eth1/1]/ctrlradj", + "id": "1", + "portRole": "active" + } + } + }, + { + "lldpCtrlrAdjEp": { + "attributes": { + "apicMode": "active", + "dn": "topology/pod-1/node-101/sys/lldp/inst/if-[eth1/2]/ctrlradj", + "id": "2", + "portRole": "active" + } + } + }, + { + "lldpCtrlrAdjEp": { + "attributes": { + "apicMode": "active", + "dn": "topology/pod-1/node-102/sys/lldp/inst/if-[eth1/2]/ctrlradj", + "id": "2", + "portRole": "backup" + } + } + }, + { + "lldpCtrlrAdjEp": { + "attributes": { + "apicMode": "active", + "dn": "topology/pod-1/node-102/sys/lldp/inst/if-[eth1/1]/ctrlradj", + "id": "1", + "portRole": "backup" + } + } + }, + { + "lldpCtrlrAdjEp": { + "attributes": { + "apicMode": "active", + "dn": "topology/pod-2/node-201/sys/lldp/inst/if-[eth1/3]/ctrlradj", + "id": "3", + "portRole": "active" + } + } + }, + { + "lldpCtrlrAdjEp": { + "attributes": { + "apicMode": "active", + "dn": "topology/pod-2/node-202/sys/lldp/inst/if-[eth1/3]/ctrlradj", + "id": "3", + "portRole": "active" + } + } + } +] diff --git a/tests/checks/switch_group_guideline_check/maintMaintGrp_ALL.json b/tests/checks/switch_group_guideline_check/maintMaintGrp_ALL.json new file mode 100644 index 0000000..74719f4 --- /dev/null +++ b/tests/checks/switch_group_guideline_check/maintMaintGrp_ALL.json @@ -0,0 +1,94 @@ +[ + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-ALL", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "ALL", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-ALL", + "tnMaintMaintPName": "ALL" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk101-101", "from_": "101", "to_": "101"} + } + }, + { + "fabricNodeBlk": { + 
"attributes": {"name": "blk102-102", "from_": "102", "to_": "102"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk111-111", "from_": "111", "to_": "111"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk112-112", "from_": "112", "to_": "112"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk121-121", "from_": "121", "to_": "121"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk122-122", "from_": "122", "to_": "122"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk201-201", "from_": "201", "to_": "201"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk202-202", "from_": "202", "to_": "202"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1001-1001", "from_": "1001", "to_": "1001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1002-1002", "from_": "1002", "to_": "1002"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1003-1003", "from_": "1003", "to_": "1003"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1004-1004", "from_": "1004", "to_": "1004"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2001-2001", "from_": "2001", "to_": "2001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2002-2002", "from_": "2002", "to_": "2002"} + } + } + ] + } + } +] diff --git a/tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_GRP1_GRP2.json b/tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_GRP1_GRP2.json new file mode 100644 index 0000000..ed909d9 --- /dev/null +++ b/tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_GRP1_GRP2.json @@ -0,0 +1,116 @@ +[ + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-GRP1", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "GRP1", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-GRP1", + 
"tnMaintMaintPName": "GRP1" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk101-101", "from_": "101", "to_": "101"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk102-102", "from_": "102", "to_": "102"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk121-121", "from_": "121", "to_": "121"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk122-122", "from_": "122", "to_": "122"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1001-1001", "from_": "1001", "to_": "1001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1002-1002", "from_": "1002", "to_": "1002"} + } + } + ] + } + }, + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-GRP2", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "GRP2", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-GRP2", + "tnMaintMaintPName": "GRP2" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk111-111", "from_": "111", "to_": "111"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk112-112", "from_": "112", "to_": "112"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk201-201", "from_": "201", "to_": "201"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk202-202", "from_": "202", "to_": "202"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1003-1003", "from_": "1003", "to_": "1003"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1004-1004", "from_": "1004", "to_": "1004"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2001-2001", "from_": "2001", "to_": "2001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2002-2002", "from_": "2002", "to_": "2002"} + } + } + ] + } + } +] diff --git a/tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_ONLY_POD1_SPINE_RR.json 
b/tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_ONLY_POD1_SPINE_RR.json new file mode 100644 index 0000000..dace96c --- /dev/null +++ b/tests/checks/switch_group_guideline_check/maintMaintGrp_BAD_ONLY_POD1_SPINE_RR.json @@ -0,0 +1,160 @@ +[ + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-SPINE_GRP1", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "SPINE_GRP1", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-SPINE_GRP1", + "tnMaintMaintPName": "SPINE_GRP1" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1001-1001", "from_": "1001", "to_": "1001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1002-1002", "from_": "1002", "to_": "1002"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1003-1003", "from_": "1003", "to_": "1003"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2001-2001", "from_": "2001", "to_": "2001"} + } + } + ] + } + }, + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-SPINE_GRP2", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "SPINE_GRP2", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-SPINE_GRP2", + "tnMaintMaintPName": "SPINE_GRP2" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1004-1004", "from_": "1004", "to_": "1004"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2002-2002", "from_": "2002", "to_": "2002"} + } + } + ] + } + }, + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-ODD", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "ODD", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-ODD", + "tnMaintMaintPName": "ODD" + } + 
} + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk101-101", "from_": "101", "to_": "101"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk111-111", "from_": "111", "to_": "111"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk121-121", "from_": "121", "to_": "121"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk201-201", "from_": "201", "to_": "201"} + } + } + ] + } + }, + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-EVEN", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "EVEN", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-EVEN", + "tnMaintMaintPName": "EVEN" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk102-102", "from_": "102", "to_": "102"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk112-112", "from_": "112", "to_": "112"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk122-122", "from_": "122", "to_": "122"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk202-202", "from_": "202", "to_": "202"} + } + } + ] + } + } +] diff --git a/tests/checks/switch_group_guideline_check/maintMaintGrp_EVEN_ODD.json b/tests/checks/switch_group_guideline_check/maintMaintGrp_EVEN_ODD.json new file mode 100644 index 0000000..678fff3 --- /dev/null +++ b/tests/checks/switch_group_guideline_check/maintMaintGrp_EVEN_ODD.json @@ -0,0 +1,116 @@ +[ + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-ODD", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "ODD", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-ODD", + "tnMaintMaintPName": "ODD" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk101-101", "from_": "101", "to_": "101"} + } + }, + { + "fabricNodeBlk": { + 
"attributes": {"name": "blk111-111", "from_": "111", "to_": "111"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk121-121", "from_": "121", "to_": "121"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk201-201", "from_": "201", "to_": "201"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1001-1001", "from_": "1001", "to_": "1001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1003-1003", "from_": "1003", "to_": "1003"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2001-2001", "from_": "2001", "to_": "2001"} + } + } + ] + } + }, + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-EVEN", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "EVEN", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-EVEN", + "tnMaintMaintPName": "EVEN" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk102-102", "from_": "102", "to_": "102"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk112-112", "from_": "112", "to_": "112"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk122-122", "from_": "122", "to_": "122"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk202-202", "from_": "202", "to_": "202"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1002-1002", "from_": "1002", "to_": "1002"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1004-1004", "from_": "1004", "to_": "1004"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2002-2002", "from_": "2002", "to_": "2002"} + } + } + ] + } + } +] diff --git a/tests/checks/switch_group_guideline_check/maintMaintGrp_SPINE_LEAF.json b/tests/checks/switch_group_guideline_check/maintMaintGrp_SPINE_LEAF.json new file mode 100644 index 0000000..f07b60e --- /dev/null +++ 
b/tests/checks/switch_group_guideline_check/maintMaintGrp_SPINE_LEAF.json @@ -0,0 +1,116 @@ +[ + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-SPINE", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "SPINE", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-SPINE", + "tnMaintMaintPName": "SPINE" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1001-1001", "from_": "1001", "to_": "1001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1002-1002", "from_": "1002", "to_": "1002"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1003-1003", "from_": "1003", "to_": "1003"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk1004-1004", "from_": "1004", "to_": "1004"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2001-2001", "from_": "2001", "to_": "2001"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk2002-2002", "from_": "2002", "to_": "2002"} + } + } + ] + } + }, + { + "maintMaintGrp": { + "attributes": { + "dn": "uni/fabric/maintgrp-LEAF", + "fwtype": "switch", + "monPolDn": "uni/fabric/monfab-default", + "name": "LEAF", + "type": "range" + }, + "children": [ + { + "maintRsMgrpp": { + "attributes": { + "tCl": "maintMaintP", + "tDn": "uni/fabric/maintpol-LEAF", + "tnMaintMaintPName": "LEAF" + } + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk101-101", "from_": "101", "to_": "101"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk102-102", "from_": "102", "to_": "102"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk111-111", "from_": "111", "to_": "111"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk112-112", "from_": "112", "to_": "112"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk121-121", "from_": "121", "to_": "121"} + } + }, + { + "fabricNodeBlk": { + 
"attributes": {"name": "blk122-122", "from_": "122", "to_": "122"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk201-201", "from_": "201", "to_": "201"} + } + }, + { + "fabricNodeBlk": { + "attributes": {"name": "blk202-202", "from_": "202", "to_": "202"} + } + } + ] + } + } +] diff --git a/tests/checks/switch_group_guideline_check/test_switch_group_guideline_check.py b/tests/checks/switch_group_guideline_check/test_switch_group_guideline_check.py new file mode 100644 index 0000000..332e85f --- /dev/null +++ b/tests/checks/switch_group_guideline_check/test_switch_group_guideline_check.py @@ -0,0 +1,193 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "switch_group_guideline_check" + +# icurl queries +upgrade_grp = "maintMaintGrp.json?rsp-subtree=children" +bgp_rr = "bgpRRNodePEp.json" +ipn_spines = 'l3extRsNodeL3OutAtt.json?query-target-filter=wcard(l3extRsNodeL3OutAtt.dn,"tn-infra/")' +apic_lldp = "lldpCtrlrAdjEp.json" +vpc_pairs = "fabricExplicitGEp.json?rsp-subtree=children&rsp-subtree-class=fabricNodePEp" + + +@pytest.mark.parametrize( + "icurl_outputs, fabric_nodes, expected_result, expected_data", + [ + # PASS + # Upgrade Grp: EVEN and ODD + # Spines: + # [Pod 1] 1001-1004, RR: 1001,1002, IPN 1001,1002 + # [Pod 2] 2001-2002, RR: 2001,2002, IPN 2001,2002 + # APIC Leaves: + # [Pod 1] 101-102 (APIC 1, 2) + # [Pod 2] 201-202 (APIC 3) + # VPC Leaves: + # [Pod 1] 101-102, 111-112 + # [Pod 2] 201-202 + ( + { + upgrade_grp: read_data(dir, "maintMaintGrp_EVEN_ODD.json"), + bgp_rr: read_data(dir, "bgpRRNodePEp_1001_1002_2001_2002.json"), + ipn_spines: read_data(dir, "l3extRsNodeL3OutAtt_1001_1002_2001_2002.json"), + apic_lldp: read_data(dir, "lldpCtrlrAdjEp.json"), + vpc_pairs: read_data(dir, "fabricExplicitGEp.json"), + }, + 
read_data(dir, "fabricNode.json"), + script.PASS, + [], + ), + # FAIL - All HA broken + # Upgrade Grp: all in one + # Spines: + # [Pod 1] 1001-1004, RR: 1001,1002, IPN 1001,1002 + # [Pod 2] 2001-2002, RR: 2001,2002, IPN 2001,2002 + # APIC Leaves: + # [Pod 1] 101-102 (APIC 1, 2) + # [Pod 2] 201-202 (APIC 3) + # VPC Leaves: + # [Pod 1] 101-102, 111-112 + # [Pod 2] 201-202 + ( + { + upgrade_grp: read_data(dir, "maintMaintGrp_ALL.json"), + bgp_rr: read_data(dir, "bgpRRNodePEp_1001_1002_2001_2002.json"), + ipn_spines: read_data(dir, "l3extRsNodeL3OutAtt_1001_1002_2001_2002.json"), + apic_lldp: read_data(dir, "lldpCtrlrAdjEp.json"), + vpc_pairs: read_data(dir, "fabricExplicitGEp.json"), + }, + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [ + ["ALL", "1", "1001,1002,1003,1004", "All spine nodes in this pod are in the same group."], + ["ALL", "2", "2001,2002", "All spine nodes in this pod are in the same group."], + ["ALL", "1", "1001,1002", "All RR spine nodes in this pod are in the same group."], + ["ALL", "2", "2001,2002", "All RR spine nodes in this pod are in the same group."], + ["ALL", "1", "1001,1002", "All IPN/ISN spine nodes in this pod are in the same group."], + ["ALL", "2", "2001,2002", "All IPN/ISN spine nodes in this pod are in the same group."], + ["ALL", "1", "101,102", "All leaf nodes connected to APIC 1 are in the same group."], + ["ALL", "1", "101,102", "All leaf nodes connected to APIC 2 are in the same group."], + ["ALL", "2", "201,202", "All leaf nodes connected to APIC 3 are in the same group."], + ["ALL", "1", "101,102", "Both leaf nodes in the same vPC pair are in the same group."], + ["ALL", "1", "111,112", "Both leaf nodes in the same vPC pair are in the same group."], + ["ALL", "2", "201,202", "Both leaf nodes in the same vPC pair are in the same group."], + ], + ), + # FAIL - All HA broken + # Upgrade Grp: leaves in one group and spines in another + # Spines: + # [Pod 1] 1001-1004, RR: 1001,1002, IPN 1001,1002 + # [Pod 2] 2001-2002, 
RR: 2001,2002, IPN 2001,2002 + # APIC Leaves: + # [Pod 1] 101-102 (APIC 1, 2) + # [Pod 2] 201-202 (APIC 3) + # VPC Leaves: + # [Pod 1] 101-102, 111-112 + # [Pod 2] 201-202 + ( + { + upgrade_grp: read_data(dir, "maintMaintGrp_SPINE_LEAF.json"), + bgp_rr: read_data(dir, "bgpRRNodePEp_1001_1002_2001_2002.json"), + ipn_spines: read_data(dir, "l3extRsNodeL3OutAtt_1001_1002_2001_2002.json"), + apic_lldp: read_data(dir, "lldpCtrlrAdjEp.json"), + vpc_pairs: read_data(dir, "fabricExplicitGEp.json"), + }, + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [ + ["SPINE", "1", "1001,1002,1003,1004", "All spine nodes in this pod are in the same group."], + ["SPINE", "2", "2001,2002", "All spine nodes in this pod are in the same group."], + ["SPINE", "1", "1001,1002", "All RR spine nodes in this pod are in the same group."], + ["SPINE", "2", "2001,2002", "All RR spine nodes in this pod are in the same group."], + ["SPINE", "1", "1001,1002", "All IPN/ISN spine nodes in this pod are in the same group."], + ["SPINE", "2", "2001,2002", "All IPN/ISN spine nodes in this pod are in the same group."], + ["LEAF", "1", "101,102", "All leaf nodes connected to APIC 1 are in the same group."], + ["LEAF", "1", "101,102", "All leaf nodes connected to APIC 2 are in the same group."], + ["LEAF", "2", "201,202", "All leaf nodes connected to APIC 3 are in the same group."], + ["LEAF", "1", "101,102", "Both leaf nodes in the same vPC pair are in the same group."], + ["LEAF", "1", "111,112", "Both leaf nodes in the same vPC pair are in the same group."], + ["LEAF", "2", "201,202", "Both leaf nodes in the same vPC pair are in the same group."], + ], + ), + # FAIL - All HA except for pod1 spine broken + # Upgrade Grp: + # GRP1: 101,102,121,122,1001,1002 + # GRP2: 111,112,201,202,1003,1004,2001,2003 + # Spines: + # [Pod 1] 1001-1004, RR: 1001,1002, IPN 1001,1002 + # [Pod 2] 2001-2002, RR: 2001,2002, IPN 2001,2002 + # APIC Leaves: + # [Pod 1] 101-102 (APIC 1, 2) + # [Pod 2] 201-202 (APIC 3) + # VPC 
Leaves: + # [Pod 1] 101-102, 111-112 + # [Pod 2] 201-202 + ( + { + upgrade_grp: read_data(dir, "maintMaintGrp_BAD_GRP1_GRP2.json"), + bgp_rr: read_data(dir, "bgpRRNodePEp_1001_1002_2001_2002.json"), + ipn_spines: read_data(dir, "l3extRsNodeL3OutAtt_1001_1002_2001_2002.json"), + apic_lldp: read_data(dir, "lldpCtrlrAdjEp.json"), + vpc_pairs: read_data(dir, "fabricExplicitGEp.json"), + }, + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [ + ["GRP1", "1", "1001,1002", "All RR spine nodes in this pod are in the same group."], + ["GRP1", "1", "1001,1002", "All IPN/ISN spine nodes in this pod are in the same group."], + ["GRP1", "1", "101,102", "All leaf nodes connected to APIC 1 are in the same group."], + ["GRP1", "1", "101,102", "All leaf nodes connected to APIC 2 are in the same group."], + ["GRP1", "1", "101,102", "Both leaf nodes in the same vPC pair are in the same group."], + ["GRP2", "2", "2001,2002", "All spine nodes in this pod are in the same group."], + ["GRP2", "2", "2001,2002", "All RR spine nodes in this pod are in the same group."], + ["GRP2", "2", "2001,2002", "All IPN/ISN spine nodes in this pod are in the same group."], + ["GRP2", "2", "201,202", "All leaf nodes connected to APIC 3 are in the same group."], + ["GRP2", "1", "111,112", "Both leaf nodes in the same vPC pair are in the same group."], + ["GRP2", "2", "201,202", "Both leaf nodes in the same vPC pair are in the same group."], + ], + ), + # FAIL - Only pod1 spine RR HA is broken + # Upgrade Grp: + # SPINE_GRP1: 1001-1003, 2001 + # SPINE_GRP2: 1004, 2002 + # EVEN: even leaves + # ODD: odd leaves + # Spines: + # [Pod 1] 1001-1004, RR: 1001,1002, IPN 1003,1004 + # [Pod 2] 2001-2002, RR: 2001,2002, IPN 2001,2002 + # APIC Leaves: + # [Pod 1] 101-102 (APIC 1, 2) + # [Pod 2] 201-202 (APIC 3) + # VPC Leaves: + # [Pod 1] 101-102, 111-112 + # [Pod 2] 201-202 + ( + { + upgrade_grp: read_data(dir, "maintMaintGrp_BAD_ONLY_POD1_SPINE_RR.json"), + bgp_rr: read_data(dir, 
"bgpRRNodePEp_1001_1002_2001_2002.json"), + ipn_spines: read_data(dir, "l3extRsNodeL3OutAtt_1003_1004_2001_2002.json"), + apic_lldp: read_data(dir, "lldpCtrlrAdjEp.json"), + vpc_pairs: read_data(dir, "fabricExplicitGEp.json"), + }, + read_data(dir, "fabricNode.json"), + script.FAIL_O, + [ + ["SPINE_GRP1", "1", "1001,1002", "All RR spine nodes in this pod are in the same group."], + ], + ), + ], +) +def test_logic(run_check, mock_icurl, fabric_nodes, expected_result, expected_data): + result = run_check( + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/checks/switch_ssd_check/faultInst.json b/tests/checks/switch_ssd_check/faultInst.json new file mode 100644 index 0000000..4d3166b --- /dev/null +++ b/tests/checks/switch_ssd_check/faultInst.json @@ -0,0 +1,28 @@ +[ + { + "faultInst": { + "attributes": { + "cause": "equipment-flash-worn-out", + "changeSet": "acc:read-write, cap:244198, deltape:0, descr:flash, gbb:0, id:1, lba:0, lifetime:155, majorAlarm:yes, mfgTm:2025-11-03T04:22:06.834+00:00, minorAlarm:no, model:Micron_M550_MTFDDAT256MAY, operSt:ok, peCycles:4285, readErr:39, rev:MU03, ser:14270C7D9F13, tbw:336.453735, type:flash, vendor:Micron, warning:no, wlc:0", + "code": "F3073", + "descr": "SSD has reached 90% lifetime endurance limit. 
Please replace Switch/Supervisor with ID 0 as soon as possible", + "dn": "topology/pod-1/node-205/sys/ch/supslot-1/sup/flash/fault-F3073", + "rule": "eqpt-flash-flash-worn-out", + "subject": "flash-worn-out" + } + } + }, + { + "faultInst": { + "attributes": { + "cause": "equipment-flash-warning", + "changeSet": "acc:read-write, cap:61057, deltape:23, descr:flash, gbb:0, id:1, lba:0, lifetime:85, majorAlarm:no, mfgTm:2020-09-22T02:21:45.675+00:00, minorAlarm:yes, model:Micron_M600_MTFDDAT064MBF, operSt:ok, peCycles:4290, readErr:0, rev:MC04, ser:MSA20400892, tbw:21.279228, type:flash, vendor:Micron, warning:yes, wlc:0", + "code": "F3074", + "descr": "SSD has reached 80% lifetime and is nearing its endurance limit. Please plan for Switch/Supervisor replacement soon", + "dn": "topology/pod-1/node-101/sys/ch/supslot-1/sup/flash/fault-F3074", + "rule": "eqpt-flash-flash-minor-alarm", + "subject": "flash-minor-alarm" + } + } + } +] diff --git a/tests/checks/switch_ssd_check/test_switch_ssd_check.py b/tests/checks/switch_ssd_check/test_switch_ssd_check.py new file mode 100644 index 0000000..1f7202f --- /dev/null +++ b/tests/checks/switch_ssd_check/test_switch_ssd_check.py @@ -0,0 +1,54 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") +Result = script.Result + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "switch_ssd_check" + +# icurl queries +faultInst = 'faultInst.json?query-target-filter=or(eq(faultInst.code,"F3073"),eq(faultInst.code,"F3074"))' + + +@pytest.mark.parametrize( + "icurl_outputs, expected_result, expected_data", + [ + ( + {faultInst: []}, + script.PASS, + [], + ), + ( + {faultInst: read_data(dir, "faultInst.json")}, + script.FAIL_O, + [ + [ + "F3073", + "1", + "205", + "Micron_M550_MTFDDAT256MAY", + "90%", + "Contact Cisco TAC for replacement procedure", + ], + [ + "F3074", + 
"1", + "101", + "Micron_M600_MTFDDAT064MBF", + "80%", + "Monitor (no impact to upgrades)", + ], + ], + ), + ], +) +def test_logic(run_check, mock_icurl, expected_result, expected_data): + result = run_check() + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/checks/switch_status_check/fabricNode_NEG.json b/tests/checks/switch_status_check/fabricNode_NEG.json new file mode 100644 index 0000000..3dffa26 --- /dev/null +++ b/tests/checks/switch_status_check/fabricNode_NEG.json @@ -0,0 +1,122 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "name": "apic1", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "name": "LF101", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "name": "LF102", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-103", + "fabricSt": "active", + "id": "103", + "name": "LF103", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-111", + "fabricSt": "active", + "id": "111", + "name": "T2_LF111", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-112", + "fabricSt": "active", + "id": "112", + "name": "T2_LF112", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-121", + "fabricSt": "active", + "id": "121", + "name": "RL_LF121", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-122", + 
"fabricSt": "active", + "id": "122", + "name": "RL_LF122", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "name": "SP1001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1002", + "fabricSt": "active", + "id": "1002", + "name": "SP1002", + "role": "spine", + "nodeType": "unspecified" + } + } + } +] diff --git a/tests/checks/switch_status_check/fabricNode_POS.json b/tests/checks/switch_status_check/fabricNode_POS.json new file mode 100644 index 0000000..1a009a1 --- /dev/null +++ b/tests/checks/switch_status_check/fabricNode_POS.json @@ -0,0 +1,122 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "name": "apic1", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "name": "LF101", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "name": "LF102", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-103", + "fabricSt": "inactive", + "id": "103", + "name": "LF103", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-111", + "fabricSt": "active", + "id": "111", + "name": "T2_LF111", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-112", + "fabricSt": "inactive", + "id": "112", + "name": "T2_LF112", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": 
"topology/pod-1/node-121", + "fabricSt": "inactive", + "id": "121", + "name": "RL_LF121", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-122", + "fabricSt": "active", + "id": "122", + "name": "RL_LF122", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "name": "SP1001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1002", + "fabricSt": "active", + "id": "1002", + "name": "SP1002", + "role": "spine", + "nodeType": "unspecified" + } + } + } +] diff --git a/tests/checks/switch_status_check/fabricRsDecommissionNode.json b/tests/checks/switch_status_check/fabricRsDecommissionNode.json new file mode 100644 index 0000000..9fb2e77 --- /dev/null +++ b/tests/checks/switch_status_check/fabricRsDecommissionNode.json @@ -0,0 +1,12 @@ +[ + { + "fabricRsDecommissionNode": { + "attributes": { + "dn": "uni/fabric/outofsvc/rsdecommissionNode-[topology/pod-1/node-112]", + "debug": "yes", + "tDn": "topology/pod-1/node-112", + "targetId": "112" + } + } + } +] diff --git a/tests/checks/switch_status_check/test_switch_status_check.py b/tests/checks/switch_status_check/test_switch_status_check.py new file mode 100644 index 0000000..e16f501 --- /dev/null +++ b/tests/checks/switch_status_check/test_switch_status_check.py @@ -0,0 +1,46 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "switch_status_check" + +# icurl queries +gir_nodes = 'fabricRsDecommissionNode.json?&query-target-filter=eq(fabricRsDecommissionNode.debug,"yes")' + + +@pytest.mark.parametrize( + 
"icurl_outputs, fabric_nodes, expected_result, expected_data", + [ + # FAIL - Some switches are not active + ( + {gir_nodes: read_data(dir, "fabricRsDecommissionNode.json")}, + read_data(dir, "fabricNode_POS.json"), + script.FAIL_UF, + [ + ["1", "103", "inactive"], + ["1", "112", "inactive (Maintenance)"], + ["1", "121", "inactive"], + ], + ), + # PASS - All switches are active + ( + {gir_nodes: []}, + read_data(dir, "fabricNode_NEG.json"), + script.PASS, + [], + ), + ], +) +def test_logic(run_check, mock_icurl, fabric_nodes, expected_result, expected_data): + result = run_check( + fabric_nodes=fabric_nodes, + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/telemetryStatsServerP_object_check/telemetryStatsServerP_neg.json b/tests/checks/telemetryStatsServerP_object_check/telemetryStatsServerP_neg.json similarity index 100% rename from tests/telemetryStatsServerP_object_check/telemetryStatsServerP_neg.json rename to tests/checks/telemetryStatsServerP_object_check/telemetryStatsServerP_neg.json diff --git a/tests/telemetryStatsServerP_object_check/telemetryStatsServerP_pos.json b/tests/checks/telemetryStatsServerP_object_check/telemetryStatsServerP_pos.json similarity index 100% rename from tests/telemetryStatsServerP_object_check/telemetryStatsServerP_pos.json rename to tests/checks/telemetryStatsServerP_object_check/telemetryStatsServerP_pos.json diff --git a/tests/telemetryStatsServerP_object_check/test_telemetryStatsServerP_object_check.py b/tests/checks/telemetryStatsServerP_object_check/test_telemetryStatsServerP_object_check.py similarity index 86% rename from tests/telemetryStatsServerP_object_check/test_telemetryStatsServerP_object_check.py rename to tests/checks/telemetryStatsServerP_object_check/test_telemetryStatsServerP_object_check.py index 9c23434..10d6fef 100644 --- a/tests/telemetryStatsServerP_object_check/test_telemetryStatsServerP_object_check.py +++ 
b/tests/checks/telemetryStatsServerP_object_check/test_telemetryStatsServerP_object_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "telemetryStatsServerP_object_check" # icurl queries telemetryStatsServerPs = "telemetryStatsServerP.json" @@ -79,11 +80,9 @@ ), ], ) -def test_logic(mock_icurl, sw_cversion, tversion, expected_result): - result = script.telemetryStatsServerP_object_check( - 1, - 1, - script.AciVersion(sw_cversion), - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, sw_cversion, tversion, expected_result): + result = run_check( + sw_cversion=script.AciVersion(sw_cversion), + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/tep-to-tep_atomic_counter_check/dbgAcPath_max.json b/tests/checks/tep-to-tep_atomic_counter_check/dbgAcPath_max.json similarity index 100% rename from tests/tep-to-tep_atomic_counter_check/dbgAcPath_max.json rename to tests/checks/tep-to-tep_atomic_counter_check/dbgAcPath_max.json diff --git a/tests/tep-to-tep_atomic_counter_check/dbgAcPath_na.json b/tests/checks/tep-to-tep_atomic_counter_check/dbgAcPath_na.json similarity index 100% rename from tests/tep-to-tep_atomic_counter_check/dbgAcPath_na.json rename to tests/checks/tep-to-tep_atomic_counter_check/dbgAcPath_na.json diff --git a/tests/tep-to-tep_atomic_counter_check/dbgAcPath_pass.json b/tests/checks/tep-to-tep_atomic_counter_check/dbgAcPath_pass.json similarity index 100% rename from tests/tep-to-tep_atomic_counter_check/dbgAcPath_pass.json rename to tests/checks/tep-to-tep_atomic_counter_check/dbgAcPath_pass.json diff --git a/tests/tep-to-tep_atomic_counter_check/test_tep_to_tep_ac_count_check.py b/tests/checks/tep-to-tep_atomic_counter_check/test_tep_to_tep_ac_count_check.py similarity index 71% rename from 
tests/tep-to-tep_atomic_counter_check/test_tep_to_tep_ac_count_check.py rename to tests/checks/tep-to-tep_atomic_counter_check/test_tep_to_tep_ac_count_check.py index 447adaf..6db1790 100644 --- a/tests/tep-to-tep_atomic_counter_check/test_tep_to_tep_ac_count_check.py +++ b/tests/checks/tep-to-tep_atomic_counter_check/test_tep_to_tep_ac_count_check.py @@ -9,6 +9,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "tep_to_tep_ac_counter_check" + # icurl queries atomic_counter_api = 'dbgAcPath.json' atomic_counter_api += '?rsp-subtree-include=count' @@ -17,26 +19,23 @@ @pytest.mark.parametrize( "icurl_outputs, expected_result", [ - ##FAILING = COUNT > 1600 + # FAILING = COUNT > 1600 ( - {atomic_counter_api: read_data(dir, "dbgAcPath_max.json"), - }, + {atomic_counter_api: read_data(dir, "dbgAcPath_max.json")}, script.FAIL_UF, ), - ##PASSING = COUNT > 0 < = 1600 + # PASSING = COUNT > 0 < = 1600 ( - {atomic_counter_api: read_data(dir, "dbgAcPath_pass.json"), - }, + {atomic_counter_api: read_data(dir, "dbgAcPath_pass.json")}, script.PASS, ), - ##N/A = COUNT EQUAL 0 + # N/A = COUNT EQUAL 0 ( - {atomic_counter_api: read_data(dir, "dbgAcPath_na.json"), - }, + {atomic_counter_api: read_data(dir, "dbgAcPath_na.json")}, script.NA, ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.tep_to_tep_ac_counter_check(1, 1) - assert result == expected_result \ No newline at end of file +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/unsupported_fec_configuration_ex_check/test_unsupported_fec_configuration_ex_check.py b/tests/checks/unsupported_fec_configuration_ex_check/test_unsupported_fec_configuration_ex_check.py similarity index 82% rename from tests/unsupported_fec_configuration_ex_check/test_unsupported_fec_configuration_ex_check.py rename to 
tests/checks/unsupported_fec_configuration_ex_check/test_unsupported_fec_configuration_ex_check.py index 0990c76..d9fd9a8 100644 --- a/tests/unsupported_fec_configuration_ex_check/test_unsupported_fec_configuration_ex_check.py +++ b/tests/checks/unsupported_fec_configuration_ex_check/test_unsupported_fec_configuration_ex_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "unsupported_fec_configuration_ex_check" # icurl queries topSystems = 'topSystem.json' @@ -16,6 +17,7 @@ topSystems += '&rsp-subtree-filter=or(eq(l1PhysIf.fecMode,"ieee-rs-fec"),eq(l1PhysIf.fecMode,"cons16-rs-fec"),eq(eqptCh.model,"N9K-C93180YC-EX"))' topSystems += '&rsp-subtree-include=required' + @pytest.mark.parametrize( "icurl_outputs, sw_cversion, tversion, expected_result", [ @@ -63,11 +65,9 @@ ), ], ) -def test_logic(mock_icurl, sw_cversion, tversion, expected_result): - result = script.unsupported_fec_configuration_ex_check( - 1, - 1, - script.AciVersion(sw_cversion) if sw_cversion else None, - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, sw_cversion, tversion, expected_result): + result = run_check( + sw_cversion=script.AciVersion(sw_cversion) if sw_cversion else None, + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result \ No newline at end of file + assert result.result == expected_result diff --git a/tests/unsupported_fec_configuration_ex_check/topSystem_neg.json b/tests/checks/unsupported_fec_configuration_ex_check/topSystem_neg.json similarity index 100% rename from tests/unsupported_fec_configuration_ex_check/topSystem_neg.json rename to tests/checks/unsupported_fec_configuration_ex_check/topSystem_neg.json diff --git a/tests/unsupported_fec_configuration_ex_check/topSystem_pos.json b/tests/checks/unsupported_fec_configuration_ex_check/topSystem_pos.json similarity index 100% rename from 
tests/unsupported_fec_configuration_ex_check/topSystem_pos.json rename to tests/checks/unsupported_fec_configuration_ex_check/topSystem_pos.json diff --git a/tests/uplink_limit_check/eqptPortP_NEG.json b/tests/checks/uplink_limit_check/eqptPortP_NEG.json similarity index 100% rename from tests/uplink_limit_check/eqptPortP_NEG.json rename to tests/checks/uplink_limit_check/eqptPortP_NEG.json diff --git a/tests/uplink_limit_check/eqptPortP_POS.json b/tests/checks/uplink_limit_check/eqptPortP_POS.json similarity index 100% rename from tests/uplink_limit_check/eqptPortP_POS.json rename to tests/checks/uplink_limit_check/eqptPortP_POS.json diff --git a/tests/uplink_limit_check/eqptPortP_empty.json b/tests/checks/uplink_limit_check/eqptPortP_empty.json similarity index 100% rename from tests/uplink_limit_check/eqptPortP_empty.json rename to tests/checks/uplink_limit_check/eqptPortP_empty.json diff --git a/tests/uplink_limit_check/test_uplink_limit_check.py b/tests/checks/uplink_limit_check/test_uplink_limit_check.py similarity index 82% rename from tests/uplink_limit_check/test_uplink_limit_check.py rename to tests/checks/uplink_limit_check/test_uplink_limit_check.py index 0cf9dc8..128ae31 100644 --- a/tests/uplink_limit_check/test_uplink_limit_check.py +++ b/tests/checks/uplink_limit_check/test_uplink_limit_check.py @@ -8,6 +8,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "uplink_limit_check" # icurl queries eqptPortP = 'eqptPortP.json?query-target-filter=eq(eqptPortP.ctrl,"uplink")' @@ -50,8 +51,9 @@ ) ], ) -def test_logic(mock_icurl, cver, tver, expected_result): - result = script.uplink_limit_check( - 1, 1, script.AciVersion(cver), script.AciVersion(tver) +def test_logic(run_check, mock_icurl, cver, tver, expected_result): + result = run_check( + cversion=script.AciVersion(cver), + tversion=script.AciVersion(tver), ) - assert result == expected_result + assert result.result == expected_result diff --git 
a/tests/validate_32_64_bit_image_check/firmwareFirmware_empty.json b/tests/checks/validate_32_64_bit_image_check/firmwareFirmware_empty.json similarity index 100% rename from tests/validate_32_64_bit_image_check/firmwareFirmware_empty.json rename to tests/checks/validate_32_64_bit_image_check/firmwareFirmware_empty.json diff --git a/tests/validate_32_64_bit_image_check/firmwareFirmware_neg.json b/tests/checks/validate_32_64_bit_image_check/firmwareFirmware_neg.json similarity index 100% rename from tests/validate_32_64_bit_image_check/firmwareFirmware_neg.json rename to tests/checks/validate_32_64_bit_image_check/firmwareFirmware_neg.json diff --git a/tests/validate_32_64_bit_image_check/firmwareFirmware_pos.json b/tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos.json similarity index 100% rename from tests/validate_32_64_bit_image_check/firmwareFirmware_pos.json rename to tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos.json diff --git a/tests/validate_32_64_bit_image_check/firmwareFirmware_pos2.json b/tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos2.json similarity index 100% rename from tests/validate_32_64_bit_image_check/firmwareFirmware_pos2.json rename to tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos2.json diff --git a/tests/validate_32_64_bit_image_check/firmwareFirmware_pos3.json b/tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos3.json similarity index 100% rename from tests/validate_32_64_bit_image_check/firmwareFirmware_pos3.json rename to tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos3.json diff --git a/tests/validate_32_64_bit_image_check/firmwareFirmware_pos4.json b/tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos4.json similarity index 100% rename from tests/validate_32_64_bit_image_check/firmwareFirmware_pos4.json rename to tests/checks/validate_32_64_bit_image_check/firmwareFirmware_pos4.json diff --git 
a/tests/validate_32_64_bit_image_check/test_validate_32_64_bit_image_check.py b/tests/checks/validate_32_64_bit_image_check/test_validate_32_64_bit_image_check.py similarity index 59% rename from tests/validate_32_64_bit_image_check/test_validate_32_64_bit_image_check.py rename to tests/checks/validate_32_64_bit_image_check/test_validate_32_64_bit_image_check.py index 75eac75..2371d5a 100644 --- a/tests/validate_32_64_bit_image_check/test_validate_32_64_bit_image_check.py +++ b/tests/checks/validate_32_64_bit_image_check/test_validate_32_64_bit_image_check.py @@ -9,97 +9,88 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "validate_32_64_bit_image_check" + # icurl queries -firmware_60_api = 'firmwareFirmware.json' -firmware_60_api += '?query-target-filter=eq(firmwareFirmware.fullVersion,"n9000-16.0(3e)")' +firmware_60_api = "firmwareFirmware.json" +firmware_60_api += '?query-target-filter=eq(firmwareFirmware.fullVersion,"n9000-16.0(3e)")' # icurl queries -firmware_52_api = 'firmwareFirmware.json' -firmware_52_api += '?query-target-filter=eq(firmwareFirmware.fullVersion,"n9000-15.2(7d)")' +firmware_52_api = "firmwareFirmware.json" +firmware_52_api += '?query-target-filter=eq(firmwareFirmware.fullVersion,"n9000-15.2(7d)")' + @pytest.mark.parametrize( "icurl_outputs, cversion, tversion, expected_result", [ - ## NO TVERSION - MANUAL + # NO TVERSION - MANUAL ( - {firmware_60_api: read_data(dir, "firmwareFirmware_pos.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_pos.json")}, "5.2(1a)", None, script.MANUAL, ), - ## APIC not yet upgraded to 6.0(2)+ - POST + # APIC not yet upgraded to 6.0(2)+ - POST ( - {firmware_60_api: read_data(dir, "firmwareFirmware_pos.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_pos.json")}, "5.2(1a)", "6.0(3e)", script.POST, ), - ## FAILING = AFFECTED VERSION + ONLY 64 BIT Image + # FAILING = AFFECTED VERSION + ONLY 64 BIT Image ( - {firmware_60_api: 
read_data(dir, "firmwareFirmware_pos.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_pos.json")}, "6.0(3e)", "6.0(3e)", script.FAIL_UF, ), - ## FAILING = AFFECTED VERSION + Images were uploaded before upgrade + # FAILING = AFFECTED VERSION + Images were uploaded before upgrade ( - {firmware_60_api: read_data(dir, "firmwareFirmware_pos2.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_pos2.json")}, "6.0(3e)", "6.0(3e)", script.FAIL_UF, ), - ## FAILING = AFFECTED VERSION + 32-bit image shows NA + # FAILING = AFFECTED VERSION + 32-bit image shows NA ( - {firmware_60_api: read_data(dir, "firmwareFirmware_pos3.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_pos3.json")}, "6.0(3e)", "6.0(3e)", script.FAIL_UF, ), - ## FAILING = AFFECTED VERSION + 64-bit image shows NA + # FAILING = AFFECTED VERSION + 64-bit image shows NA ( - {firmware_60_api: read_data(dir, "firmwareFirmware_pos4.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_pos4.json")}, "6.0(3e)", "6.0(3e)", script.FAIL_UF, ), - ## FAILING = AFFECTED VERSION + AFFECTED MO NON EXISTING + # FAILING = AFFECTED VERSION + AFFECTED MO NON EXISTING ( - {firmware_60_api: read_data(dir, "firmwareFirmware_empty.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_empty.json")}, "6.0(3e)", "6.0(3e)", script.FAIL_UF, ), - ## PASSING = AFFECTED VERSION + NON-AFFECTED MO + # PASSING = AFFECTED VERSION + NON-AFFECTED MO ( - {firmware_60_api: read_data(dir, "firmwareFirmware_neg.json"), - }, + {firmware_60_api: read_data(dir, "firmwareFirmware_neg.json")}, "6.0(3e)", "6.0(3e)", script.PASS, ), - ## PASSING = NON-AFFECTED VERSION + AFFECTED MO + # PASSING = NON-AFFECTED VERSION + AFFECTED MO ( - {firmware_52_api: read_data(dir, "firmwareFirmware_empty.json"), - }, + {firmware_52_api: read_data(dir, "firmwareFirmware_empty.json")}, "5.2(1a)", "5.2(7d)", script.NA, ), - ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = 
script.validate_32_64_bit_image_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None ) - assert result == expected_result \ No newline at end of file + assert result.result == expected_result diff --git a/tests/vmm_active_uplinks_check/fvUplinkOrderCont_neg.json b/tests/checks/vmm_active_uplinks_check/fvUplinkOrderCont_neg.json similarity index 100% rename from tests/vmm_active_uplinks_check/fvUplinkOrderCont_neg.json rename to tests/checks/vmm_active_uplinks_check/fvUplinkOrderCont_neg.json diff --git a/tests/vmm_active_uplinks_check/fvUplinkOrderCont_not_exist.json b/tests/checks/vmm_active_uplinks_check/fvUplinkOrderCont_not_exist.json similarity index 100% rename from tests/vmm_active_uplinks_check/fvUplinkOrderCont_not_exist.json rename to tests/checks/vmm_active_uplinks_check/fvUplinkOrderCont_not_exist.json diff --git a/tests/vmm_active_uplinks_check/fvUplinkOrderCont_pos.json b/tests/checks/vmm_active_uplinks_check/fvUplinkOrderCont_pos.json similarity index 100% rename from tests/vmm_active_uplinks_check/fvUplinkOrderCont_pos.json rename to tests/checks/vmm_active_uplinks_check/fvUplinkOrderCont_pos.json diff --git a/tests/vmm_active_uplinks_check/test_vmm_active_uplinks_check.py b/tests/checks/vmm_active_uplinks_check/test_vmm_active_uplinks_check.py similarity index 78% rename from tests/vmm_active_uplinks_check/test_vmm_active_uplinks_check.py rename to tests/checks/vmm_active_uplinks_check/test_vmm_active_uplinks_check.py index 846f343..5920efd 100644 --- a/tests/vmm_active_uplinks_check/test_vmm_active_uplinks_check.py +++ b/tests/checks/vmm_active_uplinks_check/test_vmm_active_uplinks_check.py @@ -9,9 +9,10 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = 
"vmm_active_uplinks_check" # icurl queries -uplink_api = 'fvUplinkOrderCont.json' +uplink_api = "fvUplinkOrderCont.json" uplink_api += '?query-target-filter=eq(fvUplinkOrderCont.active,"")' @@ -32,6 +33,6 @@ ), ], ) -def test_logic(mock_icurl, expected_result): - result = script.vmm_active_uplinks_check(1, 1) - assert result == expected_result +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result diff --git a/tests/checks/vpc_paired_switches_check/fabricNode.json b/tests/checks/vpc_paired_switches_check/fabricNode.json new file mode 100644 index 0000000..73e7d5b --- /dev/null +++ b/tests/checks/vpc_paired_switches_check/fabricNode.json @@ -0,0 +1,145 @@ +[ + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1", + "id": "1", + "name": "apic1", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-2", + "id": "2", + "name": "apic2", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-3", + "id": "3", + "name": "apic3", + "role": "controller", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-101", + "id": "101", + "name": "LF101", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-102", + "id": "102", + "name": "LF102", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-103", + "id": "103", + "name": "LF103", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-104", + "id": "104", + "name": "LF104", + "role": "leaf", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-111", + "id": 
"111", + "name": "T2_LF111", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-112", + "id": "112", + "name": "T2_LF112", + "role": "leaf", + "nodeType": "tier-2-leaf" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-121", + "id": "121", + "name": "RL_LF121", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-122", + "id": "122", + "name": "RL_LF122", + "role": "leaf", + "nodeType": "remote-leaf-wan" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1001", + "id": "1001", + "name": "SP1001", + "role": "spine", + "nodeType": "unspecified" + } + } + }, + { + "fabricNode": { + "attributes": { + "dn": "topology/pod-1/node-1002", + "id": "1002", + "name": "SP1002", + "role": "spine", + "nodeType": "unspecified" + } + } + } +] diff --git a/tests/checks/vpc_paired_switches_check/test_vpc_paired_switches_check.py b/tests/checks/vpc_paired_switches_check/test_vpc_paired_switches_check.py new file mode 100644 index 0000000..c6bb8e7 --- /dev/null +++ b/tests/checks/vpc_paired_switches_check/test_vpc_paired_switches_check.py @@ -0,0 +1,38 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "vpc_paired_switches_check" + + +@pytest.mark.parametrize( + "vpc_node_ids, expected_result, expected_data", + [ + # all leaf switches are in vPC + ( + ["101", "102", "103", "104", "111", "112", "121", "122"], + script.PASS, + [], + ), + # not all leaf switches are in vPC + ( + ["101", "102", "111", "112"], + script.MANUAL, + [["103", "LF103"], ["104", "LF104"], ["121", "RL_LF121"], ["122", "RL_LF122"]], + ), + ], +) +def test_logic(run_check, 
vpc_node_ids, expected_result, expected_data): + result = run_check( + vpc_node_ids=vpc_node_ids, + fabric_nodes=read_data(dir, "fabricNode.json"), + ) + assert result.result == expected_result + assert result.data == expected_data diff --git a/tests/vzany_vzany_service_epg_check/test_vzany_vzany_service_epg_check.py b/tests/checks/vzany_vzany_service_epg_check/test_vzany_vzany_service_epg_check.py similarity index 92% rename from tests/vzany_vzany_service_epg_check/test_vzany_vzany_service_epg_check.py rename to tests/checks/vzany_vzany_service_epg_check/test_vzany_vzany_service_epg_check.py index 91072c2..44f7f1c 100644 --- a/tests/vzany_vzany_service_epg_check/test_vzany_vzany_service_epg_check.py +++ b/tests/checks/vzany_vzany_service_epg_check/test_vzany_vzany_service_epg_check.py @@ -9,6 +9,7 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) +test_function = "vzany_vzany_service_epg_check" # icurl queries vzRsSubjGraphAtts = "vzRsSubjGraphAtt.json" @@ -120,11 +121,9 @@ ), ], ) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.vzany_vzany_service_epg_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None, +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, ) - assert result == expected_result + assert result.result == expected_result diff --git a/tests/vzany_vzany_service_epg_check/vzRsSubjGraphAtt.json b/tests/checks/vzany_vzany_service_epg_check/vzRsSubjGraphAtt.json similarity index 100% rename from tests/vzany_vzany_service_epg_check/vzRsSubjGraphAtt.json rename to tests/checks/vzany_vzany_service_epg_check/vzRsSubjGraphAtt.json diff --git a/tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_cons_diff_VRFs.json b/tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_cons_diff_VRFs.json 
similarity index 100% rename from tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_cons_diff_VRFs.json rename to tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_cons_diff_VRFs.json diff --git a/tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_only.json b/tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_only.json similarity index 100% rename from tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_only.json rename to tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_prov_only.json diff --git a/tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny.json b/tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny.json similarity index 100% rename from tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny.json rename to tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny.json diff --git a/tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny_2_VRFs.json b/tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny_2_VRFs.json similarity index 100% rename from tests/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny_2_VRFs.json rename to tests/checks/vzany_vzany_service_epg_check/vzRtAny_vzAny_vzAny_2_VRFs.json diff --git a/tests/conftest.py b/tests/conftest.py index 92ba9a8..348c7d7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,7 +3,7 @@ import pytest import logging import importlib -from subprocess import CalledProcessError +from itertools import product TEST_DIR = os.path.dirname(os.path.abspath(__file__)) PROJECT_DIR = os.path.dirname(TEST_DIR) @@ -16,7 +16,7 @@ @pytest.fixture(scope="session", autouse=True) def init(): - script.initialize() + script.init_system() @pytest.fixture @@ -93,8 +93,7 @@ def mock_icurl(monkeypatch, icurl_outputs): def _mock_icurl(apitype, query, page=0, page_size=100000): output = icurl_outputs.get(query) if output is None: - log.error("Query `%s` not found in test data", query) - data = {"totalCount": "0", "imdata": []} + raise KeyError("Query `{}` not 
found in test data".format(query)) elif isinstance(output, list): # icurl_outputs option 1 - output is imdata which is empty if not output: @@ -116,116 +115,365 @@ def _mock_icurl(apitype, query, page=0, page_size=100000): else: data = {"totalCount": output["totalCount"], "imdata": []} - script._icurl_error_handler(data['imdata']) + script._icurl_error_handler(data["imdata"]) return data monkeypatch.setattr(script, "_icurl", _mock_icurl) @pytest.fixture -def conn_failure(): - return False - - -@pytest.fixture -def conn_cmds(): - ''' - Set of test parameters for mocked `Connection.cmd()`. - - ex) - ``` - { - <apic_ip>: [{ - "cmd": "ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin", - "output": """\ - ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin - 6.1G -rwxr-xr-x 1 root root 6.1G Apr 3 16:36 /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin - f2-apic1# - """, - "exception": None - }] +def expected_common_data(request): + data = { # default + "username": "admin", + "password": "mypassword", + "cversion": script.AciVersion("6.1(1a)"), + "tversion": script.AciVersion("6.2(1a)"), + "sw_cversion": script.AciVersion("6.0(9d)"), + "vpc_node_ids": ["101", "102"], + "fabric_nodes": [ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller", + "version": "6.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller", + "version": "6.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + 
"model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller", + "version": "6.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf", + "version": "n9000-16.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf", + "version": "n9000-16.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.111", + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine1001", + "nodeType": "unspecified", + "role": "spine", + "version": "n9000-16.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-2/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C93180YC-FX3", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf201", + "nodeType": "unspecified", + "role": "leaf", + "version": "n9000-16.0(9d)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.211", + "dn": "topology/pod-2/node-2001", + "fabricSt": "active", + "id": "2001", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine2001", + "nodeType": "unspecified", + "role": "spine", + "version": "n9000-16.1(1a)", + } + } + }, + ], } - ``` - - The real output from `Connection.cmd()` (i.e. `Connection.output`) contains many ANSI characters. In this fixture, those characters are not considered. 
- ''' - return {} - - -class MockConnection(script.Connection): - conn_failure = False - conn_cmds = None - - def connect(self): - """ - `Connection.connect()` is just instantiating `pexepect.spawn()` which does not - initiate the SSH connection yet. Not exception likely happens here. - """ - if self.conn_failure: - raise Exception("Simulated exception at connect()") - - def cmd(self, command, **kargs): - """ - `Connection.cmd()` initiates the SSH connection (if not done yet) and sends the command. - Each check typically has multiple `cmd()` with different commands. - To cover that, this mock func uses a dictionary `conn_cmds` as the test data. - """ - _conn_cmds = self.conn_cmds[self.hostname] - for conn_cmd in _conn_cmds: - if command == conn_cmd["cmd"]: - if conn_cmd["exception"]: - raise conn_cmd["exception"] - self.output = conn_cmd["output"] - break + param = getattr(request, "param", {}) + for key in data: + if param.get(key, "non_falsy_default") != "non_falsy_default": + data[key] = param[key] + return data + + +@pytest.fixture(scope="session") +def result_objects_factory(): + def _result_objects_factory(profile, requested_status=None): + result_props = [ + # result (status) + [ + script.PASS, + script.FAIL_O, + script.FAIL_UF, + script.MANUAL, + script.POST, + script.NA, + script.ERROR, + ], + # msg + [ + "", + "test msg", + "long test msg " + "x" * 120, # > 120 char + ], + # headers + [ + [], + ["H1", "H2", "H3"], + ], + # data + [ + [], + [["Data1", "Data2", "Data3"], ["Data4", "Data5", "Data6"], ["Loooooong Data7", "Data8", "Data9"]], + ], + # unformatted_headers + [ + [], + ["Unformatted_H1"], + ], + # unformatted_data + [ + [], + [["Data1"], ["Data2"]], + ], + # recommended_action + [ + "", + "This is your recommendation to remediate the issue", + ], + # doc_url + [ + "", + "https://fake_doc_url.local/path1/#section1", + ], + ] + + def _generate_result_obj(result_prop): + return script.Result( + result=result_prop[0], + msg=result_prop[1], + 
headers=result_prop[2], + data=result_prop[3], + unformatted_headers=result_prop[4], + unformatted_data=result_prop[5], + recommended_action=result_prop[6], + doc_url=result_prop[7], + ) + + def _get_requested_status(requested_status): + if not isinstance(requested_status, list): + requested_status = [requested_status] + all_status = result_props[0] + if not requested_status: + return all_status + return [status for status in all_status if status in requested_status] + + if profile == "fullmesh": + raw_product = product(*result_props) + return [_generate_result_obj(prop) for prop in raw_product] + elif profile == "fail_full": + # All props populated (mainly for FAIL_O, FAIL_UF, MANUAL) + status_list = [script.FAIL_O, script.FAIL_UF, script.MANUAL] + if requested_status: + status_list = _get_requested_status(requested_status) + return [ + _generate_result_obj( + [ + status, # result + result_props[1][1], # msg + result_props[2][1], # headers + result_props[3][1], # data + result_props[4][1], # unformatted_headers + result_props[5][1], # unformatted_data + result_props[6][1], # recommended_action + result_props[7][1], # doc_url + ] + ) + for status in status_list + ] + elif profile == "fail_simple": + # No msg nor unformatted_headers/data (mojority of FAIL_O, FAIL_UF, MANUAL) + status_list = [script.FAIL_O, script.FAIL_UF, script.MANUAL] + if requested_status: + status_list = _get_requested_status(requested_status) + return [ + _generate_result_obj( + [ + status, # result + result_props[1][0], # msg + result_props[2][1], # headers + result_props[3][1], # data + result_props[4][0], # unformatted_headers + result_props[5][0], # unformatted_data + result_props[6][1], # recommended_action + result_props[7][1], # doc_url + ] + ) + for status in status_list + ] + elif profile == "pass": + # Only rec_action and doc (mainly for PASS) + status_list = [script.PASS] + if requested_status: + status_list = _get_requested_status(requested_status) + return [ + _generate_result_obj( 
+ [ + status, # result + result_props[1][0], # msg + result_props[2][0], # headers + result_props[3][0], # data + result_props[4][0], # unformatted_headers + result_props[5][0], # unformatted_data + result_props[6][1], # recommended_action + result_props[7][1], # doc_url + ] + ) + for status in status_list + ] + elif profile == "only_msg": + # Only msg (mainly for PASS, NA, MANUAL, POST, ERROR) + status_list = [script.PASS, script.NA, script.MANUAL, script.POST, script.ERROR] + if requested_status: + status_list = _get_requested_status(requested_status) + return [ + _generate_result_obj( + [ + status, # result + result_props[1][1], # msg + result_props[2][0], # headers + result_props[3][0], # data + result_props[4][0], # unformatted_headers + result_props[5][0], # unformatted_data + result_props[6][0], # recommended_action + result_props[7][0], # doc_url + ] + ) + for status in status_list + ] + elif profile == "only_long_msg": + # Only long msg (mainly for NA and ERROR) + status_list = [script.NA, script.ERROR] + if requested_status: + status_list = _get_requested_status(requested_status) + return [ + _generate_result_obj( + [ + status, # result + result_props[1][2], # msg + result_props[2][0], # headers + result_props[3][0], # data + result_props[4][0], # unformatted_headers + result_props[5][0], # unformatted_data + result_props[6][0], # recommended_action + result_props[7][0], # doc_url + ] + ) + for status in status_list + ] else: - log.error("Command `%s` not found in test data `conn_cmds`", command) - raise Exception("FAILURE IN PYTEST") + raise ValueError("Unexpected profile - {}".format(profile)) + return _result_objects_factory -@pytest.fixture -def mock_conn(monkeypatch, conn_failure, conn_cmds): - MockConnection.conn_failure = conn_failure - MockConnection.conn_cmds = conn_cmds - monkeypatch.setattr(script, "Connection", MockConnection) +@pytest.fixture(scope="session") +def check_factory(): + def _check_factory(check_id, check_title, result_obj): + 
@script.check_wrapper(check_title=check_title) + def _check(**kwargs): + if result_obj.result == script.ERROR: + raise Exception(result_obj.msg) + else: + return result_obj + + _check.__name__ = check_id # Set the function name for the check + return _check -@pytest.fixture -def cmd_outputs(): - """ - Mocked output for `run_cmd` function. - This is used to avoid executing real commands in tests. - """ - return { - "ls -aslh /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin": { - "splitlines": False, - "output": "6.1G -rwxr-xr-x 1 root root 6.1G Apr 3 16:36 /firmware/fwrepos/fwrepo/aci-apic-dk9.6.0.5h.bin\napic1#", - } - } + return _check_factory -@pytest.fixture -def mock_run_cmd(monkeypatch, cmd_outputs): - """ - Mock the `run_cmd` function to avoid executing real commands. - This is useful for tests that do not require actual command execution. - """ - def _mock_run_cmd(cmd, splitlines=False): - details = cmd_outputs.get(cmd) - if details is None: - log.error("Command `%s` not found in test data", cmd) - return "" - if details.get("CalledProcessError"): - raise CalledProcessError(127, cmd) - - splitlines = details.get("splitlines", False) - output = details.get("output") - if output is None: - log.error("Output for cmd `%s` not found in test data", cmd) - output = "" +@pytest.fixture(scope="session") +def check_funcs_factory(check_factory): + def _check_funcs_factory(result_objects): + check_funcs = [] + for idx, result_obj in enumerate(result_objects): + check_id = "fake_{}_check".format(idx) + check_title = "Fake Check {}".format(idx) + check_func = check_factory(check_id, check_title, result_obj) + check_funcs.append(check_func) + return check_funcs - log.debug("Mocked run_cmd called with args: %s, kwargs: %s", cmd, splitlines) - if splitlines: - return output.splitlines() - else: - return output - monkeypatch.setattr(script, "run_cmd", _mock_run_cmd) + return _check_funcs_factory diff --git a/tests/fabric_link_redundancy_check/fabricNode.json 
b/tests/fabric_link_redundancy_check/fabricNode.json deleted file mode 100644 index 7bb924e..0000000 --- a/tests/fabric_link_redundancy_check/fabricNode.json +++ /dev/null @@ -1,101 +0,0 @@ -[ - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-101", - "id": "101", - "name": "LF101", - "role": "leaf", - "nodeType": "unspecified" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-102", - "id": "102", - "name": "LF102", - "role": "leaf", - "nodeType": "unspecified" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-103", - "id": "103", - "name": "LF103", - "role": "leaf", - "nodeType": "unspecified" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-111", - "id": "111", - "name": "T2_LF111", - "role": "leaf", - "nodeType": "tier-2-leaf" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-112", - "id": "112", - "name": "T2_LF112", - "role": "leaf", - "nodeType": "tier-2-leaf" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-121", - "id": "121", - "name": "RL_LF121", - "role": "leaf", - "nodeType": "remote-leaf-wan" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-122", - "id": "122", - "name": "RL_LF122", - "role": "leaf", - "nodeType": "remote-leaf-wan" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-1001", - "id": "1001", - "name": "SP1001", - "role": "spine", - "nodeType": "unspecified" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-1002", - "id": "1002", - "name": "SP1002", - "role": "spine", - "nodeType": "unspecified" - } - } - } -] diff --git a/tests/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py b/tests/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py deleted file mode 100644 index 3570cce..0000000 --- 
a/tests/fabric_link_redundancy_check/test_fabric_link_redundancy_check.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -fabric_nodes_api = 'fabricNode.json' -fabric_nodes_api += '?query-target-filter=and(or(eq(fabricNode.role,"leaf"),eq(fabricNode.role,"spine")),eq(fabricNode.fabricSt,"active"))' - -# icurl queries -lldp_adj_api = 'lldpAdjEp.json' -lldp_adj_api += '?query-target-filter=wcard(lldpAdjEp.sysDesc,"topology/pod")' - - -@pytest.mark.parametrize( - "icurl_outputs, expected_result", - [ - # FAILING = T1 leaf101 single-homed, T1 leaf102 none, T1 leaf103 multi-homed - ( - { - fabric_nodes_api: read_data(dir, "fabricNode.json"), - lldp_adj_api: read_data(dir, "lldpAdjEp_pos_spine_only.json"), - }, - script.FAIL_O, - ), - # FAILING = T1 leafs multi-homed, T2 leaf111 single-homed, T2 leaf112 multi-homed - ( - { - fabric_nodes_api: read_data(dir, "fabricNode.json"), - lldp_adj_api: read_data(dir, "lldpAdjEp_pos_t1_only.json"), - }, - script.FAIL_O, - ), - # FAILING = T1 leaf101 single-homed, T1 leaf102 none, T1 leaf103 multi-homed - # T2 leaf111 single-homed, T2 leaf112 multi-homed - ( - { - fabric_nodes_api: read_data(dir, "fabricNode.json"), - lldp_adj_api: read_data(dir, "lldpAdjEp_pos_spine_t1.json"), - }, - script.FAIL_O, - ), - # PASSING = ALL LEAF SWITCHES ARE MULTI-HOMED except for RL - ( - { - fabric_nodes_api: read_data(dir, "fabricNode.json"), - lldp_adj_api: read_data(dir, "lldpAdjEp_neg.json"), - }, - script.PASS, - ), - ], -) -def test_logic(mock_icurl , expected_result): - result = script.fabric_link_redundancy_check(1, 1) - assert result == expected_result diff --git a/tests/fabricdomain_name_check/test_fabricdomain_name_check.py b/tests/fabricdomain_name_check/test_fabricdomain_name_check.py deleted file 
mode 100644 index 61ae397..0000000 --- a/tests/fabricdomain_name_check/test_fabricdomain_name_check.py +++ /dev/null @@ -1,79 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -topSystem = 'topSystem.json?query-target-filter=eq(topSystem.role,"controller")' - - -@pytest.mark.parametrize( - "icurl_outputs, cversion, tversion, expected_result", - [ - # # char test - ( - {topSystem: read_data(dir, "topSystem_1POS.json")}, - "5.2(3g)", - "6.0(2h)", - script.FAIL_O, - ), - ( - {topSystem: read_data(dir, "topSystem_1POS.json")}, - "6.0(3a)", - "6.0(2h)", - script.FAIL_O, - ), - # ; char test - ( - {topSystem: read_data(dir, "topSystem_2POS.json")}, - "5.2(3g)", - "6.0(2h)", - script.FAIL_O, - ), - ( - {topSystem: read_data(dir, "topSystem_2POS.json")}, - "6.0(3a)", - "6.0(2h)", - script.FAIL_O, - ), - # Neither ; or # in fabricDomain - ( - {topSystem: read_data(dir, "topSystem_NEG.json")}, - "5.2(3g)", - "6.0(2h)", - script.PASS, - ), - # only affected 6.0(2h), regardless of special chars - ( - {topSystem: read_data(dir, "topSystem_1POS.json")}, - "5.2(3g)", - "6.0(1j)", - script.PASS, - ), - # Eventual 6.0(3) has fix - ( - {topSystem: read_data(dir, "topSystem_1POS.json")}, - "5.2(3g)", - "6.0(3a)", - script.PASS, - ), - ( - {topSystem: read_data(dir, "topSystem_1POS.json")}, - "6.0(3a)", - "6.0(4a)", - script.PASS, - ), - ], -) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.fabricdomain_name_check( - 1, 1, script.AciVersion(cversion), script.AciVersion(tversion) - ) - assert result == expected_result diff --git a/tests/fabricdomain_name_check/topSystem_1POS.json b/tests/fabricdomain_name_check/topSystem_1POS.json deleted file mode 100644 index e0764f0..0000000 --- 
a/tests/fabricdomain_name_check/topSystem_1POS.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "topSystem": { - "attributes": { - "address": "10.0.0.1", - "fabricId": "1", - "id": "1", - "fabricDomain": "fabric;4", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.2", - "fabricId": "1", - "id": "2", - "fabricDomain": "fabric;4", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.3", - "fabricId": "1", - "id": "3", - "fabricDomain": "fabric;4", - "role": "controller" - } - } - } -] diff --git a/tests/fabricdomain_name_check/topSystem_2POS.json b/tests/fabricdomain_name_check/topSystem_2POS.json deleted file mode 100644 index e353b36..0000000 --- a/tests/fabricdomain_name_check/topSystem_2POS.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "topSystem": { - "attributes": { - "address": "10.0.0.1", - "fabricId": "1", - "id": "1", - "fabricDomain": "fabric#4", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.2", - "fabricId": "1", - "id": "2", - "fabricDomain": "fabric#4", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.3", - "fabricId": "1", - "id": "3", - "fabricDomain": "fabric#4", - "role": "controller" - } - } - } -] diff --git a/tests/fabricdomain_name_check/topSystem_NEG.json b/tests/fabricdomain_name_check/topSystem_NEG.json deleted file mode 100644 index 7a1c1e7..0000000 --- a/tests/fabricdomain_name_check/topSystem_NEG.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "topSystem": { - "attributes": { - "address": "10.0.0.1", - "fabricId": "1", - "id": "1", - "fabricDomain": "fabric4", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.2", - "fabricId": "1", - "id": "2", - "fabricDomain": "fabric4", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.3", - "fabricId": "1", - "id": "3", - 
"fabricDomain": "fabric4", - "role": "controller" - } - } - } -] diff --git a/tests/fc_ex_model_check/fabricNode_NEG.json b/tests/fc_ex_model_check/fabricNode_NEG.json deleted file mode 100644 index 0637a08..0000000 --- a/tests/fc_ex_model_check/fabricNode_NEG.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/tests/fc_ex_model_check/fabricNode_POS.json b/tests/fc_ex_model_check/fabricNode_POS.json deleted file mode 100644 index 0d12e7c..0000000 --- a/tests/fc_ex_model_check/fabricNode_POS.json +++ /dev/null @@ -1,26 +0,0 @@ -[ - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-101", - "model": "N9K-C93180YC-EX" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-102", - "model": "N9K-C93108TC-EX" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-103", - "model": "N9K-C93108LC-EX" - } - } - } -] \ No newline at end of file diff --git a/tests/fc_ex_model_check/fcEntity_NEG.json b/tests/fc_ex_model_check/fcEntity_NEG.json deleted file mode 100644 index 0637a08..0000000 --- a/tests/fc_ex_model_check/fcEntity_NEG.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/tests/fc_ex_model_check/fcEntity_POS.json b/tests/fc_ex_model_check/fcEntity_POS.json deleted file mode 100644 index 162a77c..0000000 --- a/tests/fc_ex_model_check/fcEntity_POS.json +++ /dev/null @@ -1,18 +0,0 @@ -[ - { - "fcEntity": { - "attributes": { - "adminSt": "enabled", - "dn": "topology/pod-1/node-102/sys/fc" - } - } - }, - { - "fcEntity": { - "attributes": { - "adminSt": "enabled", - "dn": "topology/pod-1/node-101/sys/fc" - } - } - } -] diff --git a/tests/fc_ex_model_check/test_fc_ex_model_check.py b/tests/fc_ex_model_check/test_fc_ex_model_check.py deleted file mode 100644 index 1cd0833..0000000 --- a/tests/fc_ex_model_check/test_fc_ex_model_check.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import 
read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries - -fcEntity_api = 'fcEntity.json' -fabricNode_api = 'fabricNode.json' -fabricNode_api += '?query-target-filter=wcard(fabricNode.model,".*EX")' - -@pytest.mark.parametrize( - "icurl_outputs, tversion, expected_result", - [ - ## FABRIC HAS EX NODES and FC/FCOE CONFIG - ( - {fcEntity_api: read_data(dir, "fcEntity_POS.json"), - fabricNode_api: read_data(dir, "fabricNode_POS.json")}, - "6.1(1f)", - script.FAIL_O, - ), - ( - {fcEntity_api: read_data(dir, "fcEntity_POS.json"), - fabricNode_api: read_data(dir, "fabricNode_POS.json")}, - "6.0(7e)", - script.FAIL_O, - ), - # TVERSION NOT AFFECTED - ( - {fcEntity_api: read_data(dir, "fcEntity_POS.json"), - fabricNode_api: read_data(dir, "fabricNode_POS.json")}, - "6.0(1f)", - script.PASS, - ), - ## FABRIC DOES NOT HAVE EX NODES - ( - {fcEntity_api: read_data(dir, "fcEntity_NEG.json"), - fabricNode_api: read_data(dir, "fabricNode_NEG.json")}, - "6.1(1f)", - script.PASS, - ), - ( - {fcEntity_api: read_data(dir, "fcEntity_NEG.json"), - fabricNode_api: read_data(dir, "fabricNode_NEG.json")}, - "6.0(7e)", - script.PASS, - ), - ( - {fcEntity_api: read_data(dir, "fcEntity_POS.json"), - fabricNode_api: read_data(dir, "fabricNode_NEG.json")}, - "6.0(7e)", - script.PASS, - ), - ], -) -def test_logic(mock_icurl, tversion, expected_result): - result = script.fc_ex_model_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result diff --git a/tests/mini_aci_6_0_2/test_mini_aci_6_0_2_check.py b/tests/mini_aci_6_0_2/test_mini_aci_6_0_2_check.py deleted file mode 100644 index c93578e..0000000 --- a/tests/mini_aci_6_0_2/test_mini_aci_6_0_2_check.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = 
importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -topSystems = 'topSystem.json?query-target-filter=wcard(topSystem.role,"controller")' - - -@pytest.mark.parametrize( - "icurl_outputs, cversion, tversion, expected_result", - [ - ( - {topSystems: read_data(dir, "topSystem_controller_neg.json")}, - "3.2(1a)", - "5.2(6a)", - script.PASS, - ), - ( - {topSystems: read_data(dir, "topSystem_controller_neg.json")}, - "6.0(2e)", - "6.0(5d)", - script.PASS, - ), - ( - {topSystems: read_data(dir, "topSystem_controller_neg.json")}, - "5.2(3a)", - "6.0(3d)", - script.PASS, - ), - ( - {topSystems: read_data(dir, "topSystem_controller_pos.json")}, - "4.1(1a)", - "5.2(7f)", - script.PASS, - ), - ( - {topSystems: read_data(dir, "topSystem_controller_pos.json")}, - "4.2(2a)", - "6.0(2c)", - script.FAIL_UF, - ), - ( - {topSystems: read_data(dir, "topSystem_controller_pos.json")}, - "6.0(1a)", - "6.0(2c)", - script.FAIL_UF, - ), - ( - {topSystems: read_data(dir, "topSystem_controller_pos.json")}, - "6.0(2c)", - "6.0(5c)", - script.PASS, - ), - ], -) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.mini_aci_6_0_2_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None, - ) - assert result == expected_result diff --git a/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json b/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json deleted file mode 100644 index f1bdd0b..0000000 --- a/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3H.json +++ /dev/null @@ -1,26 +0,0 @@ -[ - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-2/node-113", - "fabricSt": "active", - "id": "113", - "model": "N9K-C93108TC-FX3H", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf113" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": 
"topology/pod-2/node-114", - "fabricSt": "active", - "id": "114", - "model": "N9K-C93108TC-FX3H", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf114" - } - } - } -] diff --git a/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json b/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json deleted file mode 100644 index 39538a0..0000000 --- a/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P.json +++ /dev/null @@ -1,26 +0,0 @@ -[ - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-102", - "fabricSt": "active", - "id": "101", - "model": "N9K-C93108TC-FX3P", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf101" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-102", - "fabricSt": "active", - "id": "102", - "model": "N9K-C93108TC-FX3P", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf102" - } - } - } -] diff --git a/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json b/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json deleted file mode 100644 index b9d7780..0000000 --- a/tests/n9k_c93108tc_fx3p_interface_down_check/fabricNode_FX3P3H.json +++ /dev/null @@ -1,50 +0,0 @@ -[ - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-102", - "fabricSt": "active", - "id": "101", - "model": "N9K-C93108TC-FX3P", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf101" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-1/node-102", - "fabricSt": "active", - "id": "102", - "model": "N9K-C93108TC-FX3P", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf102" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-2/node-113", - "fabricSt": "active", - "id": "113", - "model": "N9K-C93108TC-FX3H", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf113" - } - } - }, - { - "fabricNode": { - "attributes": { - "dn": "topology/pod-2/node-114", - "fabricSt": "active", - 
"id": "114", - "model": "N9K-C93108TC-FX3H", - "monPolDn": "uni/fabric/monfab-default", - "name": "leaf114" - } - } - } -] diff --git a/tests/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py b/tests/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py deleted file mode 100644 index 2c35bff..0000000 --- a/tests/n9k_c93108tc_fx3p_interface_down_check/test_n9k_c93108tc_fx3p_interface_down_check.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -api = 'fabricNode.json?query-target-filter=or(eq(fabricNode.model,"N9K-C93108TC-FX3P"),eq(fabricNode.model,"N9K-C93108TC-FX3H"))' - - -@pytest.mark.parametrize( - "icurl_outputs, tversion, expected_result", - [ - # Version not supplied - ({api: []}, None, script.MANUAL), - # Version not affected - ({api: read_data(dir, "fabricNode_FX3P3H.json")}, "5.2(8h)", script.PASS), - ({api: read_data(dir, "fabricNode_FX3P3H.json")}, "5.3(2b)", script.PASS), - ({api: read_data(dir, "fabricNode_FX3P3H.json")}, "6.0(4c)", script.PASS), - # Affected version, no FX3P or FX3H - ({api: []}, "5.2(8g)", script.PASS), - ({api: []}, "5.3(1d)", script.PASS), - ({api: []}, "6.0(2h)", script.PASS), - # Affected version, FX3P - ({api: read_data(dir, "fabricNode_FX3P.json")}, "5.2(8g)", script.FAIL_O), - ({api: read_data(dir, "fabricNode_FX3P.json")}, "5.3(1d)", script.FAIL_O), - ({api: read_data(dir, "fabricNode_FX3P.json")}, "6.0(2h)", script.FAIL_O), - # Affected version, FX3H - ({api: read_data(dir, "fabricNode_FX3H.json")}, "5.2(8g)", script.FAIL_O), - ({api: read_data(dir, "fabricNode_FX3H.json")}, "5.3(1d)", script.FAIL_O), - ({api: read_data(dir, "fabricNode_FX3H.json")}, "6.0(2h)", script.FAIL_O), - # Affected version, 
FX3P and FX3H - ({api: read_data(dir, "fabricNode_FX3P3H.json")}, "5.2(8g)", script.FAIL_O), - ({api: read_data(dir, "fabricNode_FX3P3H.json")}, "5.3(1d)", script.FAIL_O), - ({api: read_data(dir, "fabricNode_FX3P3H.json")}, "6.0(2h)", script.FAIL_O), - ], -) -def test_logic(mock_icurl, tversion, expected_result): - result = script.n9k_c93108tc_fx3p_interface_down_check( - 1, - 1, - script.AciVersion(tversion) if tversion else None, - ) - assert result == expected_result diff --git a/tests/observer_db_size_check/test_observer_db_size_check.py b/tests/observer_db_size_check/test_observer_db_size_check.py deleted file mode 100644 index 9a85663..0000000 --- a/tests/observer_db_size_check/test_observer_db_size_check.py +++ /dev/null @@ -1,128 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - -topSystem_api = 'topSystem.json' -topSystem_api += '?query-target-filter=eq(topSystem.role,"controller")' - -topSystem = read_data(dir, "topSystem.json") -apic_ips = [ - mo["topSystem"]["attributes"]["address"] - for mo in topSystem["imdata"] -] - -ls_cmd = "ls -lh /data2/dbstats | awk '{print $5, $9}'" -ls_output_neg = """\ - -11M observer_8.db -11M observer_9.db -11M observer_10.db -11M observer_template.db -apic1# -""" -ls_output_pos = """\ - -1.0G observer_8.db -12G observer_9.db -999M observer_10.db -11M observer_template.db -apic1# -""" -ls_output_no_such_file = """\ -ls: cannot access /data2/dbstats: No such file or directory -apic1# -""" - -@pytest.mark.parametrize( - "icurl_outputs, conn_failure, conn_cmds, expected_result", - [ - # Connection failure - ( - {topSystem_api: read_data(dir, "topSystem.json")}, - True, - [], - script.ERROR, - ), - # Simulatated exception at `ls` command - ( - {topSystem_api: read_data(dir, "topSystem.json")}, - False, - { - 
apic_ip: [ - { - "cmd": ls_cmd, - "output": "", - "exception": Exception("Simulated exception at `ls` command"), - } - ] - for apic_ip in apic_ips - }, - script.ERROR, - ), - # dbstats dir not found/not accessible - ( - {topSystem_api: read_data(dir, "topSystem.json")}, - False, - { - apic_ip: [ - { - "cmd": ls_cmd, - "output": "\n".join([ls_cmd, ls_output_no_such_file]), - "exception": None, - } - ] - for apic_ip in apic_ips - }, - script.ERROR, - ), - # dbstats dir found, all DBs under 1G - ( - {topSystem_api: read_data(dir, "topSystem.json")}, - False, - { - apic_ip: [ - { - "cmd": ls_cmd, - "output": "\n".join([ls_cmd, ls_output_neg]), - "exception": None, - } - ] - for apic_ip in apic_ips - }, - script.PASS, - ), - # dbstats dir found, found DBs over 1G - ( - {topSystem_api: read_data(dir, "topSystem.json")}, - False, - { - apic_ip: [ - { - "cmd": ls_cmd, - "output": "\n".join([ls_cmd, ls_output_pos]), - "exception": None, - } - ] - for apic_ip in apic_ips - }, - script.FAIL_UF, - ), - # ERROR, topsystem failure - ( - {topSystem_api: read_data(dir, "topSystem_empty.json")}, - False, - [], - script.ERROR, - ), - ], -) -def test_logic(mock_icurl, mock_conn, expected_result): - result = script.observer_db_size_check(1, 1, "fake_username", "fake_password") - assert result == expected_result diff --git a/tests/observer_db_size_check/topSystem.json b/tests/observer_db_size_check/topSystem.json deleted file mode 100644 index 5b265e9..0000000 --- a/tests/observer_db_size_check/topSystem.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "totalCount": "3", - "imdata": [ - { - "topSystem": { - "attributes": { - "address": "10.0.0.1", - "dn": "topology/pod-1/node-1/sys", - "id": "1", - "name": "fab5-apic1" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.2", - "dn": "topology/pod-1/node-2/sys", - "id": "2", - "name": "fab5-apic2" - } - } - }, - { - "topSystem": { - "attributes": { - "address": "10.0.0.3", - "dn": "topology/pod-1/node-3/sys", - "id": "3", - 
"name": "fab5-apic3" - } - } - } - ] -} \ No newline at end of file diff --git a/tests/observer_db_size_check/topSystem_empty.json b/tests/observer_db_size_check/topSystem_empty.json deleted file mode 100644 index 0637a08..0000000 --- a/tests/observer_db_size_check/topSystem_empty.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/tests/stale_decomissioned_spine_check/fabricRsDecommissionNode_NEG.json b/tests/stale_decomissioned_spine_check/fabricRsDecommissionNode_NEG.json deleted file mode 100644 index 0637a08..0000000 --- a/tests/stale_decomissioned_spine_check/fabricRsDecommissionNode_NEG.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/tests/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py b/tests/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py deleted file mode 100644 index fade813..0000000 --- a/tests/stale_decomissioned_spine_check/test_stale_decomissioned_spine_check.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -decomissioned_api ='fabricRsDecommissionNode.json' - -active_spine_api = 'topSystem.json' -active_spine_api += '?query-target-filter=eq(topSystem.role,"spine")' - -@pytest.mark.parametrize( - "icurl_outputs, tversion, expected_result", - [ - # TVERSION not supplied - ( - { - active_spine_api: read_data(dir, "topSystem.json"), - decomissioned_api: read_data(dir,"fabricRsDecommissionNode_NEG.json") - }, - None, - script.MANUAL, - ), - # No decom objects - ( - { - active_spine_api: read_data(dir, "topSystem.json"), - decomissioned_api: read_data(dir,"fabricRsDecommissionNode_NEG.json") - }, - "5.2(5e)", - script.PASS, - ), - # Spine has stale decom object, and going to affected version - ( 
- { - active_spine_api: read_data(dir, "topSystem.json"), - decomissioned_api: read_data(dir,"fabricRsDecommissionNode_POS.json") - }, - "5.2(6a)", - script.FAIL_O, - ), - # Fixed Target Version - ( - { - active_spine_api: read_data(dir, "topSystem.json"), - decomissioned_api: read_data(dir,"fabricRsDecommissionNode_POS.json") - }, - "6.0(4a)", - script.PASS, - ), - ], -) -def test_logic(mock_icurl, tversion, expected_result): - result = script.stale_decomissioned_spine_check( - 1, - 1, - script.AciVersion(tversion) if tversion else None, - ) - assert result == expected_result diff --git a/tests/stale_decomissioned_spine_check/topSystem.json b/tests/stale_decomissioned_spine_check/topSystem.json deleted file mode 100644 index 38d8d14..0000000 --- a/tests/stale_decomissioned_spine_check/topSystem.json +++ /dev/null @@ -1,24 +0,0 @@ -[ - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-201/sys", - "id": "201", - "state": "in-service", - "role": "spine", - "name": "spine1" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-106/sys", - "id": "106", - "state": "in-service", - "role": "spine", - "name": "spine2" - } - } - } - ] \ No newline at end of file diff --git a/tests/standby_sup_sync_check/test_standby_sup_sync_check.py b/tests/standby_sup_sync_check/test_standby_sup_sync_check.py deleted file mode 100644 index f11a0a4..0000000 --- a/tests/standby_sup_sync_check/test_standby_sup_sync_check.py +++ /dev/null @@ -1,174 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - -# icurl queries -eqptSupC_api = 'eqptSupC.json' -eqptSupC_api += '?query-target-filter=eq(eqptSupC.rdSt,"standby")' - -""" -Bug cversion/tversion matrix based on image size - -4.2(7t)+ - fixed versions LT 2 Gigs: 4.2(7t)+ -5.2(5d)+ - fixed 
versions LT 2 Gigs: 5.2(7f)+ -5.3(1d)+ - fixed versions LT 2 Gigs: 5.3(1d)+ -6.0(1g)+ - fixed versions LT 2 Gigs: 6.0(1g), 6.0(1j). 32-bit only: 6.0(2h), 6.0(2j). 64-bit: NONE -6.1(1f)+ - fixed versions LT 2 Gigs: NONE -""" - -@pytest.mark.parametrize( - "icurl_outputs, cversion, tversion, expected_result", - [ - ## NO TVERSION - MANUAL - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "5.2(1a)", - None, - script.MANUAL, - ), - - ### CVERSION 4.2 - ## cversion 4.2 -nofix, tversion 4.2 -fix LT 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7a)", - "4.2(8d)", - script.PASS, - ), - ## cversion 4.2 -nofix, tversion 5.2 -fix but over 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7a)", - "5.2(5d)", - script.FAIL_UF, - ), - ## cversion 4.2 -nofix, tversion 5.2 -fix and LT 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7a)", - "5.2(7f)", - script.PASS, - ), - ## cversion 4.2 -nofix, tversion 5.3 -fix and LT 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7a)", - "5.3(1d)", - script.PASS, - ), - ## cversion 4.2 -nofix, tversion 6.0 -fix and LT 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7a)", - "6.0(8d)", - script.FAIL_UF, - ), - ## cversion 4.2 -nofix, tversion 6.1 -fix and LT 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7a)", - "6.1(1f)", - script.FAIL_UF, - ), - ## cversion 4.2 -fix, tversion 6.0 -fix but over 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7t)", - "6.0(6h)", - script.PASS, - ), - ## cversion 4.2 -fix, tversion 6.1 -fix but over 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "4.2(7t)", - "6.1(1f)", - script.PASS, - ), - - - ### CVERSION 5.2 - ## cversion 5.2 -nofix, tversion 5.2 -fix but over 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "5.2(4a)", - "5.2(7a)", - script.FAIL_UF, - ), - ## cversion 5.2 -nofix, 
tversion 5.2 -fix LT 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "5.2(4a)", - "5.2(7f)", - script.PASS, - ), - ## cversion 5.2 -nofix, tversion 5.3 -fix LT 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "5.2(4a)", - "5.3(1d)", - script.PASS, - ), - ## cversion 5.2 -nofix, tversion 6.0 -fix but over 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "5.2(4a)", - "6.0(8d)", - script.FAIL_UF, - ), - ## cversion 5.2 -nofix, tversion 6.1 -fix but over 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "5.2(4a)", - "6.1(1f)", - script.FAIL_UF, - ), - ## cversion 5.2 -fix, tversion 6.1 -fix but over 2G - ( - {eqptSupC_api: read_data(dir, "eqptSupC_POS.json"), - }, - "5.2(5d)", - "6.1(1f)", - script.PASS, - ), - - - ## NO STANDBY SUPS - ( - {eqptSupC_api: read_data(dir, "eqptSupC_NEG.json"), - }, - "4.2(7a)", - "6.1(1f)", - script.PASS, - ), - - ], -) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.standby_sup_sync_check( - 1, - 1, - script.AciVersion(cversion), - script.AciVersion(tversion) if tversion else None - ) - assert result == expected_result \ No newline at end of file diff --git a/tests/static_route_overlap_check/test_static_route_overlap_check.py b/tests/static_route_overlap_check/test_static_route_overlap_check.py deleted file mode 100644 index 270641e..0000000 --- a/tests/static_route_overlap_check/test_static_route_overlap_check.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -staticRoutes = 'ipRouteP.json?query-target-filter=and(wcard(ipRouteP.dn,"/32"))' -staticroute_vrf = 'l3extRsEctx.json' -bds_in_vrf = 'fvRsCtx.json' -subnets_in_bd = 'fvSubnet.json' - - -@pytest.mark.parametrize( - 
"icurl_outputs, cversion, tversion, expected_result", - [ - ##FAIL = AFFECTED VERSION + AFFECTED MO - ( - {staticRoutes: read_data(dir, "ipRouteP_pos.json"), - staticroute_vrf: read_data(dir, "l3extRsEctx.json"), - bds_in_vrf: read_data(dir, "fvRsCtx.json"), - subnets_in_bd: read_data(dir, "fvSubnet.json")}, - "4.2(7f)", "5.2(4d)", script.FAIL_O, - ), - ##FAIL = AFFECTED VERSION + AFFECTED MO - ( - {staticRoutes: read_data(dir, "ipRouteP_pos.json"), - staticroute_vrf: read_data(dir, "l3extRsEctx.json"), - bds_in_vrf: read_data(dir, "fvRsCtx.json"), - subnets_in_bd: read_data(dir, "fvSubnet.json")}, - "5.1(1a)", "5.2(4d)", script.FAIL_O, - ), - ##PASS = AFFECTED VERSION + NON-AFFECTED MO - ( - {staticRoutes: read_data(dir, "ipRouteP_neg.json"), - staticroute_vrf: read_data(dir, "l3extRsEctx.json"), - bds_in_vrf: read_data(dir, "fvRsCtx.json"), - subnets_in_bd: read_data(dir, "fvSubnet.json")}, - "4.2(7f)", "5.2(4d)", script.PASS, - ), - ## PASS = AFFECTED VERSION + AFFECTED MO NON EXISTING - ( - {staticRoutes: read_data(dir, "ipRouteP_empty.json"), - staticroute_vrf: read_data(dir, "l3extRsEctx.json"), - bds_in_vrf: read_data(dir, "fvRsCtx.json"), - subnets_in_bd: read_data(dir, "fvSubnet.json")}, - "4.2(7f)", "5.2(4d)", script.PASS, - ), - ## PASS = NON-AFFECTED VERSION + AFFECTED MO - ( - {staticRoutes: read_data(dir, "ipRouteP_pos.json"), - staticroute_vrf: read_data(dir, "l3extRsEctx.json"), - bds_in_vrf: read_data(dir, "fvRsCtx.json"), - subnets_in_bd: read_data(dir, "fvSubnet.json")}, - "4.2(7f)", "5.2(6e)", script.PASS, - ), - ], -) -def test_logic(mock_icurl, cversion, tversion, expected_result): - result = script.static_route_overlap_check(1, 1, script.AciVersion(cversion), script.AciVersion(tversion)) - assert result == expected_result diff --git a/tests/subnet_scope_check/test_subnet_scope_check.py b/tests/subnet_scope_check/test_subnet_scope_check.py deleted file mode 100644 index 2b9a53c..0000000 --- a/tests/subnet_scope_check/test_subnet_scope_check.py 
+++ /dev/null @@ -1,63 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -bd_api = 'fvBD.json' -bd_api += '?rsp-subtree=children&rsp-subtree-class=fvSubnet&rsp-subtree-include=required' - -epg_api = 'fvAEPg.json?' -epg_api += 'rsp-subtree=children&rsp-subtree-class=fvSubnet&rsp-subtree-include=required' - -@pytest.mark.parametrize( - "icurl_outputs, cversion, expected_result", - [ - ( - {bd_api: read_data(dir, "fvBD.json"), - epg_api: read_data(dir, "fvAEPg_empty.json"), - "fvRsBd.json": read_data(dir, "fvRsBd.json")}, - "4.2(6a)", - script.NA, - ), - ( - {bd_api: read_data(dir, "fvBD.json"), - epg_api: read_data(dir, "fvAEPg_pos.json"), - "fvRsBd.json": read_data(dir, "fvRsBd.json")}, - "4.2(6a)", - script.FAIL_O, - ), - ( - {bd_api: read_data(dir, "fvBD.json"), - epg_api: read_data(dir, "fvAEPg_pos.json"), - "fvRsBd.json": read_data(dir, "fvRsBd.json")}, - "5.1(1a)", - script.FAIL_O, - ), - ( - {bd_api: read_data(dir, "fvBD.json"), - epg_api: read_data(dir, "fvAEPg_neg.json"), - "fvRsBd.json": read_data(dir, "fvRsBd.json")}, - "5.1(1a)", - script.PASS, - ), - ( - {bd_api: read_data(dir, "fvBD.json"), - epg_api: read_data(dir, "fvAEPg_neg.json"), - "fvRsBd.json": read_data(dir, "fvRsBd.json")}, - "5.2(8h)", - script.PASS, - ), - - ], -) -def test_logic(mock_icurl, cversion, expected_result): - result = script.subnet_scope_check(1, 1, script.AciVersion(cversion)) - assert result == expected_result diff --git a/tests/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py b/tests/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py deleted file mode 100644 index 5b16064..0000000 --- a/tests/switch_bootflash_usage_check/test_switch_bootflash_usage_check.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import pytest 
-import logging -import importlib -from helpers.utils import read_data - -script = importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -partitions = 'eqptcapacityFSPartition.json' -partitions += '?query-target-filter=eq(eqptcapacityFSPartition.path,"/bootflash")' - -download_sts = 'maintUpgJob.json' -download_sts += '?query-target-filter=and(eq(maintUpgJob.dnldStatus,"downloaded")' -download_sts += ',eq(maintUpgJob.desiredVersion,"n9000-16.0(2h)"))' - -@pytest.mark.parametrize( - "icurl_outputs, tversion, expected_result", - [ - ( - {partitions: read_data(dir, "eqptcapacityFSPartition.json"), - download_sts: read_data(dir, "maintUpgJob_not_downloaded.json")}, - "6.0(2h)", - script.FAIL_UF, - ), - ( - {partitions: read_data(dir, "eqptcapacityFSPartition.json"), - download_sts: read_data(dir, "maintUpgJob_pre_downloaded.json")}, - "6.0(2h)", - script.PASS, - ), - ( - {partitions: read_data(dir, "eqptcapacityFSPartition.json"), - download_sts: read_data(dir, "maintUpgJob_old_ver_no_prop.json")}, - "6.0(2h)", - script.FAIL_UF, - ), - ], -) -def test_logic(mock_icurl, tversion, expected_result): - result = script.switch_bootflash_usage_check(1, 1, script.AciVersion(tversion)) - assert result == expected_result diff --git a/tests/test_AciResult.py b/tests/test_AciResult.py index c427712..b4b9e6c 100644 --- a/tests/test_AciResult.py +++ b/tests/test_AciResult.py @@ -1,27 +1,29 @@ import pytest import importlib -import json from six import string_types script = importlib.import_module("aci-preupgrade-validation-script") +AciResult = script.AciResult +Result = script.Result @pytest.mark.parametrize( - "func_name, name, description, result, recommended_action, reason, doc_url, column, row, unformatted_column, unformatted_rows, expected_show, expected_criticality, expected_passed", + "func_name, name, result_obj, expected_show, expected_criticality, 
expected_passed", [ # Check 1: NA ( "fake_func_name_NA_test", "NA", - "", - script.NA, - "", - "", - "", - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], + Result( + result=script.NA, + recommended_action="", + msg="", + doc_url="", + headers=["col1", "col2"], + data=[["row1", "row2"], ["row3", "row4"]], + unformatted_headers=["col1", "col2"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), False, "informational", "passed" @@ -30,15 +32,16 @@ ( "fake_func_name_PASS_test", "PASS", - "", - script.PASS, - "", - "", - "", - [], - [], - [], - [], + Result( + result=script.PASS, + recommended_action="", + msg="", + doc_url="", + headers=[], + data=[], + unformatted_headers=[], + unformatted_data=[], + ), True, "informational", "passed" @@ -47,15 +50,16 @@ ( "fake_func_name_POST_test", "POST", - "", - script.POST, - "reboot", - "test reason", - "https://test_doc_url.html", - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], + Result( + result=script.POST, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2"], + data=[["row1", "row2"], ["row3", "row4"]], + unformatted_headers=["col1", "col2"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), False, "informational", "failed" @@ -64,15 +68,16 @@ ( "fake_func_name_MANUAL_test", "MANUAL", - "", - script.MANUAL, - "reboot", - "test reason", - "https://test_doc_url.html", - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], + Result( + result=script.MANUAL, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2"], + data=[["row1", "row2"], ["row3", "row4"]], + unformatted_headers=["col1", "col2"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), True, "warning", 
"failed" @@ -81,15 +86,16 @@ ( "fake_func_name_ERROR_test", "ERROR", - "", - script.ERROR, - "reboot", - "test reason", - "https://test_doc_url.html", - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], + Result( + result=script.ERROR, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2"], + data=[["row1", "row2"], ["row3", "row4"]], + unformatted_headers=["col1", "col2"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), True, "major", "failed" @@ -98,15 +104,16 @@ ( "fake_func_name_FAIL_UF_test", "FAIL_UF", - "", - script.FAIL_UF, - "reboot", - "test reason", - "https://test_doc_url.html", - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], - ["col1", "col2"], - [["row1", "row2"], ["row3", "row4"]], + Result( + result=script.FAIL_UF, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2"], + data=[["row1", "row2"], ["row3", "row4"]], + unformatted_headers=["col1", "col2"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), True, "critical", "failed" @@ -115,15 +122,16 @@ ( "fake_func_name_FAIL_O_test", "FAIL_O", - "", - script.FAIL_O, - "reboot", - "test reason", - "https://test_doc_url.html", - ["col1", "col2", "col3"], - [["row1", "row2", "row3"], ["row4", "row5", "row6"]], - ["col4", "col5"], - [["row1", "row2"], ["row3", "row4"]], + Result( + result=script.FAIL_O, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2", "col3"], + data=[["row1", "row2", "row3"], ["row4", "row5", "row6"]], + unformatted_headers=["col4", "col5"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), True, "critical", "failed" @@ -132,15 +140,16 @@ ( "fake_func_name_FAIL_O_formatted_only_test", "FAIL_O Formatted only", - "", - script.FAIL_O, - "reboot", - "test reason", - 
"https://test_doc_url.html", - ["col1", "col2", "col3"], - [["row1", None, 3], ["row4", None, 3]], - [], - [], + Result( + result=script.FAIL_O, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2", "col3"], + data=[["row1", None, 3], ["row4", None, 3]], + unformatted_headers=[], + unformatted_data=[], + ), True, "critical", "failed" @@ -149,15 +158,16 @@ ( "fake_func_name_FAIL_O_unformatted_only_test", "FAIL_O Unformatted only", - "", - script.FAIL_O, - "reboot", - "test reason", - "https://test_doc_url.html", - [], - [], - ["col1", "col2", "col3"], - [["row1", None, 3], ["row4", None, 3]], + Result( + result=script.FAIL_O, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=[], + data=[], + unformatted_headers=["col1", "col2", "col3"], + unformatted_data=[["row1", None, 3], ["row4", None, 3]], + ), True, "critical", "failed" @@ -167,35 +177,24 @@ def test_AciResult( func_name, name, - description, - result, - recommended_action, - reason, - doc_url, - column, - row, - unformatted_column, - unformatted_rows, + result_obj, expected_show, expected_criticality, expected_passed, ): - synth = script.AciResult(func_name, name, description) - synth.updateWithResults(result, recommended_action, reason, doc_url, column, row, unformatted_column, unformatted_rows) - file = synth.writeResult() - with open(file, "r") as f: - data = json.load(f) - assert data["ruleId"] == func_name - assert data["showValidation"] == expected_show - assert data["severity"] == expected_criticality - assert data["ruleStatus"] == expected_passed - for entry in data["failureDetails"]["data"]: + synth = AciResult(func_name, name, result_obj) + assert synth.ruleId == func_name + assert synth.showValidation == expected_show + assert synth.severity == expected_criticality + assert synth.ruleStatus == expected_passed + for entry in synth.failureDetails["data"]: for vals in entry.values(): 
assert isinstance(vals, string_types) - for entry in data["failureDetails"]["unformatted_data"]: + for entry in synth.failureDetails["unformatted_data"]: for vals in entry.values(): assert isinstance(vals, string_types) + @pytest.mark.parametrize( "headers, data", [ @@ -206,12 +205,13 @@ def test_AciResult( ) def test_invalid_headers_or_data(headers, data): with pytest.raises(TypeError): - synth = script.AciResult("func_name", "Check Title", "A Description") - synth.craftData( + synth = AciResult("func_name", "Check Title") + synth.convert_data( column=headers, rows=data, ) + @pytest.mark.parametrize( "headers, data", [ @@ -235,8 +235,8 @@ def test_invalid_headers_or_data(headers, data): ) def test_mismatched_lengths(headers, data): with pytest.raises(ValueError): - synth = script.AciResult("func_name", "Check Title", "A Description") - synth.craftData( + synth = AciResult("func_name", "Check Title") + synth.convert_data( column=headers, rows=data, ) diff --git a/tests/test_CheckManager.py b/tests/test_CheckManager.py new file mode 100644 index 0000000..e5c7e79 --- /dev/null +++ b/tests/test_CheckManager.py @@ -0,0 +1,353 @@ +import pytest +import importlib +import logging +import time +import json +import os + +script = importlib.import_module("aci-preupgrade-validation-script") +AciVersion = script.AciVersion +AciResult = script.AciResult +Result = script.Result +CheckManager = script.CheckManager +check_wrapper = script.check_wrapper + + +# ---------------------------- +# Fixtures, Helper Functions +# ---------------------------- +def assert_aci_result_file_with_error(cm, check_id, check_title, msg): + filepath = cm.rm.get_result_filepath(check_id) + with open(filepath, "r") as f: + aci_result = json.load(f) + assert aci_result["ruleId"] == check_id + assert aci_result["name"] == check_title + assert aci_result["ruleStatus"] == AciResult.FAIL + assert aci_result["severity"] == "major" + assert aci_result["reason"] == msg + assert 
This is to test exception handling in ThreadManager._start_thread().
files + result_files = os.listdir(script.JSON_DIR) + assert len(result_files) == cm.total_checks + + # Check the filename of result files and their `ruleStatus` + for check_id in cm.check_ids: + filepath = cm.rm.get_result_filepath(check_id) + assert os.path.exists(filepath), "Missing result file: {}".format(filepath) + + with open(filepath, "r") as f: + aci_result = json.load(f) + # At initialize, ruleStatus must be always in-progress + assert aci_result["ruleStatus"] == AciResult.IN_PROGRESS + + def test_run_checks(self, caplog, cm, expected_common_data, expected_result_objects): + caplog.set_level(logging.CRITICAL) # Skip logging as it's too noisy for this + + cm.run_checks(expected_common_data) + + # With run_checks, both titles and result obj should be available + assert cm.get_check_title("fake_0_check") == "Fake Check 0" + assert cm.get_check_title("fake_10_check") == "Fake Check 10" + assert cm.get_check_result("fake_0_check") is expected_result_objects[0] + assert cm.get_check_result("fake_10_check") is expected_result_objects[10] + + # Check the result files + for check_id in cm.check_ids: + filepath = cm.rm.get_result_filepath(check_id) + assert os.path.exists(filepath), "Missing result file: {}".format(filepath) + + with open(filepath, "r") as f: + aci_result = json.load(f) + assert aci_result["ruleId"] == check_id + assert aci_result["name"] == cm.get_check_title(check_id) + assert aci_result["ruleStatus"] in (AciResult.PASS, AciResult.FAIL) + r = cm.get_check_result(check_id) + + # recommended_action: additional note is added with a certain condition + if aci_result["ruleStatus"] == AciResult.FAIL and r.unformatted_headers and r.unformatted_data: + assert aci_result["recommended_action"].startswith(r.recommended_action + "\n Note") + else: + assert aci_result["recommended_action"] == r.recommended_action + + # docUrl + assert aci_result["docUrl"] == r.doc_url + + # failureDetails: matters only when ruleStatus is FAIL + if aci_result["ruleStatus"] == 
AciResult.FAIL: + assert aci_result["failureDetails"]["failType"] == r.result + try: + data = AciResult.convert_data(r.headers, r.data) + assert aci_result["failureDetails"]["header"] == r.headers + assert aci_result["failureDetails"]["data"] == data + except Exception: + assert aci_result["failureDetails"]["failType"] == script.ERROR + assert aci_result["failureDetails"]["header"] == [] + assert aci_result["failureDetails"]["data"] == [] + + if r.unformatted_headers and r.unformatted_data: + try: + unformatted_data = AciResult.convert_data(r.unformatted_headers, r.unformatted_data) + assert aci_result["failureDetails"]["unformatted_header"] == r.unformatted_headers + assert aci_result["failureDetails"]["unformatted_data"] == unformatted_data + except Exception: + assert aci_result["failureDetails"]["failType"] == script.ERROR + assert aci_result["failureDetails"]["unformatted_header"] == [] + assert aci_result["failureDetails"]["unformatted_data"] == [] + + +@pytest.mark.parametrize( + "api_only, debug_function, expected_total", + [ + (False, None, len(CheckManager.api_checks) + len(CheckManager.ssh_checks) + len(CheckManager.cli_checks)), + (True, None, len(CheckManager.api_checks)), + (False, CheckManager.api_checks[0].__name__, 1), + (True, CheckManager.api_checks[0].__name__, 1), + (False, CheckManager.ssh_checks[0].__name__, 1), + (True, CheckManager.ssh_checks[0].__name__, 0), # api_only for non-api check = 0 + ], +) +def test_total_checks(api_only, debug_function, expected_total): + cm = CheckManager(api_only, debug_function) + assert cm.total_checks == expected_total + + +def test_exception_in_initialize(): + """Exception in initialize is not captured by CheckManager. 
+ The exception should go up to the script's main() and abort the script + because there is likely some fundamental issue in the system""" + cm = CheckManager() + cm.initialize_check = lambda x, y: 1 / 0 # Zero Division Error for a quick exception + with pytest.raises(ZeroDivisionError): + cm.initialize_checks() + + +def test_memerror_in_check(): + @check_wrapper(check_title="Memory Error Check") + def memerr_check(**kwargs): + raise MemoryError + + cm = CheckManager() + cm.check_funcs = [memerr_check] + cm.initialize_checks() + cm.run_checks({"fake_common_data": True}) + + assert_aci_result_file_with_error( + cm, "memerr_check", "Memory Error Check", "Not enough memory to complete this check." + ) + + +def test_exception_in_check(): + @check_wrapper(check_title="Bad Check") + def bad_check(**kwargs): + raise Exception("This is a test exception") + + cm = CheckManager() + cm.check_funcs = [bad_check] + cm.initialize_checks() + cm.run_checks({"fake_common_data": True}) + + assert_aci_result_file_with_error(cm, "bad_check", "Bad Check", "Unexpected Error: This is a test exception") + + +def test_exception_in_finalize_check_due_to_bad_check(): + """Exceptions in `finalize_check` due to bad return value from each check. + + This exception happens in `try` of `check_wrapper`, but `finalize_check` + in the corresponding `except` works as it calls `finalize_check` again + with a valid `Result` object with status ERROR. 
+ """ + + @check_wrapper(check_title="Non-Result Obj Check") + def non_result_check(**kwargs): + return "Not Result Obj" # instead of `Result` obj + + @check_wrapper(check_title="Invalid Result Check") + def invalid_result_check(**kwargs): + # length of header and each row of data must be the same + return Result(result=script.FAIL_O, headers=["H1", "H2"], data=[["D1"], ["D2"]]) + + cm = CheckManager() + cm.check_funcs = [non_result_check, invalid_result_check] + cm.initialize_checks() + cm.run_checks({"fake_common_data": True}) + + checks = [ + { + "id": "non_result_check", + "title": "Non-Result Obj Check", + "msg": "Unexpected Error: The result of non_result_check is not a `Result` object", + }, + { + "id": "invalid_result_check", + "title": "Invalid Result Check", + "msg": "Unexpected Error: Row length (1), data: ['D1'] does not match column length (2).", + }, + ] + for check in checks: + assert_aci_result_file_with_error(cm, check["id"], check["title"], check["msg"]) + + +def test_exception_in_finalize_check(): + """Exception in `finalize_check` itself. + + This could happen when the filesystem is full, permission denied to write + the result file etc. + """ + + # Check exception is caught in try of check_wrapper, then finalize_check + # fails in the corresponding except. + @check_wrapper(check_title="Bad Check With Bad Finalizer") + def bad_check_with_bad_finalizer(**kwargs): + raise Exception("Bad check to test finalize_check failure") + + # Check is good but finalize_check failed in try of check_wrapper, then it + # fails in the corresponding except again. 
+ @check_wrapper(check_title="Good Check With Bad Finalizer") + def good_check_with_bad_finalizer(**kwargs): + return Result(result=script.PASS) + + cm = CheckManager() + cm.finalize_check = lambda x, y: 1 / 0 # Zero Division Error for a quick exception + cm.check_funcs = [bad_check_with_bad_finalizer, good_check_with_bad_finalizer] + cm.initialize_checks() + with pytest.raises(ZeroDivisionError): + cm.run_checks({"fake_common_data": True}) + + +@pytest.mark.parametrize( + "mock_generate_thread", + [ + {"check_id": "good_check_with_thread_start_failure", "exception": RuntimeError("can't start new thread")}, + {"check_id": "good_check_with_thread_start_failure", "exception": RuntimeError("unknown runtime error")}, + {"check_id": "good_check_with_thread_start_failure", "exception": Exception("unknown exception")}, + ], + indirect=True, +) +def test_exception_in_starting_thread(mock_generate_thread): + @check_wrapper(check_title="Good Check With Failure in Starting Thread") + def good_check_with_thread_start_failure(**kwargs): + return Result(result=script.PASS) + + cm = CheckManager() + cm.check_funcs = [good_check_with_thread_start_failure] + cm.initialize_checks() + cm.run_checks({"fake_common_data": True}) + + assert_aci_result_file_with_error( + cm, + "good_check_with_thread_start_failure", + "Good Check With Failure in Starting Thread", + "Skipped due to a failure in starting a thread for this check.", + ) + + +@pytest.mark.parametrize( + "mock_generate_thread", + [ + {"check_id": "good_check_with_start_failure_and_exc_in_callback", "exception": Exception("unknown exception")}, + ], + indirect=True, +) +def test_exception_in_finalize_check_on_thread_failure(mock_generate_thread): + """Exception in failure callback. 
Should not catch the exception and let the script fail""" + @check_wrapper(check_title="Good Check With Failure in Starting Thread") + def good_check_with_start_failure_and_exc_in_callback(**kwargs): + return Result(result=script.PASS) + + cm = CheckManager() + cm.finalize_check_on_thread_failure = lambda x: 1 / 0 # Zero Division Error for a quick exception + cm.check_funcs = [good_check_with_start_failure_and_exc_in_callback] + cm.initialize_checks() + with pytest.raises(ZeroDivisionError): + cm.run_checks({"fake_common_data": True}) + + +def test_monitor_timeout(): + @check_wrapper(check_title="Timeout Check") + def timeout_check(**kwargs): + time.sleep(60) + + timeout = 1 # sec + cm = CheckManager(timeout=timeout) + cm.check_funcs = [timeout_check] + cm.initialize_checks() + cm.run_checks({"fake_common_data": True}) + assert cm.timeout_event.is_set() + + assert_aci_result_file_with_error( + cm, "timeout_check", "Timeout Check", "Timeout. Unable to finish in time ({} sec).".format(timeout) + ) + + +def test_exception_in_finalize_check_on_thread_timeout(): + """Exception in failure callback. 
Should not catch the exception and let the script fail""" + @check_wrapper(check_title="Timeout Check") + def timeout_check_with_exc_in_callback(**kwargs): + time.sleep(60) + + timeout = 1 # sec + cm = CheckManager(timeout=timeout) + cm.finalize_check_on_thread_timeout = lambda x: 1 / 0 # Zero Division Error for a quick exception + cm.check_funcs = [timeout_check_with_exc_in_callback] + cm.initialize_checks() + with pytest.raises(ZeroDivisionError): + cm.run_checks({"fake_common_data": True}) + assert cm.timeout_event.is_set() diff --git a/tests/test_ResultManager.py b/tests/test_ResultManager.py new file mode 100644 index 0000000..893b99a --- /dev/null +++ b/tests/test_ResultManager.py @@ -0,0 +1,123 @@ +import importlib +import json + +script = importlib.import_module("aci-preupgrade-validation-script") +AciResult = script.AciResult +Result = script.Result + + +def _test_init_result(rm, fake_checks): + for fake_check in fake_checks: + rm.init_result(**fake_check) + + assert len(rm.titles) == len(fake_checks) + + for check_id in rm.titles: + expected_title = [check["check_title"] for check in fake_checks if check["check_id"] == check_id][0] + assert rm.titles[check_id] == expected_title + + filepath = rm.get_result_filepath(check_id) + with open(filepath, "r") as f: + aci_result = json.load(f) + assert aci_result["ruleId"] == check_id + assert aci_result["name"] == rm.titles[check_id] + assert aci_result["ruleStatus"] == AciResult.IN_PROGRESS + assert aci_result["severity"] == "informational" + assert aci_result["recommended_action"] == "" + assert aci_result["docUrl"] == "" + assert aci_result["failureDetails"]["failType"] == "" + assert aci_result["failureDetails"]["header"] == [] + assert aci_result["failureDetails"]["data"] == [] + assert aci_result["failureDetails"]["unformatted_header"] == [] + assert aci_result["failureDetails"]["unformatted_data"] == [] + + +def _test_update_result(rm, fake_checks): + for fake_check in fake_checks: + 
rm.update_result(**fake_check) + + inited_fake_checks = [check for check in fake_checks if check["check_id"] in rm.titles] + assert len(rm.results) == len(inited_fake_checks) + + for check_id in rm.results: + expected_result_obj = [check["result_obj"] for check in fake_checks if check["check_id"] == check_id][0] + r = rm.results[check_id] + assert r == expected_result_obj + + filepath = rm.get_result_filepath(check_id) + with open(filepath, "r") as f: + aci_result = json.load(f) + assert aci_result["ruleId"] == check_id + assert aci_result["name"] == rm.titles[check_id] + assert aci_result["ruleStatus"] in (AciResult.PASS, AciResult.FAIL) + if r.unformatted_data: + assert aci_result["recommended_action"].startswith(r.recommended_action) + else: + assert aci_result["recommended_action"] == r.recommended_action + assert aci_result["docUrl"] == r.doc_url + assert aci_result["failureDetails"]["failType"] == "" if r.result == script.PASS else r.result + assert aci_result["failureDetails"]["header"] == r.headers + assert aci_result["failureDetails"]["data"] == AciResult.convert_data(r.headers, r.data) + assert aci_result["failureDetails"]["unformatted_header"] == r.unformatted_headers + assert aci_result["failureDetails"]["unformatted_data"] == AciResult.convert_data(r.unformatted_headers, r.unformatted_data) + + +def test_ResultManager(): + rm = script.ResultManager() + fake_checks_for_init = [ + {"check_id": "puv_1_check", "check_title": "PUV 1"}, + {"check_id": "puv_2_check", "check_title": "PUV 2"}, + ] + fake_checks_for_update = [ + { + "check_id": "puv_1_check", + "result_obj": Result( + result=script.PASS, + recommended_action="", + msg="", + doc_url="", + headers=[], + data=[], + unformatted_headers=[], + unformatted_data=[], + ), + }, + { + "check_id": "puv_2_check", + "result_obj": Result( + result=script.FAIL_UF, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2"], + data=[["row1", "row2"], 
["row3", "row4"]], + unformatted_headers=["col1", "col2"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), + }, + { + "check_id": "no_init_check", + "result_obj": Result( + result=script.FAIL_UF, + recommended_action="reboot", + msg="test reason", + doc_url="https://test_doc_url.html", + headers=["col1", "col2"], + data=[["row1", "row2"], ["row3", "row4"]], + unformatted_headers=["col1", "col2"], + unformatted_data=[["row1", "row2"], ["row3", "row4"]], + ), + }, + ] + _test_init_result(rm, fake_checks_for_init) + _test_update_result(rm, fake_checks_for_update) + + summary = rm.get_summary() + assert len(summary) == 8 # [PASS, FAIL_O, FAIL_UF, MANUAL, POST, NA, ERROR, 'TOTAL'] + for key in summary: + if key == "TOTAL": + expected_num = len([c for c in fake_checks_for_update if c["check_id"] != "no_init_check"]) + else: + expected_num = len([c for c in fake_checks_for_update if c["result_obj"].result == key and c["check_id"] != "no_init_check"]) + + assert summary[key] == expected_num diff --git a/tests/test_ThreadManager.py b/tests/test_ThreadManager.py new file mode 100644 index 0000000..4b02f3b --- /dev/null +++ b/tests/test_ThreadManager.py @@ -0,0 +1,49 @@ +from __future__ import print_function +import importlib +import time + +script = importlib.import_module("aci-preupgrade-validation-script") + + +global_timeout = False + + +def task1(data=""): + time.sleep(2.5) + if not global_timeout: + print("Thread task1: Finishing with data {}".format(data)) + + +def task2(data=""): + time.sleep(0.5) + if not global_timeout: + print("Thread task2: Finishing with data {}".format(data)) + + +def task3(data=""): + time.sleep(0.2) + if not global_timeout: + print("Thread task3: Finishing with data {}".format(data)) + + +def test_ThreadManager(capsys): + global global_timeout + tm = script.ThreadManager( + funcs=[task1, task2, task3], + common_kwargs={"data": "common_data"}, + monitor_timeout=1, + callback_on_timeout=lambda x: print("Timeout. 
Abort {}".format(x)) + ) + tm.start() + tm.join() + + if tm.is_timeout(): + global_timeout = True + + expected_output = """\ +Thread task3: Finishing with data common_data +Thread task2: Finishing with data common_data +Timeout. Abort task1 +""" + captured = capsys.readouterr() + assert captured.out == expected_output diff --git a/tests/test_common_data.py b/tests/test_common_data.py new file mode 100644 index 0000000..b927628 --- /dev/null +++ b/tests/test_common_data.py @@ -0,0 +1,634 @@ +import pytest +import importlib +import logging +import json +import sys + +script = importlib.import_module("aci-preupgrade-validation-script") +AciVersion = script.AciVersion + + +# ------------------------------ +# Data and fixtures +# ------------------------------ + + +@pytest.fixture(autouse=True) +def mock_get_credentials(monkeypatch): + """Mock the get_credentials function to return a fixed username and password.""" + + def _mock_get_credentials(): + return ("admin", "mypassword") + + monkeypatch.setattr(script, "get_credentials", _mock_get_credentials) + + +@pytest.fixture(autouse=True) +def mock_get_target_version(monkeypatch): + """ + Mock `get_target_version()` to return a fixed target version. + Used when the script is run without the `-t` option which is simulated by + `arg_tversion`. + Not using `mock_icurl` because this function involves a user interaction to + select a version. 
+ """ + + def _mock_get_target_version(arg_tversion): + if arg_tversion: + script.prints("Target APIC version is overridden to %s\n" % arg_tversion) + try: + target_version = AciVersion(arg_tversion) + except ValueError as e: + script.prints(e) + sys.exit(1) + return target_version + return AciVersion("6.2(1a)") + + monkeypatch.setattr(script, "get_target_version", _mock_get_target_version) + + +_icurl_outputs = { + "fabricNode.json": [ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller", + "version": "6.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller", + "version": "6.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller", + "version": "6.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf", + "version": "n9000-16.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + 
"role": "leaf", + "version": "n9000-16.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.111", + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine1001", + "nodeType": "unspecified", + "role": "spine", + "version": "n9000-16.1(1a)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": "topology/pod-2/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C93180YC-FX3", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf201", + "nodeType": "unspecified", + "role": "leaf", + "version": "n9000-16.0(9d)", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.211", + "dn": "topology/pod-2/node-2001", + "fabricSt": "active", + "id": "2001", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine2001", + "nodeType": "unspecified", + "role": "spine", + "version": "n9000-16.1(1a)", + } + } + }, + ], + "fabricNodePEp.json": [ + {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-101-102/nodepep-101", "id": "101"}}}, + {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-101-102/nodepep-102", "id": "102"}}}, + ], +} + +_icurl_outputs_old = { + # fabricNode.version in older versions like 3.2 shows an invalid version like "A" + # for controller and empty for active switches. 
+ "fabricNode.json": [ + { + "fabricNode": { + "attributes": { + "address": "10.0.0.1", + "dn": "topology/pod-1/node-1", + "fabricSt": "commissioned", + "id": "1", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic1", + "nodeType": "unspecified", + "role": "controller", + "version": "A", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.2", + "dn": "topology/pod-1/node-2", + "fabricSt": "commissioned", + "id": "2", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic2", + "nodeType": "unspecified", + "role": "controller", + "version": "A", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.3", + "dn": "topology/pod-2/node-3", + "fabricSt": "commissioned", + "id": "3", + "model": "APIC-SERVER-L2", + "monPolDn": "uni/fabric/monfab-default", + "name": "apic3", + "nodeType": "unspecified", + "role": "controller", + "version": "A", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.101", + "dn": "topology/pod-1/node-101", + "fabricSt": "active", + "id": "101", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf101", + "nodeType": "unspecified", + "role": "leaf", + "version": "", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.102", + "dn": "topology/pod-1/node-102", + "fabricSt": "active", + "id": "102", + "model": "N9K-C93180YC-FX", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf102", + "nodeType": "unspecified", + "role": "leaf", + "version": "", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.111", + "dn": "topology/pod-1/node-1001", + "fabricSt": "active", + "id": "1001", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine1001", + "nodeType": "unspecified", + "role": "spine", + "version": "", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.201", + "dn": 
"topology/pod-2/node-201", + "fabricSt": "active", + "id": "201", + "model": "N9K-C93180YC-FX3", + "monPolDn": "uni/fabric/monfab-default", + "name": "leaf201", + "nodeType": "unspecified", + "role": "leaf", + "version": "", + } + } + }, + { + "fabricNode": { + "attributes": { + "address": "10.0.0.211", + "dn": "topology/pod-2/node-2001", + "fabricSt": "active", + "id": "2001", + "model": "N9K-C9504", + "monPolDn": "uni/fabric/monfab-default", + "name": "spine2001", + "nodeType": "unspecified", + "role": "spine", + "version": "", + } + } + }, + ], + "fabricNodePEp.json": _icurl_outputs["fabricNodePEp.json"], + "topology/pod-1/node-1/sys/ctrlrfwstatuscont/ctrlrrunning.json": [ + { + "firmwareCtrlrRunning": { + "attributes": { + "dn": "topology/pod-1/node-1/sys/ctrlrfwstatuscont/ctrlrrunning", + "type": "controller", + "version": "3.2(7f)" + } + } + } + ], + "firmwareRunning.json": [ + { + "firmwareRunning": { + "attributes": { + "dn": "topology/pod-1/node-101/sys/fwstatuscont/running", + "peVer": "3.1(2u)", + "type": "switch", + "version": "n9000-13.1(2u)" + } + } + }, + { + "firmwareRunning": { + "attributes": { + "dn": "topology/pod-1/node-102/sys/fwstatuscont/running", + "peVer": "3.2(7f)", + "type": "switch", + "version": "n9000-13.2(7f)" + } + } + }, + { + "firmwareRunning": { + "attributes": { + "dn": "topology/pod-1/node-1001/sys/fwstatuscont/running", + "peVer": "3.2(7f)", + "type": "switch", + "version": "n9000-13.2(7f)" + } + } + }, + { + "firmwareRunning": { + "attributes": { + "dn": "topology/pod-2/node-201/sys/fwstatuscont/running", + "peVer": "3.2(7f)", + "type": "switch", + "version": "n9000-13.2(7f)" + } + } + }, + { + "firmwareRunning": { + "attributes": { + "dn": "topology/pod-2/node-2001/sys/fwstatuscont/running", + "peVer": "3.2(7f)", + "type": "switch", + "version": "n9000-13.2(7f)" + } + } + }, + + ], +} + + +@pytest.fixture(scope="function") +def fake_args(request): + data = { + "api_only": False, + "cversion": None, + "tversion": None, + } + 
# update data contents when parametrize provides non-falsy values + for key in data: + if request.param.get(key, "non_falsy_default") != "non_falsy_default": + data[key] = request.param[key] + return data + + +# ------------------------------ +# Tests +# ------------------------------ + + +@pytest.mark.parametrize( + "icurl_outputs, fake_args, expected_common_data", + [ + # Default, no argparse arguments + pytest.param( + _icurl_outputs, + {}, + { + "cversion": AciVersion("6.1(1a)"), + "sw_cversion": AciVersion("6.0(9d)"), + "tversion": AciVersion("6.2(1a)"), + "fabric_nodes": _icurl_outputs["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="default_no_args", + ), + # Default, no argparse arguments, old ACI version + pytest.param( + _icurl_outputs_old, + {}, + { + "cversion": AciVersion("3.2(7f)"), + "sw_cversion": AciVersion("3.1(2u)"), + "tversion": AciVersion("6.2(1a)"), + "fabric_nodes": _icurl_outputs_old["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="default_no_args_old_aci", + ), + # `api_only` is True. + # No `get_credentials()`, no username nor password + pytest.param( + _icurl_outputs, + { + "api_only": True, + }, + { + "username": None, + "password": None, + "cversion": AciVersion("6.1(1a)"), + "sw_cversion": AciVersion("6.0(9d)"), + "tversion": AciVersion("6.2(1a)"), + "fabric_nodes": _icurl_outputs["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="api_only", + ), + # `arg_tversion` is provided (i.e. -t 6.1(4a)) + pytest.param( + _icurl_outputs, + { + "tversion": "6.1(4a)", + }, + { + "cversion": AciVersion("6.1(1a)"), + "sw_cversion": AciVersion("6.0(9d)"), + "tversion": AciVersion("6.1(4a)"), + "fabric_nodes": _icurl_outputs["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="tversion", + ), + # `arg_tversion` and `arg_cversion` are both provided (i.e. 
-t 6.1(4a)) + pytest.param( + _icurl_outputs, + { + "cversion": "6.0(8d)", + "tversion": "6.1(4a)", + }, + { + "cversion": AciVersion("6.0(8d)"), + "sw_cversion": AciVersion("6.0(8d)"), + "tversion": AciVersion("6.1(4a)"), + "fabric_nodes": _icurl_outputs["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="cversion_tversion", + ), + # versions are switch syntax + pytest.param( + _icurl_outputs, + { + "cversion": "16.0(4d)", + "tversion": "16.1(4a)", + }, + { + "cversion": AciVersion("6.0(4d)"), + "sw_cversion": AciVersion("6.0(4d)"), + "tversion": AciVersion("6.1(4a)"), + "fabric_nodes": _icurl_outputs["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="cversion_tversion_with_switch_version_syntax", + ), + # versions are APIC image name syntax + pytest.param( + _icurl_outputs, + { + "cversion": "aci-apic-dk9.6.0.1a.bin", + "tversion": "aci-apic-dk9.6.2.1a.bin", + }, + { + "cversion": AciVersion("6.0(1a)"), + "sw_cversion": AciVersion("6.0(1a)"), + "tversion": AciVersion("6.2(1a)"), + "fabric_nodes": _icurl_outputs["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="cversion_tversion_with_apic_image_name_syntax", + ), + # versions are Switch image name syntax + pytest.param( + _icurl_outputs, + { + "cversion": "n9000-16.0(1a).bin", + "tversion": "n9000-16.2(1a).bin", + }, + { + "cversion": AciVersion("6.0(1a)"), + "sw_cversion": AciVersion("6.0(1a)"), + "tversion": AciVersion("6.2(1a)"), + "fabric_nodes": _icurl_outputs["fabricNode.json"], + "vpc_node_ids": ["101", "102"], + }, + id="cversion_tversion_with_switch_image_name_syntax", + ), + ], + indirect=["fake_args", "expected_common_data"], +) +def test_common_data(mock_icurl, fake_args, expected_common_data): + """test query_common_data and write_script_metadata""" + # --- test for `query_common_data()` + common_data = script.query_common_data( + api_only=fake_args["api_only"], arg_cversion=fake_args["cversion"], arg_tversion=fake_args["tversion"] + ) + for key in 
common_data: + if isinstance(common_data[key], AciVersion): + assert str(common_data[key]) == str(expected_common_data[key]) + else: + assert common_data[key] == expected_common_data[key] + + # --- test for `write_script_metadata()` + script.write_script_metadata( + api_only=fake_args["api_only"], timeout=1200, total_checks=100, common_data=expected_common_data + ) + with open(script.META_FILE, "r") as f: + meta = json.load(f) + assert meta["name"] == "PreupgradeCheck" + assert meta["method"] == "standalone script" + assert meta["datetime"] == script.ts + script.tz + assert meta["script_version"] == script.SCRIPT_VERSION + assert meta["cversion"] == str(expected_common_data["cversion"]) + assert meta["tversion"] == str(expected_common_data["tversion"]) + assert meta["sw_cversion"] == str(expected_common_data["sw_cversion"]) + assert meta["api_only"] == fake_args["api_only"] + assert meta["timeout"] == 1200 + assert meta["total_checks"] == 100 + + +@pytest.mark.parametrize("icurl_outputs", [_icurl_outputs]) +def test_tversion_invald(capsys, mock_icurl): + with pytest.raises(SystemExit): + script.query_common_data(arg_cversion="6.0(1a)", arg_tversion="invalid_version") + + captured = capsys.readouterr() + expected_output = """\ +Gathering Node Information... + +Current version is overridden to 6.0(1a) + +Target APIC version is overridden to invalid_version + +Parsing failure of ACI version `invalid_version` +""" + assert captured.out.endswith(expected_output), "captured.out is:\n{}".format(captured.out) + + +@pytest.mark.parametrize("icurl_outputs", [_icurl_outputs]) +def test_cversion_invald(capsys, mock_icurl): + with pytest.raises(SystemExit): + script.query_common_data(arg_cversion="invalid_version", arg_tversion="6.0(1a)") + + captured = capsys.readouterr() + expected_output = """\ +Gathering Node Information... 
+ +Current version is overridden to invalid_version + +Parsing failure of ACI version `invalid_version` +""" + assert captured.out.endswith(expected_output), "captured.out is:\n{}".format(captured.out) + + +@pytest.mark.parametrize( + "icurl_outputs, print_output", + [ + # `get_fabric_nodes()` failure + ( + { + "fabricNode.json": [{"error": {"attributes": {"code": "400", "text": "Request failed, unresolved class for dummyClass"}}}], + "fabricNodePEp.json": _icurl_outputs["fabricNodePEp.json"], + }, + "Gathering Node Information...\n\n", + ), + # `get_vpc_nodes()` failure + ( + { + "fabricNode.json": _icurl_outputs["fabricNode.json"], + "fabricNodePEp.json": [{"error": {"attributes": {"code": "400", "text": "Request failed, unresolved class for dummyClass"}}}], + }, + "Collecting VPC Node IDs...", + ), + ], +) +def test_icurl_failure_in_query_common_data(capsys, caplog, mock_icurl, print_output): + caplog.set_level(logging.CRITICAL) + with pytest.raises(SystemExit): + script.query_common_data() + captured = capsys.readouterr() + expected_output = ( + print_output + + """ + +Error: Your current ACI version does not have requested class +Initial query failed. Ensure APICs are healthy. Ending script run. 
+""" + ) + assert captured.out.endswith(expected_output), "captured.out is:\n{}".format(captured.out) diff --git a/tests/test_icurl.py b/tests/test_icurl.py index 68417a1..351e3cf 100644 --- a/tests/test_icurl.py +++ b/tests/test_icurl.py @@ -3,6 +3,14 @@ script = importlib.import_module("aci-preupgrade-validation-script") + +# TimeoutError is only from py3.3 +try: + TimeoutError +except NameError: + TimeoutError = script.TimeoutError + + # icurl queries fabricNodePEps = "fabricNodePEp.json" @@ -134,6 +142,34 @@ def test_icurl(mock_icurl, apitype, query, expected_result): ], script.OldVerClassNotFound, ), + # Query timeout (90 sec) - pre-4.1 + ( + [ + { + "error": { + "attributes": { + "code": "503", + "text": "Unable to deliver the message, Resolve timeout from (type/num/svc/shard) = apic:1:7:1, apic:1:7:32, apic:1:7:31, apic:1:7:30, apic:1:7:13, apic:1:7:12, apic:1:7:11, apic:1:7:10, apic:1:7:9, apic:1:7:8, apic:1:7:7, apic:1:7:3, apic:1:7:14, apic:1:7:15, apic:1:7:16, apic:1:7:17, apic:1:7:18, apic:1:7:19, apic:1:7:20, apic:1:7:21, apic:1:7:22, apic:1:7:23, apic:1:7:24, apic:1:7:25, apic:1:7:26, apic:1:7:27, apic:1:7:28, apic:1:7:29", + } + } + } + ], + TimeoutError, + ), + # Query timeout (90 sec) - from-4.1 + ( + [ + { + "error": { + "attributes": { + "code": "503", + "text": "Unable to deliver the message, Resolve timeout", + } + } + } + ], + TimeoutError, + ), ], ) def test_icurl_error_handler(imdata, expected_exception): diff --git a/tests/test_main.py b/tests/test_main.py index 41c4543..00f5413 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -1,22 +1,158 @@ import pytest import importlib +import os +import json script = importlib.import_module("aci-preupgrade-validation-script") -AciVersion = script.AciVersion +AciResult = script.AciResult +CheckManager = script.CheckManager +# ---------------------------- +# Fixtures +# ---------------------------- +@pytest.fixture +def mock_query_common_data(monkeypatch, expected_common_data): + def 
_mock_query_common_data(api_only, args_cversion, args_tversion): + return expected_common_data + + monkeypatch.setattr(script, "query_common_data", _mock_query_common_data) + + +@pytest.fixture +def expected_result_objects(result_objects_factory): + return ( + result_objects_factory("pass") + + result_objects_factory("fail_full", script.FAIL_O) + + result_objects_factory("fail_simple", script.FAIL_UF) + + result_objects_factory("only_msg") + + result_objects_factory("pass") + + result_objects_factory("only_long_msg") + ) + + +@pytest.fixture +def mock_CheckManager_get_check_funcs(monkeypatch, check_funcs_factory, expected_result_objects): + check_funcs = check_funcs_factory(expected_result_objects) + + def _mock_CheckManager_get_check_funcs(self): + return check_funcs + + monkeypatch.setattr(script.CheckManager, "get_check_funcs", _mock_CheckManager_get_check_funcs) + + +# ---------------------------- +# Tests +# ---------------------------- def test_args_version(capsys): script.main(["--version"]) captured = capsys.readouterr() - print(captured.out) - assert "{}\n".format(script.SCRIPT_VERSION) == captured.out + assert "{}\n".format(script.SCRIPT_VERSION) == captured.out, "captured.out is =\n{}".format(captured.out) @pytest.mark.parametrize("api_only", [False, True]) def test_args_total_checks(capsys, api_only): args = ["--total-checks", "--api-only"] if api_only else ["--total-checks"] - checks = script.get_checks(api_only=api_only, debug_function=None) + + cm = CheckManager(api_only) + expected_output = "Total Number of Checks: {}\n".format(cm.total_checks) + script.main(args) captured = capsys.readouterr() - print(captured.out) - assert "Total Number of Checks: {}\n".format(len(checks)) == captured.out + assert captured.out == expected_output, "captured.out is =\n{}".format(captured.out) + + +def test_main(capsys, mock_query_common_data, mock_CheckManager_get_check_funcs, expected_result_objects): + script.main(["--no-cleanup"]) + + for idx, result_obj in 
enumerate(expected_result_objects): + check_id = "fake_{}_check".format(idx) + check_title = "Fake Check {}".format(idx) + expected_aci_result_obj = AciResult(check_id, check_title, result_obj) + expected_aci_result = expected_aci_result_obj.as_dict() + # Err msg from try/except in `check_wrapper()` + if result_obj.result == script.ERROR: + expected_aci_result["reason"] = "Unexpected Error: {}".format(expected_aci_result["reason"]) + + with open(os.path.join(script.JSON_DIR, check_id + ".json")) as f: + aci_result = json.load(f) + assert aci_result == expected_aci_result + + captured = capsys.readouterr() + assert captured.out.startswith( + """\ + ==== {ts}{tz}, Script Version {version} ==== + +!!!! Check https://github.com/datacenter/ACI-Pre-Upgrade-Validation-Script for Latest Release !!!! + +Progress:""".format( + ts=script.ts, + tz=script.tz, + version=script.SCRIPT_VERSION, + ) + ), "captured.out is =\n{}".format(captured.out) + + assert captured.out.endswith( + """\ +11/11 checks completed\r + + +=== Check Result (failed only) === + +[Check 2/11] Fake Check 1... test msg FAIL - OUTAGE WARNING!! + H1 H2 H3 + -- -- -- + Data1 Data2 Data3 + Data4 Data5 Data6 + Loooooong Data7 Data8 Data9 + + Unformatted_H1 + -------------- + Data1 + Data2 + + Recommended Action: This is your recommendation to remediate the issue + Reference Document: https://fake_doc_url.local/path1/#section1 + + +[Check 3/11] Fake Check 2... FAIL - UPGRADE FAILURE!! + H1 H2 H3 + -- -- -- + Data1 Data2 Data3 + Data4 Data5 Data6 + Loooooong Data7 Data8 Data9 + + Recommended Action: This is your recommendation to remediate the issue + Reference Document: https://fake_doc_url.local/path1/#section1 + + +[Check 6/11] Fake Check 5... test msg MANUAL CHECK REQUIRED +[Check 7/11] Fake Check 6... test msg POST UPGRADE CHECK REQUIRED +[Check 8/11] Fake Check 7... Unexpected Error: test msg ERROR !! +[Check 11/11] Fake Check 10... 
Unexpected Error: long test msg xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ERROR !! + +=== Summary Result === + +PASS : 3 +FAIL - OUTAGE WARNING!! : 1 +FAIL - UPGRADE FAILURE!! : 1 +MANUAL CHECK REQUIRED : 1 +POST UPGRADE CHECK REQUIRED : 1 +N/A : 2 +ERROR !! : 2 +TOTAL : 11 + + Pre-Upgrade Check Complete. + Next Steps: Address all checks flagged as FAIL, ERROR or MANUAL CHECK REQUIRED + + Result output and debug info saved to below bundle for later reference. + Attach this bundle to Cisco TAC SRs opened to address the flagged checks. + + Result Bundle: {bundle_loc} + +==== Script Version {version} FIN ==== +""".format( + version=script.SCRIPT_VERSION, + bundle_loc="/".join([os.getcwd(), script.BUNDLE_NAME]), + ) + ), "captured.out is =\n{}".format(captured.out) diff --git a/tests/test_prepare.py b/tests/test_prepare.py deleted file mode 100644 index 5906bb4..0000000 --- a/tests/test_prepare.py +++ /dev/null @@ -1,327 +0,0 @@ -import pytest -import importlib -import logging -import json -import os - -script = importlib.import_module("aci-preupgrade-validation-script") -AciVersion = script.AciVersion -AciResult = script.AciResult - - -@pytest.fixture(autouse=True) -def mock_get_credentials(monkeypatch): - """Mock the get_credentials function to return a fixed username and password.""" - - def _mock_get_credentials(): - return ("admin", "mypassword") - - monkeypatch.setattr(script, "get_credentials", _mock_get_credentials) - - -@pytest.fixture(autouse=True) -def mock_get_target_version(monkeypatch): - """ - Mock `get_target_version()` to return a fixed target version. - Used when the script is run without the `-t` option which is simulated by - `arg_tversion`. - Not using `mock_icurl` because this function involves a user interaction to - select a version. 
- """ - - def _mock_get_target_version(arg_tversion): - if arg_tversion: - try: - return AciVersion(arg_tversion) - except ValueError as e: - script.prints(e) - raise SystemExit(1) - return AciVersion("6.2(1a)") - - monkeypatch.setattr(script, "get_target_version", _mock_get_target_version) - - -outputs = { - "cversion": [ - { - "firmwareCtrlrRunning": { - "attributes": { - "dn": "topology/pod-1/node-1/sys/ctrlrfwstatuscont/ctrlrrunning", - "version": "6.1(1a)", - } - } - } - ], - "switch_version": [ - {"firmwareRunning": {"attributes": {"peVer": "6.1(1a)", "version": "n9000-16.1(1a)"}}}, - {"firmwareRunning": {"attributes": {"peVer": "6.0(9d)", "version": "n9000-16.0(9d)"}}}, - ], - "vpc_nodes": [ - {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-101-102/nodepep-101", "id": "101"}}}, - {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-101-102/nodepep-102", "id": "102"}}}, - ], -} - - -@pytest.mark.parametrize( - "icurl_outputs, api_only, arg_tversion, arg_cversion, debug_function, expected_result", - [ - # Default, no argparse arguments - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - None, - None, - None, - {"username": "admin", "password": "mypassword", "cversion": AciVersion("6.1(1a)"), "tversion": AciVersion("6.2(1a)"), "sw_cversion": AciVersion("6.0(9d)"), "vpc_node_ids": ["101", "102"]}, - ), - # `api_only` is True (i.e. --puv) - # No `get_credentials()`, no username nor password - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - True, - None, - None, - None, - {"username": None, "password": None, "cversion": AciVersion("6.1(1a)"), "tversion": AciVersion("6.2(1a)"), "sw_cversion": AciVersion("6.0(9d)"), "vpc_node_ids": ["101", "102"]}, - ), - # `arg_tversion` is provided (i.e. 
-t 6.1(4a)) - # The version `get_target_version()` is ignored. - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - "6.1(4a)", - None, - None, - {"username": "admin", "password": "mypassword", "cversion": AciVersion("6.1(1a)"), "tversion": AciVersion("6.1(4a)"), "sw_cversion": AciVersion("6.0(9d)"), "vpc_node_ids": ["101", "102"]}, - ), - # `arg_tversion` and `arg_cversion` are both provided (i.e. -t 6.1(4a)) - # The version `get_target_version()` is ignored. - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - "6.1(4a)", - "6.0(8d)", - None, - {"username": "admin", "password": "mypassword", "cversion": AciVersion("6.0(8d)"), "tversion": AciVersion("6.1(4a)"), "sw_cversion": AciVersion("6.0(9d)"), "vpc_node_ids": ["101", "102"]}, - ), - # `arg_tversion`, `arg_cversion` and 'debug_function' are all provided - # The version `get_target_version()` is ignored. - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - "6.1(4a)", - "6.0(4d)", - "ave_eol_check", - {"username": "admin", "password": "mypassword", "cversion": AciVersion("6.0(4d)"), "tversion": AciVersion("6.1(4a)"), "sw_cversion": AciVersion("6.0(9d)"), "vpc_node_ids": ["101", "102"]}, - ), - # versions are switch syntax - # The version `get_target_version()` is ignored. 
- ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - "16.1(4a)", - "16.0(4d)", - "ave_eol_check", - {"username": "admin", "password": "mypassword", "cversion": AciVersion("6.0(4d)"), "tversion": AciVersion("6.1(4a)"), "sw_cversion": AciVersion("6.0(9d)"), "vpc_node_ids": ["101", "102"]}, - ), - # versions are switch or APIC syntax - # The version `get_target_version()` is ignored. - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - "n9000-16.2(1a).bin", - "aci-apic-dk9.6.0.1a.bin", - "ave_eol_check", - {"username": "admin", "password": "mypassword", "cversion": AciVersion("6.0(1a)"), "tversion": AciVersion("6.2(1a)"), "sw_cversion": AciVersion("6.0(9d)"), "vpc_node_ids": ["101", "102"]}, - ), - ], -) -def test_prepare(mock_icurl, api_only, arg_tversion, arg_cversion, debug_function, expected_result): - script.initialize() - checks = script.get_checks(api_only, debug_function) - inputs = script.prepare(api_only, arg_tversion, arg_cversion, checks) - for key, value in expected_result.items(): - if "version" in key: # cversion or tversion - assert isinstance(inputs[key], AciVersion) - assert str(inputs[key]) == str(value) - else: - assert inputs[key] == value - - result_files = os.listdir(script.JSON_DIR) - # Result files should be created for all checks - assert len(result_files) == len(checks) - for check in checks: - # Rule name is known only through the wrapper `check_wrapper`. - # Rule name content should be checked via another unit test. - # Use AciResult class here just to get the filename from `check.__name__`. 
- ar = AciResult(check.__name__, "unknown_name", "") - file_path = os.path.join(script.JSON_DIR, ar.filename) - assert os.path.exists(file_path), "Missing result file: {}".format(file_path) - with open(file_path, "r") as f: - result = json.load(f) - assert result["ruleId"] == check.__name__ - assert result["ruleStatus"] == AciResult.IN_PROGRESS - - with open(script.META_FILE, "r") as f: - meta = json.load(f) - assert meta["name"] == "PreupgradeCheck" - assert meta["method"] == "standalone script" - assert meta.get("datetime") is not None - assert meta["script_version"] == script.SCRIPT_VERSION - assert meta["cversion"] == str(expected_result["cversion"]) - assert meta["tversion"] == str(expected_result["tversion"]) - assert meta["sw_cversion"] == str(expected_result["sw_cversion"]) - assert meta["api_only"] == api_only - assert meta["total_checks"] == len(checks) - if debug_function: - assert meta["total_checks"] == 1 - - -def test_tversion_invald(): - with pytest.raises(SystemExit): - with pytest.raises(ValueError): - script.prepare(False, "invalid_version", "6.0(1a)", []) - - -def test_cversion_invald(): - with pytest.raises(SystemExit): - with pytest.raises(ValueError): - script.prepare(False, "6.0(1a)", "invalid_version", []) - - -@pytest.mark.parametrize( - "icurl_outputs, api_only, arg_tversion, arg_cversion, debug_function, expected_result", - [ - # `get_cversion()` failure - ( - { - "firmwareCtrlrRunning.json": [{"error": {"attributes": {"code": "400", "text": "Request failed, unresolved class for firmwareCtrlrRunning_fake"}}}], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - None, - None, - None, - """\ -Checking current APIC version... - -Error: Your current ACI version does not have requested class -Initial query failed. Ensure APICs are healthy. Ending script run. 
-""", - ), - # `get_switch_version()` failure - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": [{"error": {"attributes": {"code": "400", "text": "Request failed, unresolved class for firmwareRunning_fake"}}}], - "fabricNodePEp.json": outputs["vpc_nodes"], - }, - False, - None, - None, - None, - """\ -Gathering Lowest Switch Version from Firmware Repository... - -Error: Your current ACI version does not have requested class -Initial query failed. Ensure APICs are healthy. Ending script run. -""", - ), - # `get_vpc_nodes()` failure - ( - { - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": [{"error": {"attributes": {"code": "400", "text": "Request failed, unresolved class for fabricNodePEp_fake"}}}], - }, - False, - None, - None, - None, - """\ -Collecting VPC Node IDs... - -Error: Your current ACI version does not have requested class -Initial query failed. Ensure APICs are healthy. Ending script run. 
-""", - ), - ], -) -def test_prepare_exception(capsys, caplog, mock_icurl, api_only, arg_tversion, arg_cversion, debug_function, expected_result): - caplog.set_level(logging.CRITICAL) - with pytest.raises(SystemExit): - with pytest.raises(Exception): - checks = script.get_checks(api_only, debug_function) - script.prepare(api_only, arg_tversion, arg_cversion, checks) - captured = capsys.readouterr() - print(captured.out) - assert captured.out.endswith(expected_result) - - -# Unit test focusing only on the result file creation -def test_prepare_initial_result_files(mock_icurl, icurl_outputs): - # Provide required API outputs used inside prepare() - icurl_outputs.update({ - "firmwareCtrlrRunning.json": outputs["cversion"], - "firmwareRunning.json": outputs["switch_version"], - "fabricNodePEp.json": outputs["vpc_nodes"], - }) - - # Create two simple checks with known titles - @script.check_wrapper(check_title="Prepare Check A") - def prep_check_a(**kwargs): - return script.Result(result=script.PASS) - - @script.check_wrapper(check_title="Prepare Check B") - def prep_check_b(**kwargs): - return script.Result(result=script.PASS) - - checks = [prep_check_a, prep_check_b] - - # Run prepare which should only initialize result files - script.prepare(api_only=False, arg_tversion=None, arg_cversion=None, checks=checks) - - # Verify result files and contents - expected = { - "prep_check_a": "Prepare Check A", - "prep_check_b": "Prepare Check B", - } - for func_name, title in expected.items(): - ar = AciResult(func_name, title, "") - file_path = os.path.join(script.JSON_DIR, ar.filename) - assert os.path.exists(file_path), "Missing result file: {}".format(file_path) - with open(file_path, "r") as f: - data = json.load(f) - assert data["ruleId"] == func_name - assert data["name"] == title - assert data["ruleStatus"] == AciResult.IN_PROGRESS diff --git a/tests/test_run_checks.py b/tests/test_run_checks.py deleted file mode 100644 index c8a440e..0000000 --- 
a/tests/test_run_checks.py +++ /dev/null @@ -1,188 +0,0 @@ -import importlib -import logging -import json -import os - -script = importlib.import_module("aci-preupgrade-validation-script") -AciVersion = script.AciVersion -JSON_DIR = script.JSON_DIR -AciResult = script.AciResult -Result = script.Result -check_wrapper = script.check_wrapper - - -# 120 = length of `<title> + <msg> --padding-- <RESULT>` in `[Check XX/YY] <title>... <msg> --padding-- <RESULT>` -ERROR_REASON = "This is a test exception to result in `script.ERROR`." -ERROR_REASON_LONG = "This is a looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong test exception to result in `script.ERROR`." # > 120 char - - -def check_builder(func_name, title, result, err_msg, others): - @check_wrapper(check_title=title) - def _check(**kwargs): - _check.__name__ = func_name # Set the function name for the check - if result == script.ERROR: - raise Exception(err_msg) - else: - return Result(result=result, **others) - return _check - - -fake_data_full = { - "msg": "test msg", - "headers": ["H1", "H2", "H3"], - "data": [["Data1", "Data2", "Data3"], ["Data4", "Data5", "Data6"], ["Loooooong Data7", "Data8", "Data9"]], - "unformatted_headers": ["Unformatted_H1"], - "unformatted_data": [["Data1"], ["Data2"]], - "recommended_action": "This is your recommendation to remediate the issue", - "doc_url": "https://fake_doc_url.local/path1/#section1", -} - -fake_data_no_msg_no_unform = { - "headers": ["H1", "H2", "H3"], - "data": [["Data1", "Data2", "Data3"], ["Data4", "Data5", "Data6"], ["Loooooong Data7", "Data8", "Data9"]], - "recommended_action": "This is your recommendation to remediate the issue", - "doc_url": "https://fake_doc_url.local/path1/#section1", -} - -fake_data_error = { - "msg": "Error msg. 
This should not be printed", -} - -fake_data_only_msg = { - "msg": "test msg", -} - -fake_data_only_long_msg = { - "msg": "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong test msg", # > 120 char -} - -fake_checks_meta = [ - ("fake_check1", "Test Check 1", script.PASS, "", {}), - ("fake_check2", "Test Check 2", script.FAIL_O, "", fake_data_full), - ("fake_check3", "Test Check 3", script.FAIL_UF, "", fake_data_no_msg_no_unform), - ("fake_check4", "Test Check 4", script.MANUAL, "", fake_data_only_msg), - ("fake_check5", "Test Check 5", script.POST, "", fake_data_only_msg), - ("fake_check6", "Test Check 6", script.NA, "", fake_data_only_msg), - ("fake_check7", "Test Check 7", script.ERROR, ERROR_REASON, fake_data_error), - ("fake_check8", "Test Check 8", script.PASS, "", fake_data_only_msg), - ("fake_check9", "Test Check 9", script.ERROR, ERROR_REASON_LONG, fake_data_error), - ("fake_check10", "Test Check 10", script.NA, "", fake_data_only_long_msg), -] - -fake_checks = [ - check_builder(func_name, title, result, err_msg, others) - for func_name, title, result, err_msg, others in fake_checks_meta -] - -fake_result_filenames = [ - "{}.json".format(func_name) for func_name, _, _, _, _ in fake_checks_meta -] - -fake_inputs = { - "username": "admin", - "password": "mypassword", - "cversion": AciVersion("6.1(1a)"), - "tversion": AciVersion("6.2(1a)"), - "sw_cversion": AciVersion("6.1(1a)"), - "vpc_node_ids": ["101", "102"], -} - - -def test_run_checks(capsys, caplog): - caplog.set_level(logging.CRITICAL) # Skip logging.exceptions in pytest output as it is expected. - script.run_checks(fake_checks, fake_inputs) - captured = capsys.readouterr() - print(captured.out) - assert ( - captured.out - == """\ -[Check 1/10] Test Check 1... PASS -[Check 2/10] Test Check 2... test msg FAIL - OUTAGE WARNING!! 
- H1 H2 H3 - -- -- -- - Data1 Data2 Data3 - Data4 Data5 Data6 - Loooooong Data7 Data8 Data9 - - Unformatted_H1 - -------------- - Data1 - Data2 - - Recommended Action: This is your recommendation to remediate the issue - Reference Document: https://fake_doc_url.local/path1/#section1 - - -[Check 3/10] Test Check 3... FAIL - UPGRADE FAILURE!! - H1 H2 H3 - -- -- -- - Data1 Data2 Data3 - Data4 Data5 Data6 - Loooooong Data7 Data8 Data9 - - Recommended Action: This is your recommendation to remediate the issue - Reference Document: https://fake_doc_url.local/path1/#section1 - - -[Check 4/10] Test Check 4... test msg MANUAL CHECK REQUIRED -[Check 5/10] Test Check 5... test msg POST UPGRADE CHECK REQUIRED -[Check 6/10] Test Check 6... test msg N/A -[Check 7/10] Test Check 7... Unexpected Error: This is a test exception to result in `script.ERROR`. ERROR !! -[Check 8/10] Test Check 8... test msg PASS -[Check 9/10] Test Check 9... Unexpected Error: This is a looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong test exception to result in `script.ERROR`. ERROR !! -[Check 10/10] Test Check 10... looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong test msg N/A - -=== Summary Result === - -PASS : 2 -FAIL - OUTAGE WARNING!! : 1 -FAIL - UPGRADE FAILURE!! : 1 -MANUAL CHECK REQUIRED : 1 -POST UPGRADE CHECK REQUIRED : 1 -N/A : 2 -ERROR !! 
: 2 -TOTAL : 10 -""" # noqa: W291 - ) - - json_files = [f for f in os.listdir(JSON_DIR) if f in fake_result_filenames] - assert json_files, "Result JSON file not created" - - for json_file in json_files: - with open(os.path.join(JSON_DIR, json_file)) as f: - data = json.load(f) - - for func_name, title, result, err_msg, others, in fake_checks_meta: - if data["ruleId"] == func_name: - assert data["name"] == title - # reason - if result == script.ERROR: - assert data["reason"].endswith(err_msg) - elif result not in [script.PASS, script.NA]: - msg = others.get("msg", "See Failure Details") - if others.get("unformatted_data"): - msg += ( - "\n" - "Parse failure occurred, the provided data may not be complete. " - "Please contact Cisco TAC to identify the missing data." - ) - assert data["reason"] == msg - else: - assert data["reason"] == others.get("msg", "") - # failureDetails.failType - if result not in [script.PASS, script.NA]: - assert data["failureDetails"]["failType"] == result - else: - assert data["failureDetails"]["failType"] == "" - # failureDetails.data - assert data["failureDetails"]["data"] == AciResult.craftData( - others.get("headers", []), others.get("data", []) - ) - assert data["failureDetails"]["unformatted_data"] == AciResult.craftData( - others.get("unformatted_headers", []), others.get("unformatted_data", []) - ) - # other fields - assert data["recommended_action"] == others.get("recommended_action", "") - assert data["docUrl"] == others.get("doc_url", "") - assert data["description"] == "" - assert data["sub_reason"] == "" diff --git a/tests/vpc_paired_switches_check/test_vpc_paired_switches_check.py b/tests/vpc_paired_switches_check/test_vpc_paired_switches_check.py deleted file mode 100644 index 3619137..0000000 --- a/tests/vpc_paired_switches_check/test_vpc_paired_switches_check.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import pytest -import logging -import importlib -from helpers.utils import read_data - -script = 
importlib.import_module("aci-preupgrade-validation-script") - -log = logging.getLogger(__name__) -dir = os.path.dirname(os.path.abspath(__file__)) - - -# icurl queries -topSystems = "topSystem.json" - - -@pytest.mark.parametrize( - "icurl_outputs, vpc_node_ids, expected_result", - [ - # all leaf switches are in vPC - ( - {topSystems: read_data(dir, "topSystem.json")}, - ["101", "102", "103", "204", "206"], - script.PASS, - ), - # not all leaf switches are in vPC - ( - {topSystems: read_data(dir, "topSystem.json")}, - ["101", "103", "204", "206"], - script.MANUAL, - ), - ], -) -def test_logic(mock_icurl, vpc_node_ids, expected_result): - result = script.vpc_paired_switches_check(1, 1, vpc_node_ids=vpc_node_ids) - assert result == expected_result diff --git a/tests/vpc_paired_switches_check/topSystem.json b/tests/vpc_paired_switches_check/topSystem.json deleted file mode 100644 index 39faf83..0000000 --- a/tests/vpc_paired_switches_check/topSystem.json +++ /dev/null @@ -1,134 +0,0 @@ -[ - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-1/sys", - "id": "1", - "name": "apic1", - "podId": "1", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-2/node-3/sys", - "id": "3", - "name": "apic3", - "podId": "2", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-2/sys", - "id": "2", - "name": "apic2", - "podId": "1", - "role": "controller" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-101/sys", - "id": "101", - "name": "leaf101", - "podId": "1", - "role": "leaf" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-102/sys", - "id": "102", - "name": "leaf102", - "podId": "1", - "role": "leaf" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-103/sys", - "id": "103", - "name": "leaf103", - "podId": "1", - "role": "leaf" - } - } - }, - { - "topSystem": { - "attributes": { 
- "dn": "topology/pod-2/node-204/sys", - "id": "204", - "name": "leaf204", - "podId": "2", - "role": "leaf" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-2/node-206/sys", - "id": "206", - "name": "leaf206", - "podId": "2", - "role": "leaf" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-2/node-2002/sys", - "id": "2002", - "name": "spine2002", - "podId": "2", - "role": "spine" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-2/node-2001/sys", - "id": "2001", - "name": "spine2001", - "podId": "2", - "role": "spine" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-1002/sys", - "id": "1002", - "name": "spine1002", - "podId": "1", - "role": "spine" - } - } - }, - { - "topSystem": { - "attributes": { - "dn": "topology/pod-1/node-1001/sys", - "id": "1001", - "name": "spine1001", - "podId": "1", - "role": "spine" - } - } - } -] From 818f198a5635f376a30f6e1c49ab91e9b5efe849 Mon Sep 17 00:00:00 2001 From: Welkin <48639332+welkin-he@users.noreply.github.com> Date: Thu, 11 Dec 2025 04:54:55 +1100 Subject: [PATCH 10/14] vmm_inventory_partial_sync fault check (#274) * updated vmm_inventory_partial_sync to align with v3.0 paradigm * Enhanced by looking for changeset partial-inv to avoid flagging on the other reasons. 
* merge master + fix pytest --------- Co-authored-by: Gabriel <gmonroy@cisco.com> --- aci-preupgrade-validation-script.py | 38 +++++++++++++++++ docs/docs/validations.md | 16 ++++++-- .../faultInst_neg.json | 1 + .../faultInst_neg1.json | 15 +++++++ .../faultInst_pos.json | 15 +++++++ .../faultInst_pos2.json | 15 +++++++ .../test_vmm_inventory_partial_sync.py | 41 +++++++++++++++++++ 7 files changed, 138 insertions(+), 3 deletions(-) create mode 100644 tests/checks/vmm_inventory_partial_sync/faultInst_neg.json create mode 100644 tests/checks/vmm_inventory_partial_sync/faultInst_neg1.json create mode 100644 tests/checks/vmm_inventory_partial_sync/faultInst_pos.json create mode 100644 tests/checks/vmm_inventory_partial_sync/faultInst_pos2.json create mode 100644 tests/checks/vmm_inventory_partial_sync/test_vmm_inventory_partial_sync.py diff --git a/aci-preupgrade-validation-script.py b/aci-preupgrade-validation-script.py index 9dff95f..ff1e522 100644 --- a/aci-preupgrade-validation-script.py +++ b/aci-preupgrade-validation-script.py @@ -6032,6 +6032,43 @@ def configpush_shard_check(tversion, **kwargs): return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + +@check_wrapper(check_title='APIC VMM inventory sync fault (F0132)') +def apic_vmm_inventory_sync_faults_check(**kwargs): + result = PASS + headers = ['Fault', 'VMM Domain', 'Controller'] + data = [] + unformatted_headers = ["Fault", "Fault DN"] + unformatted_data = [] + recommended_action = "Please look for Faults under VM and Host and fix them via VCenter, then manually re-trigger inventory sync on APIC" + doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#vmm-inventory-partially-synced' + vmm_regex = r'comp/prov-VMware/ctrlr-\[(?P<domain>.+?)\]-(?P<controller>.+?)/fault-F0132' + faultInsts = icurl('class', 'faultInst.json?query-target-filter=eq(faultInst.code,"F0132")') + + for faultInst in faultInsts: + fc = 
faultInst['faultInst']['attributes']['code'] + dn = faultInst['faultInst']['attributes']['dn'] + desc = faultInst['faultInst']['attributes']['descr'] + change_set = faultInst['faultInst']['attributes']['changeSet'] + + dn_array = re.search(vmm_regex, dn) + if dn_array and "partial-inv" in change_set: + data.append([fc, dn_array.group("domain"), dn_array.group("controller")]) + elif "partial-inv" in change_set: + unformatted_data.append([fc, dn]) + + if data or unformatted_data: + result = MANUAL + + return Result( + result=result, + headers=headers, + data=data, + unformatted_headers=unformatted_headers, + unformatted_data=unformatted_data, + recommended_action=recommended_action, + doc_url=doc_url) + # ---- Script Execution ---- @@ -6139,6 +6176,7 @@ class CheckManager: scalability_faults_check, fabric_port_down_check, equipment_disk_limits_exceeded, + apic_vmm_inventory_sync_faults_check, # Configurations vpc_paired_switches_check, diff --git a/docs/docs/validations.md b/docs/docs/validations.md index 8d4134a..83bebb2 100644 --- a/docs/docs/validations.md +++ b/docs/docs/validations.md @@ -68,7 +68,7 @@ Items | Faults | This Script [L3 Port Config][f7] | F0467: port-configured-as-l2 | :white_check_mark: | :white_check_mark: 5.2(4d) [L2 Port Config][f8] | F0467: port-configured-as-l3 | :white_check_mark: | :white_check_mark: 5.2(4d) [Access (Untagged) Port Config][f9] | F0467: native-or-untagged-encap-failure | :white_check_mark: | :no_entry_sign: -[Encap Already in Use][f10] | F0467: encap-already-in-use | :white_check_mark: | :no_entry_sign: | :no_entry_sign: +[Encap Already in Use][f10] | F0467: encap-already-in-use | :white_check_mark: | :no_entry_sign: [L3Out Subnets][f11] | F0467: prefix-entry-already-in-use | :white_check_mark: | :white_check_mark: 6.0(1g) [BD Subnets][f12] | F0469: duplicate-subnets-within-ctx | :white_check_mark: | :white_check_mark: 5.2(4d) [BD Subnets][f13] | F1425: subnet-overlap | :white_check_mark: | :white_check_mark: 5.2(4d) @@ 
-79,7 +79,7 @@ Items | Faults | This Script [Scalability (faults related to Capacity Dashboard)][f18] | TCA faults for eqptcapacityEntity | :white_check_mark: | :no_entry_sign: [Fabric Port Status][f19] | F1394: ethpm-if-port-down-fabric | :white_check_mark: | :no_entry_sign: [Equipment Disk Limits][f20] | F1820: 80% -minor<br>F1821: -major<br>F1822: -critical | :white_check_mark: | :no_entry_sign: - +[VMM Inventory Partially Synced][f21] | F0132: comp-ctrlr-operational-issues | :white_check_mark: | :no_entry_sign: @@ -102,7 +102,7 @@ Items | Faults | This Script [f18]: #scalability-faults-related-to-capacity-dashboard [f19]: #fabric-port-status [f20]: #equipment-disk-limits - +[f21]: #vmm-inventory-partially-synced ### Configuration Checks @@ -1509,6 +1509,16 @@ To recover from this fault, try the following action userdom : all ``` +### VMM Inventory Partially Synced + +This script checks for fault code F0132 with rule comp-ctrlr-operational-issues and change set `partial-inv`. This fault is raised when APICs report a partially synchronized inventory with vCenter servers. + +EPGs using the `immediate` or `on-demand` resolution immediacy (this is typical) rely on the VMM Inventory to determine VLAN programming. If the known inventory changes during an upgrade and the APIC is reporting its last sync to be partial, a VMM inventory resync response with inventory changes could result in VLANs being unexpectedly removed. + +EPGs using the `pre-provision` resolution immediacy do not rely on the VMM inventory for VLAN deployment and so unexpected inventory changes will not change VLAN programming. + +This check returns a `MANUAL` result as there are many reasons for a partial inventory sync to be reported. The goal is to ensure that the VMM inventory sync has fully completed before triggering the APIC upgrade to reduce any chance for unexpected inventory changes to occur. 
+ ## Configuration Check Details ### VPC-paired Leaf switches diff --git a/tests/checks/vmm_inventory_partial_sync/faultInst_neg.json b/tests/checks/vmm_inventory_partial_sync/faultInst_neg.json new file mode 100644 index 0000000..fe51488 --- /dev/null +++ b/tests/checks/vmm_inventory_partial_sync/faultInst_neg.json @@ -0,0 +1 @@ +[] diff --git a/tests/checks/vmm_inventory_partial_sync/faultInst_neg1.json b/tests/checks/vmm_inventory_partial_sync/faultInst_neg1.json new file mode 100644 index 0000000..1854d9d --- /dev/null +++ b/tests/checks/vmm_inventory_partial_sync/faultInst_neg1.json @@ -0,0 +1,15 @@ +[ + { + "faultInst": { + "attributes": { + "cause": "operational-issues", + "code": "F0132", + "changeSet": "remoteOperIssues (Old: , New: event-channel-down)", + "descr": "Operational issues detected for VMM controller: 192.168.1.1 with name MY_CONTROLLER in datacenter MY_DC in domain: MY_DOMAIN due to error: Received partial inventory in the last inventory sync. Please look for Faults under VM and Host and fix them via VCenter, then manually re-trigger inventory sync on APIC", + "dn": "comp/prov-VMware/ctrlr-[MY_DOMAIN]-MY_CONTROLLER/fault-F0132", + "rule": "comp-ctrlr-operational-issues" + } + } + } + +] diff --git a/tests/checks/vmm_inventory_partial_sync/faultInst_pos.json b/tests/checks/vmm_inventory_partial_sync/faultInst_pos.json new file mode 100644 index 0000000..af9a9b9 --- /dev/null +++ b/tests/checks/vmm_inventory_partial_sync/faultInst_pos.json @@ -0,0 +1,15 @@ +[ + { + "faultInst": { + "attributes": { + "cause": "operational-issues", + "code": "F0132", + "changeSet": "remoteOperIssues (Old: event-channel-down,partial-inv, New: partial-inv)", + "descr": "Operational issues detected for VMM controller: 192.168.1.1 with name MY_CONTROLLER in datacenter MY_DC in domain: MY_DOMAIN due to error: Received partial inventory in the last inventory sync. 
Please look for Faults under VM and Host and fix them via VCenter, then manually re-trigger inventory sync on APIC", + "dn": "comp/prov-VMware/ctrlr-[MY_DOMAIN]-MY_CONTROLLER/fault-F0132", + "rule": "comp-ctrlr-operational-issues" + } + } + } + +] diff --git a/tests/checks/vmm_inventory_partial_sync/faultInst_pos2.json b/tests/checks/vmm_inventory_partial_sync/faultInst_pos2.json new file mode 100644 index 0000000..0f43d38 --- /dev/null +++ b/tests/checks/vmm_inventory_partial_sync/faultInst_pos2.json @@ -0,0 +1,15 @@ +[ + { + "faultInst": { + "attributes": { + "cause": "operational-issues", + "code": "F0132", + "changeSet": "remoteOperIssues (Old: event-channel-down,partial-inv, New: partial-inv)", + "descr": "Operational issues detected for VMM controller: 192.168.1.1 with name MY_CONTROLLER in datacenter MY_DC in domain: MY_DOMAIN due to error: Received partial inventory in the last inventory sync. Please look for Faults under VM and Host and fix them via VCenter, then manually re-trigger inventory sync on APIC", + "dn": "comp/prov-VMware/ctrlrx-[MY_DOMAIN]-MY_CONTROLLER/fault-F0132", + "rule": "comp-ctrlr-operational-issues" + } + } + } + +] diff --git a/tests/checks/vmm_inventory_partial_sync/test_vmm_inventory_partial_sync.py b/tests/checks/vmm_inventory_partial_sync/test_vmm_inventory_partial_sync.py new file mode 100644 index 0000000..81940d1 --- /dev/null +++ b/tests/checks/vmm_inventory_partial_sync/test_vmm_inventory_partial_sync.py @@ -0,0 +1,41 @@ +import os +import pytest +import logging +import importlib +from helpers.utils import read_data + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "apic_vmm_inventory_sync_faults_check" + + +f0132_api = 'faultInst.json' +f0132_api += '?query-target-filter=eq(faultInst.code,"F0132")' + +@pytest.mark.parametrize( + "icurl_outputs, expected_result", + [ + ( + {f0132_api: read_data(dir, 
"faultInst_neg.json")}, + script.PASS, + ), + ( + {f0132_api: read_data(dir, "faultInst_neg1.json")}, + script.PASS, + ), + ( + {f0132_api: read_data(dir, "faultInst_pos.json")}, + script.MANUAL, + ), + ( + {f0132_api: read_data(dir, "faultInst_pos2.json")}, + script.MANUAL, + ) + ], +) +def test_logic(run_check, mock_icurl, expected_result): + result = run_check() + assert result.result == expected_result From d1b8b5ee05385bc6297fd4b8f1e890e5b52039f9 Mon Sep 17 00:00:00 2001 From: takishida <38262981+takishida@users.noreply.github.com> Date: Wed, 17 Dec 2025 11:19:58 -0800 Subject: [PATCH 11/14] fix: #304 skip exception for fabricNodePEp with non-zero totalCount (#305) --- aci-preupgrade-validation-script.py | 10 +++++- tests/test_get_vpc_node.py | 47 ++++++++--------------------- 2 files changed, 22 insertions(+), 35 deletions(-) diff --git a/aci-preupgrade-validation-script.py b/aci-preupgrade-validation-script.py index ff1e522..f4fd819 100644 --- a/aci-preupgrade-validation-script.py +++ b/aci-preupgrade-validation-script.py @@ -1764,7 +1764,15 @@ def get_vpc_nodes(): """ Returns list of VPC Node IDs; ['101', '102', etc...] """ prints("Collecting VPC Node IDs...", end='') vpc_nodes = [] - prot_pols = icurl('class', 'fabricNodePEp.json') + try: + prot_pols = icurl('class', 'fabricNodePEp.json') + except Exception as e: + # CSCws30568: expected for fabricNodePEp to return non-zero totalCount + # incorrectly for an empty response. 
+ if str(e).startswith("API response empty with totalCount:"): + prot_pols = [] + else: + raise e for vpc_node in prot_pols: vpc_nodes.append(vpc_node['fabricNodePEp']['attributes']['id']) vpc_nodes.sort() diff --git a/tests/test_get_vpc_node.py b/tests/test_get_vpc_node.py index 956377c..0f664a5 100644 --- a/tests/test_get_vpc_node.py +++ b/tests/test_get_vpc_node.py @@ -7,38 +7,10 @@ fabricNodePEps = "fabricNodePEp.json" data = [ - { - "fabricNodePEp": { - "attributes": { - "dn": "uni/fabric/protpol/expgep-101-103/nodepep-101", - "id": "101" - } - } - }, - { - "fabricNodePEp": { - "attributes": { - "dn": "uni/fabric/protpol/expgep-204-206/nodepep-206", - "id": "206" - } - } - }, - { - "fabricNodePEp": { - "attributes": { - "dn": "uni/fabric/protpol/expgep-101-103/nodepep-103", - "id": "103" - } - } - }, - { - "fabricNodePEp": { - "attributes": { - "dn": "uni/fabric/protpol/expgep-204-206/nodepep-204", - "id": "204" - } - } - } + {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-101-103/nodepep-101", "id": "101"}}}, + {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-204-206/nodepep-206", "id": "206"}}}, + {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-101-103/nodepep-103", "id": "103"}}}, + {"fabricNodePEp": {"attributes": {"dn": "uni/fabric/protpol/expgep-204-206/nodepep-204", "id": "204"}}}, ] data2 = [ @@ -68,8 +40,15 @@ {fabricNodePEps: data2}, ["101", "102", "103", "104", "105", "106"], "Collecting VPC Node IDs...101, 102, 103, 104, ... (and 2 more)\n\n", - ) - ] + ), + # CSCws30568: expected for fabricNodePEp to return non-zero totalCount + # incorrectly for an empty response. 
+ ( + {fabricNodePEps: {"totalCount": "8", "imdata": []}}, + [], + "Collecting VPC Node IDs...\n\n", + ), + ], ) def test_get_vpc_nodes(capsys, mock_icurl, expected_result, expected_stdout): vpc_nodes = script.get_vpc_nodes() From 300451e86329af99217de16b2e422e038ebf95cd Mon Sep 17 00:00:00 2001 From: tkishida <tkishida@cisco.com> Date: Wed, 17 Dec 2025 11:24:56 -0800 Subject: [PATCH 12/14] bump to v4.0.1 --- aci-preupgrade-validation-script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aci-preupgrade-validation-script.py b/aci-preupgrade-validation-script.py index f4fd819..88fb89f 100644 --- a/aci-preupgrade-validation-script.py +++ b/aci-preupgrade-validation-script.py @@ -38,7 +38,7 @@ import os import re -SCRIPT_VERSION = "v4.0.0" +SCRIPT_VERSION = "v4.0.1" DEFAULT_TIMEOUT = 600 # sec # result constants DONE = 'DONE' From 5f694ce1c9190ffd5b142c15f52c27b3c343e05a Mon Sep 17 00:00:00 2001 From: Saket Kumar <37291252+saket1999@users.noreply.github.com> Date: Thu, 15 Jan 2026 03:58:36 +0530 Subject: [PATCH 13/14] feat: Add pre-upgrade warning for APIC 6.2.1 that downgrade is not supported to pre-6.2.1 version. (#289) * feat: Add pre-upgrade warning for APIC 6.2.1 that downgrade is not supported to pre-6.2.1 version. 
--------- Co-authored-by: Saket <saketk@cisco.com> Co-authored-by: tkishida <tkishida@cisco.com> --- aci-preupgrade-validation-script.py | 20 +++++++++++ docs/docs/validations.md | 34 +++++++++++++++++++ ...est_apic_downgrade_compat_warning_check.py | 32 +++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 tests/checks/apic_downgrade_compat_warning_check/test_apic_downgrade_compat_warning_check.py diff --git a/aci-preupgrade-validation-script.py b/aci-preupgrade-validation-script.py index 88fb89f..7a0dc88 100644 --- a/aci-preupgrade-validation-script.py +++ b/aci-preupgrade-validation-script.py @@ -6077,6 +6077,25 @@ def apic_vmm_inventory_sync_faults_check(**kwargs): recommended_action=recommended_action, doc_url=doc_url) + +@check_wrapper(check_title='APIC downgrade compatibility when crossing 6.2 release') +def apic_downgrade_compat_warning_check(cversion, tversion, **kwargs): + result = NA + headers = ["Current version", "Target Version", "Warning"] + data = [] + recommended_action = 'No action required. Just be aware of the downgrade limitation after the upgrade.' 
+ doc_url = 'https://datacenter.github.io/ACI-Pre-Upgrade-Validation-Script/validations/#apic-downgrade-compatibility-when-crossing-62-release' + + if not tversion or not cversion: + return Result(result=MANUAL, msg=TVER_MISSING) + if cversion.older_than("6.2(1a)") \ + and (tversion.same_as("6.2(1a)") or tversion.newer_than("6.2(1a)")): + result = MANUAL + data.append([cversion, tversion, "Downgrading APIC from 6.2(1)+ to pre-6.2(1) will not be supported."]) + + return Result(result=result, headers=headers, data=data, recommended_action=recommended_action, doc_url=doc_url) + + # ---- Script Execution ---- @@ -6164,6 +6183,7 @@ class CheckManager: post_upgrade_cb_check, validate_32_64_bit_image_check, fabric_link_redundancy_check, + apic_downgrade_compat_warning_check, # Faults apic_disk_space_faults_check, diff --git a/docs/docs/validations.md b/docs/docs/validations.md index 83bebb2..d6c6686 100644 --- a/docs/docs/validations.md +++ b/docs/docs/validations.md @@ -36,6 +36,7 @@ Items | This Script [6.0(2)+ requires 32 and 64 bit switch images][g16] | :white_check_mark: | :no_entry_sign: [Fabric Link Redundancy][g17] | :white_check_mark: | :no_entry_sign: [APIC Database Size][g18] | :white_check_mark: | :no_entry_sign: +[APIC downgrade compatibility when crossing 6.2 release][g19]| :white_check_mark: | :no_entry_sign: [g1]: #compatibility-target-aci-version [g2]: #compatibility-cimc-version @@ -55,6 +56,7 @@ Items | This Script [g16]: #602-requires-32-and-64-bit-switch-images [g17]: #fabric-link-redundancy [g18]: #apic-database-size +[g19]: #apic-downgrade-compatibility-when-crossing-62-release ### Fault Checks Items | Faults | This Script | APIC built-in @@ -498,6 +500,38 @@ For current version is 6.1(3f): In either scenario, contact TAC to collect a database dump of the flagged DME(s) and shard(s) for further analysis. 
+### APIC downgrade compatibility when crossing 6.2 release + +APIC 6.2(1) release introduces significant optimizations to the APIC upgrade process, including shorter upgrade time and an orchestrated workflow across the cluster with fewer failure points. This release includes an architecture change on APIC, so APIC running 6.2(1) or newer (e.g., 6.2(1a)) cannot be downgraded to any pre-6.2(1) version (e.g., 6.1(4h)). + +Upgrading from pre-6.2(1) to 6.2(1)+ is supported; however, rollback (downgrade) after such an upgrade is not possible. + +This check alerts you if you are crossing the 6.2 boundary, beyond which downgrade compatibility is lost. No additional user action is required. + +!!! note + Switch upgrade architecture hasn't been changed in 6.2(1)/16.2(1). The limitation of downgrade compatibility between pre-/post-6.2(1) versions is only for APIC. + +!!! example + These are examples for upgrade/downgrade paths to show which downgrade compatibility is lost. + + Upgrade: + + * 6.1(4) -> **6.2(1)**: Supported + * **6.2(1)** -> 6.2(2): Supported + + Downgrade: + + * **6.2(1)** -> 6.1(4): Not Supported !!! - The API request gets rejected on APIC. + * 6.2(2) -> **6.2(1)**: Supported + + Note that this is just one example. See [APIC Upgrade/Downgrade Matrix][2] for the full list of supported version combinations. + +!!! tip + Make sure to collect the latest configuration backup before you upgrade your APICs from pre-6.2(1) to 6.2(1)+ so that Cisco TAC can perform the fabric recovery process in the case of emergency where you need to downgrade your APICs to the previous version (i.e. 6.2(1)+ -> pre-6.2(1)). + + If it's for a lab environment, you can initialize the fabric and perform a fresh ISO installation of pre-6.2(1) on APICs. 
+ + ## Fault Check Details ### APIC Disk Space Usage diff --git a/tests/checks/apic_downgrade_compat_warning_check/test_apic_downgrade_compat_warning_check.py b/tests/checks/apic_downgrade_compat_warning_check/test_apic_downgrade_compat_warning_check.py new file mode 100644 index 0000000..2b88686 --- /dev/null +++ b/tests/checks/apic_downgrade_compat_warning_check/test_apic_downgrade_compat_warning_check.py @@ -0,0 +1,32 @@ +import importlib +import logging +import os +import pytest + +script = importlib.import_module("aci-preupgrade-validation-script") + +log = logging.getLogger(__name__) +dir = os.path.dirname(os.path.abspath(__file__)) + +test_function = "apic_downgrade_compat_warning_check" + + +@pytest.mark.parametrize( + "cversion, tversion, expected_result", + [ + (None, None, script.MANUAL), + ("4.2(1b)", None, script.MANUAL), + (None, "5.2(2a)", script.MANUAL), + ("5.2(2a)", "6.1(4a)", script.NA), + ("6.1(3a)", "6.1(4c)", script.NA), + ("6.1(3a)", "6.2(1a)", script.MANUAL), + ("6.1(3a)", "6.2(2a)", script.MANUAL), + ("6.2(1a)", "6.2(2c)", script.NA), + ], +) +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion) if cversion else None, + tversion=script.AciVersion(tversion) if tversion else None, + ) + assert result.result == expected_result From 46612d0012e3bd6baddb755970faeabea9d0b8e7 Mon Sep 17 00:00:00 2001 From: Enrique Estrada <jeestrad@cisco.com> Date: Tue, 27 Jan 2026 17:25:36 -0600 Subject: [PATCH 14/14] corrected to new pytest structure --- .../test_service_ep_flag_bd_check.py | 13 +++++++++---- .../service_ep_flag_bd_check/vnsLIfCtx-na.json | 0 .../service_ep_flag_bd_check/vnsLIfCtx-neg.json | 0 .../service_ep_flag_bd_check/vnsLIfCtx-pos.json | 0 4 files changed, 9 insertions(+), 4 deletions(-) rename tests/{ => checks}/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py (87%) rename tests/{ => checks}/service_ep_flag_bd_check/vnsLIfCtx-na.json (100%) rename 
tests/{ => checks}/service_ep_flag_bd_check/vnsLIfCtx-neg.json (100%) rename tests/{ => checks}/service_ep_flag_bd_check/vnsLIfCtx-pos.json (100%) diff --git a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py b/tests/checks/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py similarity index 87% rename from tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py rename to tests/checks/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py index ed7f2c9..707e437 100644 --- a/tests/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py +++ b/tests/checks/service_ep_flag_bd_check/test_service_ep_flag_bd_check.py @@ -9,7 +9,8 @@ log = logging.getLogger(__name__) dir = os.path.dirname(os.path.abspath(__file__)) - +test_function = "service_ep_flag_bd_check" + # icurl queries vnsLIfCtx_api = "vnsLIfCtx.json" vnsLIfCtx_api += "?query-target=self&rsp-subtree=children" @@ -83,6 +84,10 @@ ), ], ) -def test_logic(mock_icurl, icurl_outputs, cversion, tversion, expected_result): - result = script.service_ep_flag_bd_check(1, 1, script.AciVersion(cversion), script.AciVersion(tversion) if tversion else None) - assert result == expected_result + +def test_logic(run_check, mock_icurl, cversion, tversion, expected_result): + result = run_check( + cversion=script.AciVersion(cversion), + tversion=script.AciVersion(tversion) if tversion else None, + ) + assert result.result == expected_result \ No newline at end of file diff --git a/tests/service_ep_flag_bd_check/vnsLIfCtx-na.json b/tests/checks/service_ep_flag_bd_check/vnsLIfCtx-na.json similarity index 100% rename from tests/service_ep_flag_bd_check/vnsLIfCtx-na.json rename to tests/checks/service_ep_flag_bd_check/vnsLIfCtx-na.json diff --git a/tests/service_ep_flag_bd_check/vnsLIfCtx-neg.json b/tests/checks/service_ep_flag_bd_check/vnsLIfCtx-neg.json similarity index 100% rename from tests/service_ep_flag_bd_check/vnsLIfCtx-neg.json rename to tests/checks/service_ep_flag_bd_check/vnsLIfCtx-neg.json 
diff --git a/tests/service_ep_flag_bd_check/vnsLIfCtx-pos.json b/tests/checks/service_ep_flag_bd_check/vnsLIfCtx-pos.json similarity index 100% rename from tests/service_ep_flag_bd_check/vnsLIfCtx-pos.json rename to tests/checks/service_ep_flag_bd_check/vnsLIfCtx-pos.json