|
| 1 | +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
| 2 | +# |
| 3 | +# Copyright Red Hat |
| 4 | +# |
| 5 | +# SPDX-License-Identifier: GPL-2.0 |
| 6 | +# |
| 7 | +# Author: Liang Cong <[email protected]> |
| 8 | +# |
| 9 | +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
| 10 | +import json |
| 11 | +import os |
| 12 | +import re |
| 13 | + |
| 14 | +from avocado.utils import memory as avocado_mem |
| 15 | +from avocado.utils import process |
| 16 | + |
| 17 | +from virttest import libvirt_version |
| 18 | +from virttest import virsh |
| 19 | +from virttest.libvirt_xml import vm_xml |
| 20 | +from virttest.libvirt_xml.devices.memory import Memory |
| 21 | +from virttest.staging import utils_memory |
| 22 | +from virttest.utils_libvirtd import Libvirtd |
| 23 | + |
| 24 | +from provider.memory import memory_base |
| 25 | + |
| 26 | + |
def run(test, params, env):
    """
    Verify nvdimm memory device works with various memory backing type

    :param test: avocado test object
    :param params: dict, test parameters from the cfg file
    :param env: avocado-vt test environment object holding the VM
    """
    def setup_test():
        """
        Test setup:
        1. Set up hugepage
        2. Build nvdimm memory device list
        3. Build memory backing config
        """
        def is_in_range(num, range_str):
            """
            Check if the number is in the given range string

            :param num: int, target number
            :param range_str: string, range string to check, e.g. "0-2", "1" or ""
            :return: bool, True if number is in range string, False otherwise
            """
            if '-' in range_str:
                start, end = map(int, range_str.split('-'))
                return start <= num <= end
            # An empty range string matches nothing
            return num == int(range_str) if range_str else False

        nonlocal nvdimm_path_list, memory_backing_dict
        test.log.info("TEST_SETUP: Set up hugepage")
        # Reserve enough default-size hugepages to back the configured memory
        default_pagesize = avocado_mem.get_huge_page_size()
        hp_num = int(hugepage_memory) // default_pagesize
        utils_memory.set_num_huge_pages(hp_num)

        test.log.info("TEST_SETUP: Build nvdimm list")
        init_nvdimms_id_range = params.get("init_nvdimms_id_range")
        hotplug_nvdimms_id_range = params.get("hotplug_nvdimms_id_range", "")
        nvdimm_path = params.get("nvdimm_path")
        # NOTE(review): param templates are eval()'d by tp-libvirt convention;
        # values come from the test cfg file, not untrusted input
        nvdimm_source_map = eval(params.get("nvdimm_source_map").format(default_pagesize))
        nvdimm_node_map = eval(params.get("nvdimm_node_map"))
        nvdimm_dict_str = params.get("nvdimm_dict")
        for i in range(nvdimm_num):
            n_path = nvdimm_path.format(i)
            # Per-device source may be absent; missing entries yield ""
            n_source = nvdimm_source_map.get(i, "").format(default_pagesize)
            n_node = nvdimm_node_map.get(i)

            nvdimm_path_list.append(n_path)
            # Create the nvdimm backing file of the configured size (KiB)
            process.run(f"truncate -s {nvdimm_size}k {n_path}", verbose=True)

            nvdimm_dict = eval(nvdimm_dict_str.format(n_path, n_source, n_node))
            # A device id may fall in both ranges, i.e. the same device dict can
            # be used for cold-plug and hot-plug lists
            if is_in_range(i, init_nvdimms_id_range):
                init_nvdimms.append(nvdimm_dict)
            if is_in_range(i, hotplug_nvdimms_id_range):
                hotplug_nvdimms.append(nvdimm_dict)

        test.log.info("TEST_SETUP: Build memory backing config")
        # Fill the page size into the backing template, then eval to a dict
        memory_backing_dict = params.get("memory_backing_dict").format(default_pagesize)
        memory_backing_dict = eval(memory_backing_dict)
| 81 | + |
| 82 | + def check_nvdimm_prealloc(exp_prealloc_list): |
| 83 | + """ |
| 84 | + Verify nvdimm memory preallocation status against expected values |
| 85 | +
|
| 86 | + :param exp_prealloc_list: expected nvdimm memory preallocation list |
| 87 | + """ |
| 88 | + preallocated_cmd = params.get("preallocated_cmd") |
| 89 | + preallocated_cmd_protocal = params.get("preallocated_cmd_protocal") |
| 90 | + pattern = params.get("preallocated_pattern") |
| 91 | + |
| 92 | + ret = virsh.qemu_monitor_command(vm_name, preallocated_cmd, |
| 93 | + preallocated_cmd_protocal, debug=True) |
| 94 | + test.log.debug(f"qemu-monitor-command '{preallocated_cmd}' result: {ret.stdout_text}") |
| 95 | + matches = sorted(re.findall(fr'{pattern}', ret.stdout_text, re.DOTALL)) |
| 96 | + actual_prealloc_list = [prealloc for _, prealloc in matches] |
| 97 | + if actual_prealloc_list != exp_prealloc_list: |
| 98 | + test.fail( |
| 99 | + f"Expected preallocated list is {exp_prealloc_list}, but found {actual_prealloc_list}") |
| 100 | + nonlocal nvdimm_name_list |
| 101 | + nvdimm_name_list = [name for name, _ in matches] |
| 102 | + |
| 103 | + def check_qemu_object_property(name_list, obj_property, exp_list): |
| 104 | + """ |
| 105 | + Verify qemu object properties against expected values |
| 106 | +
|
| 107 | + :param name_list: list of qemu object names |
| 108 | + :param obj_property: qemu object property name |
| 109 | + :param exp_list: expected values for the property |
| 110 | + """ |
| 111 | + qom_cmd_template = params.get("qom_cmd_template") |
| 112 | + for index, name in enumerate(name_list): |
| 113 | + qom_cmd = qom_cmd_template.format(name, obj_property) |
| 114 | + ret = virsh.qemu_monitor_command(vm_name, qom_cmd, debug=True) |
| 115 | + data_dict = json.loads(ret.stdout_text) |
| 116 | + exp_value = exp_list[index] |
| 117 | + |
| 118 | + if "return" not in data_dict: |
| 119 | + test.fail(f"QOM command {qom_cmd} doesn't have return value: {ret.stdout_text}") |
| 120 | + if exp_value != data_dict["return"]: |
| 121 | + test.fail( |
| 122 | + f"Expected nvdimm {name} {obj_property} is {exp_value}, but found {data_dict['return']}") |
| 123 | + |
    def run_test():
        """
        Test steps:
        1. Define the guest
        2. Start the guest
        3: Restart the libvirt daemon
        4: Hot-plug nvdimm devices
        5: Check nvdimm memory backend pre-allocated
        6: Check nvdimm memory backing type
        7: Check nvdimm memory backing path
        8: Check nvdimm memory allocation threads
        9: Login the guest and create file on each nvdimm device
        """
        test.log.info("TEST_STEP1: Define the guest")
        # Merge the memory backing config built in setup_test into the vm attrs
        vm_attrs.update(memory_backing_dict)
        memory_base.define_guest_with_memory_device(params, init_nvdimms, vm_attrs)

        test.log.info("TEST_STEP2: Start guest")
        vm.start()

        test.log.info("TEST_STEP3: Restart the libvirt deamon")
        # Daemon restart must not disturb the running guest or its nvdimms
        Libvirtd().restart()

        if hotplug_nvdimms:
            test.log.info("TEST_STEP4: Hot-plug nvdimm devices")
            for nvdimm in hotplug_nvdimms:
                nvdimm_xml_dev = Memory()
                nvdimm_xml_dev.setup_attrs(**nvdimm)
                virsh.attach_device(vm_name, nvdimm_xml_dev.xml, **virsh_args)

        test.log.info("TEST_STEP5: Check nvdimm memory backend pre-allocated")
        # All nvdimms (cold- and hot-plugged) share the same expected prealloc
        check_nvdimm_prealloc([exp_prealloc] * nvdimm_num)

        test.log.info("TEST_STEP6: Check nvdimm memory backing type")
        # nvdimm_name_list was populated by check_nvdimm_prealloc above
        check_qemu_object_property(nvdimm_name_list, property_type, [exp_type] * nvdimm_num)

        test.log.info("TEST_STEP7: Check nvdimm memory backing path")
        check_qemu_object_property(nvdimm_name_list, property_mempath, nvdimm_path_list)

        if alloc_mode in ["immediate_with_threads", "hugepage_nodeset"]:
            test.log.info("TEST_STEP8: Check nvdimm memory allocation threads")
            check_qemu_object_property(nvdimm_name_list, property_threads, [
                int(threads)] * nvdimm_num)

        test.log.info("TEST_STEP9: Login the guest and create file on each nvdimm device")
        # Writing a file on every nvdimm proves the devices are usable in-guest
        with vm.wait_for_login() as session:
            for i in range(nvdimm_num):
                memory_base.create_file_within_nvdimm_disk(
                    test, session, test_device=nvdimm_device.format(i),
                    mount_point=nvdimm_mount_point.format(i), test_file=nvdimm_file.format(i, i),
                    test_str=file_content)
| 175 | + |
| 176 | + def teardown_test(): |
| 177 | + """ |
| 178 | + Clean up environment after test |
| 179 | + 1. Remove hugepage |
| 180 | + 2. Restore domain xml |
| 181 | + 3. Remove nvdimm backing files |
| 182 | + """ |
| 183 | + utils_memory.set_num_huge_pages(0) |
| 184 | + bkxml.sync() |
| 185 | + for n_path in nvdimm_path_list: |
| 186 | + if os.path.exists(n_path): |
| 187 | + os.remove(n_path) |
| 188 | + |
    # Skip the test early if the running libvirt lacks the required feature
    libvirt_version.is_libvirt_feature_supported(params)
    virsh_args = {'debug': True, 'ignore_status': False}
    vm_name = params.get("main_vm")
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so teardown_test can restore the original guest
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    # State shared with the nested helpers (mutated via nonlocal/append)
    nvdimm_path_list = []
    memory_backing_dict = {}
    nvdimm_name_list = []
    init_nvdimms = []
    hotplug_nvdimms = []
    hugepage_memory = params.get("hugepage_memory")
    vm_attrs = eval(params.get("vm_attrs"))
    # NOTE(review): this raw template string is replaced with the eval()'d
    # dict inside setup_test; the assignment here is effectively redundant
    memory_backing_dict = params.get("memory_backing_dict")
    nvdimm_size = params.get("nvdimm_size")
    nvdimm_num = int(params.get("nvdimm_num"))
    exp_type = params.get("exp_type")
    exp_prealloc = params.get("exp_prealloc")
    threads = params.get("threads")
    nvdimm_device = params.get("nvdimm_device")
    nvdimm_mount_point = params.get("nvdimm_mount_point")
    nvdimm_file = params.get("nvdimm_file")
    file_content = params.get("file_content")

    # QOM property names and expected-check selectors from the cfg file
    property_type = params.get("property_type")
    property_mempath = params.get("property_mempath")
    property_threads = params.get("property_threads")
    alloc_mode = params.get("alloc_mode")

    try:
        setup_test()
        run_test()

    finally:
        # Always clean up, even when setup or the test steps fail midway
        teardown_test()