From 248bc6c58a8ee9a562975b9237130c89b5cf4297 Mon Sep 17 00:00:00 2001
From: Wei Zhou
Date: Wed, 29 Oct 2025 15:45:02 +0100
Subject: [PATCH 1/2] smoke test: support ceph storage pools

---
 .../smoke/test_backup_recovery_nas.py             |  2 +-
 test/integration/smoke/test_direct_download.py    |  4 ++--
 .../smoke/test_human_readable_logs.py             |  8 ++++----
 test/integration/smoke/test_over_provisioning.py  |  4 ++--
 test/integration/smoke/test_snapshots.py          |  7 +++----
 test/integration/smoke/test_vm_life_cycle.py      | 17 +++++++++++------
 test/integration/smoke/test_vm_snapshots.py       |  6 ++++++
 test/integration/smoke/test_volumes.py            | 13 +++++++++++++
 8 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/test/integration/smoke/test_backup_recovery_nas.py b/test/integration/smoke/test_backup_recovery_nas.py
index 409a08acc9f0..93ae7644db2d 100644
--- a/test/integration/smoke/test_backup_recovery_nas.py
+++ b/test/integration/smoke/test_backup_recovery_nas.py
@@ -51,7 +51,7 @@ def setUpClass(cls):
 
         cls.storage_pool = StoragePool.list(cls.api_client)[0]
         if cls.storage_pool.type.lower() != 'networkfilesystem':
-            cls.skipTest(cls, reason="Test can be run only if the primary storage is of type NFS")
+            cls.skipTest(cls, reason="Test can be run only if the primary storage is of type NFS. The pool type is %s " % cls.storage_pool.type)
 
         # Check backup configuration values, set them to enable the nas provider
         backup_enabled_cfg = Configurations.list(cls.api_client, name='backup.framework.enabled')
diff --git a/test/integration/smoke/test_direct_download.py b/test/integration/smoke/test_direct_download.py
index 6570bb9f0b3c..a94353875b2f 100644
--- a/test/integration/smoke/test_direct_download.py
+++ b/test/integration/smoke/test_direct_download.py
@@ -218,7 +218,7 @@ def setUpClass(cls):
             zoneid=cls.zone.id
         )
         for pool in storage_pools:
-            if not cls.nfsStorageFound and pool.type == "NetworkFilesystem":
+            if not cls.nfsStorageFound and pool.type in ("NetworkFilesystem", "RBD"):
                 cls.nfsStorageFound = True
                 cls.nfsPoolId = pool.id
             elif not cls.localStorageFound and pool.type == "Filesystem":
@@ -295,7 +295,7 @@ def deployVM(self, offering) :
     @skipTestIf("nfsKvmNotAvailable")
     @attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false")
     def test_01_deploy_vm_from_direct_download_template_nfs_storage(self):
-        """Test Deploy VM from direct download template on NFS storage
+        """Test Deploy VM from direct download template on NFS or RBD storage
         """
 
         # Create service offering for local storage using storage tags
diff --git a/test/integration/smoke/test_human_readable_logs.py b/test/integration/smoke/test_human_readable_logs.py
index fb972511f9c1..c2478bfe5766 100644
--- a/test/integration/smoke/test_human_readable_logs.py
+++ b/test/integration/smoke/test_human_readable_logs.py
@@ -48,9 +48,9 @@ def test_01_disableHumanReadableLogs(self):
         sshClient.execute(command)
 
         # CapacityChecker runs as soon as management server is up
-        # Check if "usedMem: (" is printed out within 60 seconds while server is starting
-        command = "timeout 60 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('"
-        sshClient.timeout = 60
+        # Check if "usedMem: (" is printed out within 120 seconds while server is starting
+        command = "timeout 120 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('"
+        sshClient.timeout = 120
         result = sshClient.runCommand(command)
 
         self.assertTrue(result['status'] == "FAILED")
@@ -70,7 +70,7 @@ def test_02_enableHumanReadableLogs(self):
         sshClient.execute(command)
 
         # CapacityChecker runs as soon as management server is up
-        # Check if "usedMem: (" is printed out within 60 seconds while server is restarting
+        # Check if "usedMem: (" is printed out within 120 seconds while server is restarting
         command = "timeout 120 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('"
         sshClient.timeout = 120
         result = sshClient.runCommand(command)
diff --git a/test/integration/smoke/test_over_provisioning.py b/test/integration/smoke/test_over_provisioning.py
index c2b1a5ac2052..70a3b84b9cfb 100644
--- a/test/integration/smoke/test_over_provisioning.py
+++ b/test/integration/smoke/test_over_provisioning.py
@@ -60,10 +60,10 @@ def test_UpdateStorageOverProvisioningFactor(self):
                 "The environment don't have storage pools required for test")
 
         for pool in storage_pools:
-            if pool.type == "NetworkFilesystem" or pool.type == "VMFS" or pool.type == "PowerFlex":
+            if pool.type in ("NetworkFilesystem", "VMFS", "PowerFlex", "RBD"):
                 break
 
-        if pool.type != "NetworkFilesystem" and pool.type != "VMFS" and pool.type != "PowerFlex":
+        if pool.type not in ("NetworkFilesystem", "VMFS", "PowerFlex", "RBD"):
             raise self.skipTest("Storage overprovisioning currently not supported on " + pool.type + " pools")
 
         self.poolId = pool.id
diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py
index b1a2569d9694..284d8959df56 100644
--- a/test/integration/smoke/test_snapshots.py
+++ b/test/integration/smoke/test_snapshots.py
@@ -262,7 +262,7 @@ def test_02_list_snapshots_with_removed_data_store(self):
 
         volume_pool_response = list_storage_pools(self.apiclient, id=vol_res[0].storageid)
         volume_pool = volume_pool_response[0]
-        if volume_pool.type.lower() != 'networkfilesystem':
+        if volume_pool.type not in ("NetworkFilesystem", "RBD"):
             self.skipTest("This test is not supported for volume created on storage pool type %s" % volume_pool.type)
         clusters = list_clusters(
             self.apiclient,
@@ -291,9 +291,8 @@
             'Up',
             "Check primary storage state"
         )
-        self.assertEqual(
-            storage.type,
-            'NetworkFilesystem',
+        self.assertTrue(
+            storage.type in ("NetworkFilesystem", "RBD"),
             "Check storage pool type"
         )
         storage_pools_response = list_storage_pools(self.apiclient,
diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py
index 8df0b994a555..59e33d7d7908 100644
--- a/test/integration/smoke/test_vm_life_cycle.py
+++ b/test/integration/smoke/test_vm_life_cycle.py
@@ -48,6 +48,7 @@
                                get_suitable_test_template,
                                get_test_ovf_templates,
                                list_hosts,
+                               list_storage_pools,
                                get_vm_vapp_configs)
 from marvin.codes import FAILED, PASS
 from nose.plugins.attrib import attr
@@ -1707,12 +1708,16 @@ def get_target_host(self, virtualmachineid):
 
         return target_hosts[0]
 
-    def get_target_pool(self, volid):
-        target_pools = StoragePool.listForMigration(self.apiclient, id=volid)
+    def get_target_pool(self, vol):
+        target_pools = StoragePool.listForMigration(self.apiclient, id=vol.id)
 
         if target_pools is None or len(target_pools) == 0:
             self.skipTest("Not enough storage pools found for migration")
 
+        source_pool = list_storage_pools(self.apiclient, id=vol.storageid)[0]
+        if source_pool.type == 'RBD' and target_pools[0].type == 'RBD':
+            self.skipTest("Live VM migration between RBD pools is unsupported")
+
         return target_pools[0]
 
     def get_vm_volumes(self, id):
@@ -1751,7 +1756,7 @@ def test_01_migrate_VM_and_root_volume(self):
 
         root_volume = self.get_vm_volumes(vm.id)[0]
 
-        target_pool = self.get_target_pool(root_volume.id)
+        target_pool = self.get_target_pool(root_volume)
 
         target_host = self.get_target_host(vm.id)
 
@@ -1789,9 +1794,9 @@ def test_02_migrate_VM_with_two_data_disks(self):
 
         root_volume = self.get_vm_volumes(vm.id)[0]
 
-        target_pool = self.get_target_pool(root_volume.id)
-        volume1.target_pool = self.get_target_pool(volume1.id)
-        volume2.target_pool = self.get_target_pool(volume2.id)
+        target_pool = self.get_target_pool(root_volume)
+        volume1.target_pool = self.get_target_pool(volume1)
+        volume2.target_pool = self.get_target_pool(volume2)
 
         target_host = self.get_target_host(vm.id)
 
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index 8c106f05a9f6..af441b44020f 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -47,6 +47,11 @@ def setUpClass(cls):
             cls.unsupportedHypervisor = True
             return
 
+        list_volume_pool_response = list_storage_pools(cls.apiclient)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type == "RBD":
+            cls.skipTest(cls, reason="VM snapshot is unsupported for VMs on RBD storage pool")
+
         cls.services = testClient.getParsedTestDataConfig()
         # Get Zone, Domain and templates
         cls.domain = get_domain(cls.apiclient)
@@ -89,6 +94,7 @@ def setUpClass(cls):
             serviceofferingid=cls.service_offering.id,
             mode=cls.zone.networktype
         )
+        cls._cleanup.append(cls.virtual_machine)
         volumes = list_volumes(
             cls.apiclient,
             virtualmachineid=cls.virtual_machine.id,
diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py
index 6cf3f082bc22..d54bc619fef1 100644
--- a/test/integration/smoke/test_volumes.py
+++ b/test/integration/smoke/test_volumes.py
@@ -1039,6 +1039,14 @@ def test_13_migrate_volume_and_change_offering(self):
         else:
             raise self.skipTest("Not enough storage pools found, skipping test")
 
+        volume = Volume.list(self.apiclient,
+                             id=volume.id,
+                             account=self.account.name,
+                             domainid=self.account.domainid)[0]
+        source_pool = list_storage_pools(self.apiclient, id=volume.storageid)[0]
+        if source_pool.type == 'RBD' and pool.type == 'RBD':
+            self.skipTest("Volume migration between RBD pools is unsupported")
+
         if hasattr(pool, 'tags'):
             StoragePool.update(self.apiclient, id=pool.id, tags="")
 
@@ -1111,6 +1119,11 @@ def setUpClass(cls):
             cls.unsupportedHypervisor = True
             return
 
+        list_volume_pool_response = list_storage_pools(cls.apiclient)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type == "RBD":
+            cls.skipTest(cls, reason="Volume encryption is unsupported for volumes on RBD storage pool")
+
         # Get Zone and Domain
         cls.domain = get_domain(cls.apiclient)
         cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())

From ddfdacfce4bea88bb384147e4507f111c2394803 Mon Sep 17 00:00:00 2001
From: Wei Zhou
Date: Thu, 30 Oct 2025 20:45:30 +0100
Subject: [PATCH 2/2] test: fix test failures

---
 test/integration/smoke/test_vm_life_cycle.py | 3 ++-
 test/integration/smoke/test_vm_snapshots.py  | 1 -
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py
index 59e33d7d7908..5ed35f81b912 100644
--- a/test/integration/smoke/test_vm_life_cycle.py
+++ b/test/integration/smoke/test_vm_life_cycle.py
@@ -1714,7 +1714,8 @@ def get_target_pool(self, vol):
         if target_pools is None or len(target_pools) == 0:
             self.skipTest("Not enough storage pools found for migration")
 
-        source_pool = list_storage_pools(self.apiclient, id=vol.storageid)[0]
+        volume = Volume.list(self.apiclient, id=vol.id)[0]
+        source_pool = list_storage_pools(self.apiclient, id=volume.storageid)[0]
         if source_pool.type == 'RBD' and target_pools[0].type == 'RBD':
             self.skipTest("Live VM migration between RBD pools is unsupported")
 
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index af441b44020f..b1c9fe0e71ee 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -94,7 +94,6 @@ def setUpClass(cls):
             serviceofferingid=cls.service_offering.id,
             mode=cls.zone.networktype
         )
-        cls._cleanup.append(cls.virtual_machine)
         volumes = list_volumes(
             cls.apiclient,
             virtualmachineid=cls.virtual_machine.id,