105 changes: 90 additions & 15 deletions tests/storage/glusterfs/conftest.py
@@ -1,16 +1,36 @@
from __future__ import annotations

import pytest

import logging
from dataclasses import dataclass

from lib.common import exec_nofail, raise_errors, setup_formatted_and_mounted_disk, teardown_formatted_and_mounted_disk
from lib.netutil import is_ipv6

from typing import TYPE_CHECKING, Generator

if TYPE_CHECKING:
from lib.host import Host
from lib.pool import Pool
from lib.sr import SR
from lib.vdi import VDI
from lib.vm import VM

# explicit import for package-scope fixtures
from pkgfixtures import pool_with_saved_yum_state

GLUSTERFS_PORTS = [('24007', 'tcp'), ('49152:49251', 'tcp')]

def _setup_host_with_glusterfs(host):
@dataclass
class GlusterFsConfig:
uninstall_glusterfs: bool = True

@pytest.fixture(scope='package')
def _glusterfs_config() -> GlusterFsConfig:
return GlusterFsConfig()

def _setup_host_with_glusterfs(host: Host):
for service in ['iptables', 'ip6tables']:
host.ssh(['cp', '/etc/sysconfig/%s' % service, '/etc/sysconfig/%s.orig' % service])

@@ -30,13 +50,18 @@ def _setup_host_with_glusterfs(host):

host.ssh(['systemctl', 'enable', '--now', 'glusterd.service'])

def _teardown_host_with_glusterfs(host):
def _uninstall_host_glusterfs(host: Host):
errors = []
errors += exec_nofail(lambda: host.ssh(['systemctl', 'disable', '--now', 'glusterd.service']))

# Remove any remaining gluster-related data to avoid issues in future test runs
errors += exec_nofail(lambda: host.ssh(['rm', '-rf', '/var/lib/glusterd']))

raise_errors(errors)

def _restore_host_iptables(host: Host):
errors = []

iptables = 'ip6tables' if is_ipv6(host.hostname_or_ip) else 'iptables'
for h in host.pool.hosts:
hostname_or_ip = h.hostname_or_ip
@@ -56,7 +81,7 @@ def _teardown_host_with_glusterfs(host):
raise_errors(errors)

@pytest.fixture(scope='package')
def pool_without_glusterfs(host):
def pool_without_glusterfs(host: Host) -> Generator[Pool]:
for h in host.pool.hosts:
if h.file_exists('/usr/sbin/glusterd'):
raise Exception(
@@ -65,28 +90,57 @@ def pool_without_glusterfs(host):
yield host.pool

@pytest.fixture(scope='package')
def pool_with_glusterfs(pool_without_glusterfs, pool_with_saved_yum_state):
def pool_with_glusterfs(
pool_without_glusterfs: Pool,
pool_with_saved_yum_state: Pool,
_glusterfs_config: GlusterFsConfig
) -> Generator[Pool]:

def _host_rollback(host: Host):
_uninstall_host_glusterfs(host)
_restore_host_iptables(host)

def _disable_yum_rollback(host: Host):
host.saved_rollback_id = None

pool = pool_with_saved_yum_state
pool.exec_on_hosts_on_error_rollback(_setup_host_with_glusterfs, _teardown_host_with_glusterfs)
pool.exec_on_hosts_on_error_rollback(_setup_host_with_glusterfs, _host_rollback)

yield pool
pool.exec_on_hosts_on_error_continue(_teardown_host_with_glusterfs)

if not _glusterfs_config.uninstall_glusterfs:
pool.exec_on_hosts_on_error_continue(_disable_yum_rollback)
return

pool.exec_on_hosts_on_error_continue(_uninstall_host_glusterfs)
pool.exec_on_hosts_on_error_continue(_restore_host_iptables)

@pytest.fixture(scope='package')
def gluster_disk(pool_with_unused_512B_disk, unused_512B_disks):
def gluster_disk(
pool_with_unused_512B_disk: Pool,
unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]],
_glusterfs_config: GlusterFsConfig,
) -> Generator[None]:
pool = pool_with_unused_512B_disk
mountpoint = '/mnt/sr_disk'
for h in pool.hosts:
sr_disk = unused_512B_disks[h][0]["name"]
setup_formatted_and_mounted_disk(h, sr_disk, 'xfs', mountpoint)

yield

if not _glusterfs_config.uninstall_glusterfs:
logging.warning("<< leave fstab and keep mountpoints place for manual cleanup")
return

pool.exec_on_hosts_on_error_continue(
lambda h: teardown_formatted_and_mounted_disk(h, mountpoint)
)

def _fallback_gluster_teardown(host):
def _fallback_gluster_teardown(host: Host):
# See: https://microdevsys.com/wp/volume-delete-volume-failed-some-of-the-peers-are-down/
# Remove all peers and bricks from the host's volume, then stop and destroy the volume.
def teardown_for_host(h):
def teardown_for_host(h: Host):
logging.info("< Fallback teardown on host: %s" % h)
hosts = h.pool.hosts

@@ -123,7 +177,12 @@ def teardown_for_host(h):
pass

@pytest.fixture(scope='package')
def gluster_volume_started(host, hostA2, gluster_disk):
def gluster_volume_started(
host: Host,
hostA2: Host,
gluster_disk: None,
_glusterfs_config: GlusterFsConfig
) -> Generator[None]:
hosts = host.pool.hosts

if is_ipv6(host.hostname_or_ip):
@@ -157,7 +216,13 @@ def gluster_volume_started(host, hostA2, gluster_disk):
host.ssh(['gluster', 'volume', 'set', 'vol0', 'network.ping-timeout', '5'])

host.ssh(['gluster', 'volume', 'start', 'vol0'])

yield

if not _glusterfs_config.uninstall_glusterfs:
logging.warning("<< leave gluster volume vol0 in place for manual cleanup")
return

logging.info("<< stop and delete gluster volume vol0")
try:
host.ssh(['gluster', '--mode=script', 'volume', 'stop', 'vol0'])
@@ -173,7 +238,7 @@ def gluster_volume_started(host, hostA2, gluster_disk):


@pytest.fixture(scope='package')
def glusterfs_device_config(host):
def glusterfs_device_config(host: Host) -> dict[str, str]:
backup_servers = []
for h in host.pool.hosts[1:]:
backup_servers.append(h.hostname_or_ip)
@@ -184,22 +249,32 @@ def glusterfs_device_config(host):
}

@pytest.fixture(scope='package')
def glusterfs_sr(host, pool_with_glusterfs, gluster_volume_started, glusterfs_device_config):
def glusterfs_sr(
host: Host,
pool_with_glusterfs: Pool,
gluster_volume_started: None,
glusterfs_device_config: dict[str, str],
_glusterfs_config: GlusterFsConfig
) -> Generator[SR]:
""" A GlusterFS SR on first host. """
# Create the SR
sr = host.sr_create('glusterfs', "GlusterFS-SR-test", glusterfs_device_config, shared=True)
yield sr
# teardown
sr.destroy()
try:
sr.destroy()
except Exception as e:
_glusterfs_config.uninstall_glusterfs = False
raise pytest.fail("Could not destroy glusterfs SR, leaving packages in place for manual cleanup") from e

@pytest.fixture(scope='module')
def vdi_on_glusterfs_sr(glusterfs_sr):
def vdi_on_glusterfs_sr(glusterfs_sr: SR) -> Generator[VDI]:
vdi = glusterfs_sr.create_vdi('GlusterFS-VDI-test')
yield vdi
vdi.destroy()

@pytest.fixture(scope='module')
def vm_on_glusterfs_sr(host, glusterfs_sr, vm_ref):
def vm_on_glusterfs_sr(host: Host, glusterfs_sr: SR, vm_ref: str) -> Generator[VM]:
vm = host.import_vm(vm_ref, sr_uuid=glusterfs_sr.uuid)
yield vm
# teardown
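Note on the pattern introduced above: pytest finalizes fixtures in reverse setup order, so the teardown of glusterfs_sr runs before the teardowns of gluster_volume_started, gluster_disk and pool_with_glusterfs. When sr.destroy() fails, the shared GlusterFsConfig instance has uninstall_glusterfs flipped to False, and every later teardown checks that flag and returns early, leaving packages, mountpoints and the gluster volume in place for manual cleanup (the yum rollback is also disabled by clearing saved_rollback_id). The same pattern is repeated for LINSTOR in the next file. A minimal, self-contained sketch of the idea follows; all names in it are illustrative and not taken from this test suite.

# Sketch of the "skip cleanup when a later teardown fails" pattern.
# CleanupConfig, provisioned_state, fragile_resource and destroy() are
# hypothetical names; nothing below is copied from the PR.
from __future__ import annotations

from dataclasses import dataclass
from typing import Generator

import pytest


@dataclass
class CleanupConfig:
    do_cleanup: bool = True  # flipped to False when a later teardown step fails


@pytest.fixture(scope='package')
def _cleanup_config() -> CleanupConfig:
    # One mutable object shared by all package-scoped fixtures, so a failure
    # in one teardown can tell the earlier fixtures to keep their state.
    return CleanupConfig()


@pytest.fixture(scope='package')
def provisioned_state(_cleanup_config: CleanupConfig) -> Generator[dict, None, None]:
    state = {'installed': True}  # stands in for packages, mounts, firewall rules
    yield state
    if not _cleanup_config.do_cleanup:
        return  # leave everything in place for manual inspection
    state['installed'] = False  # normal cleanup path


@pytest.fixture(scope='package')
def fragile_resource(provisioned_state: dict,
                     _cleanup_config: CleanupConfig) -> Generator[dict, None, None]:
    resource = {'name': 'demo'}  # stands in for the shared SR
    yield resource
    try:
        destroy(resource)  # hypothetical destroy step that may raise
    except Exception:
        _cleanup_config.do_cleanup = False  # later teardowns now skip cleanup
        pytest.fail("could not destroy resource, leaving provisioned state in place")


def destroy(resource: dict) -> None:
    # Hypothetical stand-in for sr.destroy(); a real implementation may raise.
    resource.clear()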
60 changes: 47 additions & 13 deletions tests/storage/linstor/conftest.py
@@ -5,6 +5,7 @@
import functools
import logging
import os
from dataclasses import dataclass

import lib.commands as commands

@@ -16,16 +17,28 @@
if TYPE_CHECKING:
from lib.host import Host
from lib.pool import Pool
from lib.sr import SR
from lib.vdi import VDI

GROUP_NAME = 'linstor_group'
STORAGE_POOL_NAME = f'{GROUP_NAME}/thin_device'
LINSTOR_RELEASE_PACKAGE = 'xcp-ng-release-linstor'
LINSTOR_PACKAGE = 'xcp-ng-linstor'

@dataclass
class LinstorConfig:
uninstall_linstor: bool = True

@pytest.fixture(scope='package')
def _linstor_config() -> LinstorConfig:
return LinstorConfig()

@pytest.fixture(scope='package')
def lvm_disks(pool_with_unused_512B_disk: Pool,
unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]],
provisioning_type: str) -> Generator[None]:
def lvm_disks(
pool_with_unused_512B_disk: Pool,
unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]],
provisioning_type: str,
) -> Generator[None]:
"""
Common LVM PVs on which an LV is created on each host of the pool.

@@ -70,19 +83,24 @@ def host_devices(host: Host) -> list[str]:
host.ssh(['pvremove', device])

@pytest.fixture(scope="package")
def storage_pool_name(provisioning_type):
def storage_pool_name(provisioning_type: str) -> str:
return GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME

@pytest.fixture(params=["thin"], scope="session")
def provisioning_type(request):
def provisioning_type(request: pytest.FixtureRequest) -> str:
return request.param

@pytest.fixture(scope='package')
def pool_with_linstor(hostA2, lvm_disks, pool_with_saved_yum_state):
def pool_with_linstor(
hostA2: Host,
lvm_disks: None,
pool_with_saved_yum_state: Pool,
_linstor_config: LinstorConfig
) -> Generator[Pool]:
import concurrent.futures
pool = pool_with_saved_yum_state

def check_linstor_installed(host):
def check_linstor_installed(host: Host):
if host.is_package_installed(LINSTOR_PACKAGE):
raise Exception(
f'{LINSTOR_PACKAGE} is already installed on host {host}. This should not be the case.'
@@ -91,7 +109,7 @@ def check_linstor_installed(host):
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(check_linstor_installed, pool.hosts)

def install_linstor(host):
def install_linstor(host: Host):
logging.info(f"Installing {LINSTOR_PACKAGE} on host {host}...")
host.yum_install([LINSTOR_RELEASE_PACKAGE])
host.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing")
@@ -105,9 +123,16 @@ def install_linstor(host):

yield pool

def _disable_yum_rollback(host: Host):
host.saved_rollback_id = None

if not _linstor_config.uninstall_linstor:
pool.exec_on_hosts_on_error_continue(_disable_yum_rollback)
return

# Need to remove this package as we have a separate run of `test_create_sr_without_linstor`
# for `thin` and `thick` `provisioning_type`.
def remove_linstor(host):
def remove_linstor(host: Host):
logging.info(f"Cleaning up python-linstor from host {host}...")
host.yum_remove(["python-linstor"])
host.restart_toolstack(verify=True)
@@ -116,23 +141,32 @@ def remove_linstor(host):
executor.map(remove_linstor, pool.hosts)

@pytest.fixture(scope='package')
def linstor_sr(pool_with_linstor, provisioning_type, storage_pool_name):
def linstor_sr(
pool_with_linstor: Pool,
provisioning_type: str,
storage_pool_name: str,
_linstor_config: LinstorConfig
) -> Generator[SR]:
sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
'group-name': storage_pool_name,
'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
'provisioning': provisioning_type
}, shared=True)
yield sr
sr.destroy()
try:
sr.destroy()
except Exception as e:
_linstor_config.uninstall_linstor = False
raise pytest.fail("Could not destroy linstor SR, leaving packages in place for manual cleanup") from e

@pytest.fixture(scope='module')
def vdi_on_linstor_sr(linstor_sr):
def vdi_on_linstor_sr(linstor_sr: SR) -> Generator[VDI]:
vdi = linstor_sr.create_vdi('LINSTOR-VDI-test')
yield vdi
vdi.destroy()

@pytest.fixture(scope='module')
def vm_on_linstor_sr(host, linstor_sr, vm_ref):
def vm_on_linstor_sr(host: Host, linstor_sr: SR, vm_ref: str):
vm = host.import_vm(vm_ref, sr_uuid=linstor_sr.uuid)
yield vm
logging.info("<< Destroy VM")
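For context on how these package-scoped fixtures are consumed, a test module only needs to name the fixture it wants; pytest resolves linstor_sr, pool_with_linstor, lvm_disks and the rest of the chain from the fixture signatures above and runs the teardowns in reverse order. The example below is hypothetical and not part of this PR; the uuid attribute access is an assumption about the VDI wrapper, used only to make the test body concrete.

# tests/storage/linstor/test_example.py (hypothetical usage, not part of this PR)
def test_vdi_created_on_linstor_sr(vdi_on_linstor_sr):
    # The VDI is created by the module-scoped fixture and destroyed in its
    # teardown; a later SR-destroy failure would leave the LINSTOR packages
    # installed for manual cleanup, as implemented in the conftest above.
    assert vdi_on_linstor_sr.uuid  # assumption: the VDI wrapper exposes a uuid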