Skip to content

Commit 7fd0b2c

Browse files
committed
glusterfs: Don't uninstall glusterfs tools when SR couldn't be destroyed
Leave glusterfs tools in place for manual cleanup after a failed SR cleanup.

Signed-off-by: Antoine Bartuccio <[email protected]>
1 parent a71b5e5 commit 7fd0b2c

File tree

1 file changed

+71
-15
lines changed

1 file changed

+71
-15
lines changed

tests/storage/glusterfs/conftest.py

Lines changed: 71 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,36 @@
1+
from __future__ import annotations
2+
13
import pytest
24

35
import logging
6+
from dataclasses import dataclass
47

58
from lib.common import exec_nofail, raise_errors, setup_formatted_and_mounted_disk, teardown_formatted_and_mounted_disk
69
from lib.netutil import is_ipv6
710

11+
from typing import TYPE_CHECKING, Generator
12+
13+
if TYPE_CHECKING:
14+
from lib.host import Host
15+
from lib.pool import Pool
16+
from lib.sr import SR
17+
from lib.vdi import VDI
18+
from lib.vm import VM
19+
820
# explicit import for package-scope fixtures
921
from pkgfixtures import pool_with_saved_yum_state
1022

1123
GLUSTERFS_PORTS = [('24007', 'tcp'), ('49152:49251', 'tcp')]
1224

13-
def _setup_host_with_glusterfs(host):
25+
@dataclass
class GlusterFsConfig:
    """Mutable, package-scoped settings shared between the GlusterFS fixtures."""
    # When False, teardown leaves the glusterfs tools installed so a
    # failed SR destroy can be cleaned up manually (cleared by the
    # glusterfs_sr fixture, honored by pool_with_glusterfs teardown).
    uninstall_glusterfs: bool = True
28+
29+
@pytest.fixture(scope='package')
def _glusterfs_config() -> GlusterFsConfig:
    """Single GlusterFsConfig instance shared across the whole package."""
    config = GlusterFsConfig()
    return config
32+
33+
def _setup_host_with_glusterfs(host: Host):
1434
for service in ['iptables', 'ip6tables']:
1535
host.ssh(['cp', '/etc/sysconfig/%s' % service, '/etc/sysconfig/%s.orig' % service])
1636

@@ -30,13 +50,18 @@ def _setup_host_with_glusterfs(host):
3050

3151
host.ssh(['systemctl', 'enable', '--now', 'glusterd.service'])
3252

33-
def _teardown_host_with_glusterfs(host):
53+
def _uninstall_host_glusterfs(host: Host):
    """Disable glusterd on *host* and wipe its on-disk state.

    Each step is attempted even if a previous one failed; the collected
    errors are raised together at the end.
    """
    failures = []
    commands = (
        ['systemctl', 'disable', '--now', 'glusterd.service'],
        # Remove any remaining gluster-related data to avoid issues in future test runs
        ['rm', '-rf', '/var/lib/glusterd'],
    )
    for command in commands:
        # Default-arg binding so each lambda captures its own command.
        failures.extend(exec_nofail(lambda cmd=command: host.ssh(cmd)))

    raise_errors(failures)
61+
62+
def _restore_host_iptables(host: Host):
63+
errors = []
64+
4065
iptables = 'ip6tables' if is_ipv6(host.hostname_or_ip) else 'iptables'
4166
for h in host.pool.hosts:
4267
hostname_or_ip = h.hostname_or_ip
@@ -56,7 +81,7 @@ def _teardown_host_with_glusterfs(host):
5681
raise_errors(errors)
5782

5883
@pytest.fixture(scope='package')
59-
def pool_without_glusterfs(host):
84+
def pool_without_glusterfs(host: Host) -> Generator[Pool]:
6085
for h in host.pool.hosts:
6186
if h.file_exists('/usr/sbin/glusterd'):
6287
raise Exception(
@@ -65,14 +90,35 @@ def pool_without_glusterfs(host):
6590
yield host.pool
6691

6792
@pytest.fixture(scope='package')
def pool_with_glusterfs(
    pool_without_glusterfs: Pool,
    pool_with_saved_yum_state: Pool,
    _glusterfs_config: GlusterFsConfig
) -> Generator[Pool]:
    """Pool with glusterfs installed and firewall rules opened on every host.

    A per-host setup failure triggers a rollback of the hosts already
    configured. At teardown, glusterfs is normally uninstalled; when
    uninstall_glusterfs was cleared (failed SR destroy), the saved yum
    rollback id is dropped instead — presumably so the later yum rollback
    leaves the packages in place for manual cleanup.
    """

    def _rollback_host(h: Host):
        # Undo a partially completed per-host setup.
        _uninstall_host_glusterfs(h)
        _restore_host_iptables(h)

    def _keep_packages(h: Host):
        # Drop the saved yum state so the packages survive teardown.
        h.saved_rollback_id = None

    pool = pool_with_saved_yum_state
    pool.exec_on_hosts_on_error_rollback(_setup_host_with_glusterfs, _rollback_host)

    yield pool

    # Teardown: really uninstall, or merely neutralize the yum rollback.
    cleanup = (
        _uninstall_host_glusterfs
        if _glusterfs_config.uninstall_glusterfs
        else _keep_packages
    )
    pool.exec_on_hosts_on_error_continue(cleanup)
    pool.exec_on_hosts_on_error_continue(_restore_host_iptables)
73116

74117
@pytest.fixture(scope='package')
75-
def gluster_disk(pool_with_unused_512B_disk, unused_512B_disks):
118+
def gluster_disk(
119+
pool_with_unused_512B_disk: Pool,
120+
unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]
121+
) -> Generator[None, None, None]:
76122
pool = pool_with_unused_512B_disk
77123
mountpoint = '/mnt/sr_disk'
78124
for h in pool.hosts:
@@ -83,10 +129,10 @@ def gluster_disk(pool_with_unused_512B_disk, unused_512B_disks):
83129
lambda h: teardown_formatted_and_mounted_disk(h, mountpoint)
84130
)
85131

86-
def _fallback_gluster_teardown(host):
132+
def _fallback_gluster_teardown(host: Host):
87133
# See: https://microdevsys.com/wp/volume-delete-volume-failed-some-of-the-peers-are-down/
88134
# Remove all peers and bricks from the hosts volume and then stop and destroy volume.
89-
def teardown_for_host(h):
135+
def teardown_for_host(h: Host):
90136
logging.info("< Fallback teardown on host: %s" % h)
91137
hosts = h.pool.hosts
92138

@@ -123,7 +169,7 @@ def teardown_for_host(h):
123169
pass
124170

125171
@pytest.fixture(scope='package')
126-
def gluster_volume_started(host, hostA2, gluster_disk):
172+
def gluster_volume_started(host: Host, hostA2: Host, gluster_disk: None) -> Generator[None]:
127173
hosts = host.pool.hosts
128174

129175
if is_ipv6(host.hostname_or_ip):
@@ -173,7 +219,7 @@ def gluster_volume_started(host, hostA2, gluster_disk):
173219

174220

175221
@pytest.fixture(scope='package')
176-
def glusterfs_device_config(host):
222+
def glusterfs_device_config(host: Host) -> dict[str, str]:
177223
backup_servers = []
178224
for h in host.pool.hosts[1:]:
179225
backup_servers.append(h.hostname_or_ip)
@@ -184,22 +230,32 @@ def glusterfs_device_config(host):
184230
}
185231

186232
@pytest.fixture(scope='package')
def glusterfs_sr(
    host: Host,
    pool_with_glusterfs: Pool,
    gluster_volume_started: None,
    glusterfs_device_config: dict[str, str],
    _glusterfs_config: GlusterFsConfig
) -> Generator[SR]:
    """A GlusterFS SR on first host.

    Teardown destroys the SR. If the destroy fails, uninstall_glusterfs
    is cleared so the glusterfs tools stay installed for manual cleanup,
    and the error is re-raised.
    """
    # Create the SR
    sr = host.sr_create('glusterfs', "GlusterFS-SR-test", glusterfs_device_config, shared=True)
    yield sr
    # teardown
    try:
        sr.destroy()
    except Exception:
        # Keep the glusterfs tools around for manual SR cleanup.
        _glusterfs_config.uninstall_glusterfs = False
        # Bare raise preserves the original traceback (raise e would
        # re-raise with the current frame appended).
        raise
194250

195251
@pytest.fixture(scope='module')
def vdi_on_glusterfs_sr(glusterfs_sr: SR) -> Generator[VDI]:
    """A test VDI on the GlusterFS SR, destroyed at module teardown."""
    test_vdi = glusterfs_sr.create_vdi('GlusterFS-VDI-test')
    yield test_vdi
    # teardown
    test_vdi.destroy()
200256

201257
@pytest.fixture(scope='module')
202-
def vm_on_glusterfs_sr(host, glusterfs_sr, vm_ref):
258+
def vm_on_glusterfs_sr(host: Host, glusterfs_sr: SR, vm_ref: str) -> Generator[VM]:
203259
vm = host.import_vm(vm_ref, sr_uuid=glusterfs_sr.uuid)
204260
yield vm
205261
# teardown

0 commit comments

Comments
 (0)