+from __future__ import annotations
+
 import pytest
 
 import logging
+from dataclasses import dataclass
 
 from lib.common import exec_nofail, raise_errors, setup_formatted_and_mounted_disk, teardown_formatted_and_mounted_disk
 from lib.netutil import is_ipv6
 
+from typing import TYPE_CHECKING, Generator
+
+if TYPE_CHECKING:
+    from lib.host import Host
+    from lib.pool import Pool
+    from lib.sr import SR
+    from lib.vdi import VDI
+    from lib.vm import VM
+
 # explicit import for package-scope fixtures
 from pkgfixtures import pool_with_saved_yum_state
 
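+# 24007/tcp is glusterd's management port; each brick listens on its own port
+# allocated from 49152 upwards, hence the 49152:49251 range.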
 GLUSTERFS_PORTS = [('24007', 'tcp'), ('49152:49251', 'tcp')]
 
-def _setup_host_with_glusterfs(host):
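+# Package-scoped mutable flag shared between the fixtures below: teardown code
+# flips uninstall_glusterfs to False to keep packages and state in place for
+# manual debugging.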
+@dataclass
+class GlusterFsConfig:
+    uninstall_glusterfs: bool = True
+
+@pytest.fixture(scope='package')
+def _glusterfs_config() -> GlusterFsConfig:
+    return GlusterFsConfig()
+
+def _setup_host_with_glusterfs(host: Host):
     for service in ['iptables', 'ip6tables']:
         host.ssh(['cp', '/etc/sysconfig/%s' % service, '/etc/sysconfig/%s.orig' % service])
 
@@ -30,13 +50,18 @@ def _setup_host_with_glusterfs(host):
 
     host.ssh(['systemctl', 'enable', '--now', 'glusterd.service'])
 
-def _teardown_host_with_glusterfs(host):
+def _uninstall_host_glusterfs(host: Host):
     errors = []
     errors += exec_nofail(lambda: host.ssh(['systemctl', 'disable', '--now', 'glusterd.service']))
 
     # Remove any remaining gluster-related data to avoid issues in future test runs
     errors += exec_nofail(lambda: host.ssh(['rm', '-rf', '/var/lib/glusterd']))
 
+    raise_errors(errors)
+
+def _restore_host_iptables(host: Host):
+    errors = []
+
     iptables = 'ip6tables' if is_ipv6(host.hostname_or_ip) else 'iptables'
     for h in host.pool.hosts:
         hostname_or_ip = h.hostname_or_ip
@@ -56,7 +81,7 @@ def _teardown_host_with_glusterfs(host):
     raise_errors(errors)
 
 @pytest.fixture(scope='package')
-def pool_without_glusterfs(host):
+def pool_without_glusterfs(host: Host) -> Generator[Pool]:
     for h in host.pool.hosts:
         if h.file_exists('/usr/sbin/glusterd'):
             raise Exception(
@@ -65,28 +90,57 @@ def pool_without_glusterfs(host):
     yield host.pool
 
 @pytest.fixture(scope='package')
-def pool_with_glusterfs(pool_without_glusterfs, pool_with_saved_yum_state):
+def pool_with_glusterfs(
+    pool_without_glusterfs: Pool,
+    pool_with_saved_yum_state: Pool,
+    _glusterfs_config: GlusterFsConfig
+) -> Generator[Pool]:
+
+    def _host_rollback(host: Host):
+        _uninstall_host_glusterfs(host)
+        _restore_host_iptables(host)
+
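+    # Dropping the saved rollback id prevents the pool_with_saved_yum_state
+    # teardown from rolling back yum history, so installed packages are kept.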
+    def _disable_yum_rollback(host: Host):
+        host.saved_rollback_id = None
+
     pool = pool_with_saved_yum_state
-    pool.exec_on_hosts_on_error_rollback(_setup_host_with_glusterfs, _teardown_host_with_glusterfs)
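+    # If setup fails on any host, roll back the hosts already configured:
+    # uninstall glusterfs and restore their original iptables config.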
+    pool.exec_on_hosts_on_error_rollback(_setup_host_with_glusterfs, _host_rollback)
+
     yield pool
-    pool.exec_on_hosts_on_error_continue(_teardown_host_with_glusterfs)
+
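+    # When a later fixture flagged the run for manual cleanup, only disable the
+    # yum rollback and leave glusterfs installed and the firewall open.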
+    if not _glusterfs_config.uninstall_glusterfs:
+        pool.exec_on_hosts_on_error_continue(_disable_yum_rollback)
+        return
+
+    pool.exec_on_hosts_on_error_continue(_uninstall_host_glusterfs)
+    pool.exec_on_hosts_on_error_continue(_restore_host_iptables)
 
 @pytest.fixture(scope='package')
-def gluster_disk(pool_with_unused_512B_disk, unused_512B_disks):
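+# Formats the unused 512B-sector disk on each host as XFS and mounts it where
+# the gluster brick will live.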
+def gluster_disk(
+    pool_with_unused_512B_disk: Pool,
+    unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]],
+    _glusterfs_config: GlusterFsConfig,
+) -> Generator[None]:
     pool = pool_with_unused_512B_disk
     mountpoint = '/mnt/sr_disk'
     for h in pool.hosts:
         sr_disk = unused_512B_disks[h][0]["name"]
         setup_formatted_and_mounted_disk(h, sr_disk, 'xfs', mountpoint)
+
     yield
+
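+    # Manual-cleanup mode: keep the fstab entries and mounted disks.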
+    if not _glusterfs_config.uninstall_glusterfs:
+        logging.warning("<< leave fstab entries and mountpoints in place for manual cleanup")
+        return
+
     pool.exec_on_hosts_on_error_continue(
         lambda h: teardown_formatted_and_mounted_disk(h, mountpoint)
     )
 
-def _fallback_gluster_teardown(host):
+def _fallback_gluster_teardown(host: Host):
     # See: https://microdevsys.com/wp/volume-delete-volume-failed-some-of-the-peers-are-down/
     # Remove all peers and bricks from the host's volume, then stop and destroy the volume.
-    def teardown_for_host(h):
+    def teardown_for_host(h: Host):
         logging.info("< Fallback teardown on host: %s" % h)
         hosts = h.pool.hosts
 
@@ -123,7 +177,12 @@ def teardown_for_host(h):
             pass
 
 @pytest.fixture(scope='package')
-def gluster_volume_started(host, hostA2, gluster_disk):
+def gluster_volume_started(
+    host: Host,
+    hostA2: Host,
+    gluster_disk: None,
+    _glusterfs_config: GlusterFsConfig
+) -> Generator[None]:
     hosts = host.pool.hosts
 
     if is_ipv6(host.hostname_or_ip):
@@ -157,7 +216,13 @@ def gluster_volume_started(host, hostA2, gluster_disk):
     host.ssh(['gluster', 'volume', 'set', 'vol0', 'network.ping-timeout', '5'])
 
     host.ssh(['gluster', 'volume', 'start', 'vol0'])
+
     yield
+
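+    # Manual-cleanup mode: leave the volume running so its state can be inspected.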
+    if not _glusterfs_config.uninstall_glusterfs:
+        logging.warning("<< leave gluster volume vol0 in place for manual cleanup")
+        return
+
     logging.info("<< stop and delete gluster volume vol0")
     try:
         host.ssh(['gluster', '--mode=script', 'volume', 'stop', 'vol0'])
@@ -173,7 +238,7 @@ def gluster_volume_started(host, hostA2, gluster_disk):
 
 
 @pytest.fixture(scope='package')
-def glusterfs_device_config(host):
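+# Device-config for SR creation: the first host serves the volume, every other
+# pool member is listed as a backup server.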
+def glusterfs_device_config(host: Host) -> dict[str, str]:
     backup_servers = []
     for h in host.pool.hosts[1:]:
         backup_servers.append(h.hostname_or_ip)
@@ -184,22 +249,32 @@ def glusterfs_device_config(host):
     }
 
 @pytest.fixture(scope='package')
-def glusterfs_sr(host, pool_with_glusterfs, gluster_volume_started, glusterfs_device_config):
+def glusterfs_sr(
+    host: Host,
+    pool_with_glusterfs: Pool,
+    gluster_volume_started: None,
+    glusterfs_device_config: dict[str, str],
+    _glusterfs_config: GlusterFsConfig
+) -> Generator[SR]:
     """ A GlusterFS SR on first host. """
     # Create the SR
     sr = host.sr_create('glusterfs', "GlusterFS-SR-test", glusterfs_device_config, shared=True)
     yield sr
     # teardown
-    sr.destroy()
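+    # If the SR cannot be destroyed, flag the whole package for manual cleanup
+    # so the dependent fixtures keep the volume, mounts and packages around.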
+    try:
+        sr.destroy()
+    except Exception as e:
+        _glusterfs_config.uninstall_glusterfs = False
+        raise pytest.fail.Exception(
+            "Could not destroy glusterfs SR, leaving packages in place for manual cleanup"
+        ) from e
 
 @pytest.fixture(scope='module')
-def vdi_on_glusterfs_sr(glusterfs_sr):
+def vdi_on_glusterfs_sr(glusterfs_sr: SR) -> Generator[VDI]:
     vdi = glusterfs_sr.create_vdi('GlusterFS-VDI-test')
     yield vdi
     vdi.destroy()
 
 @pytest.fixture(scope='module')
-def vm_on_glusterfs_sr(host, glusterfs_sr, vm_ref):
+def vm_on_glusterfs_sr(host: Host, glusterfs_sr: SR, vm_ref: str) -> Generator[VM]:
     vm = host.import_vm(vm_ref, sr_uuid=glusterfs_sr.uuid)
     yield vm
     # teardown