diff --git a/dev_requirements.txt b/dev_requirements.txt
index 5fbd5907f..1d83fd017 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,4 +1,4 @@
-coverage
+coverage<7.11
 astroid
 pylint
 bitarray
diff --git a/drivers/LinstorSR.py b/drivers/LinstorSR.py
index 28ca15729..6c7fe410c 100755
--- a/drivers/LinstorSR.py
+++ b/drivers/LinstorSR.py
@@ -1934,8 +1934,9 @@ def resize(self, sr_uuid, vdi_uuid, size) -> str:
             raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
 
         if size == self.size:
-            return VDI.VDI.get_params(self)
+            return VDI.VDI.get_params(self)  # No change needed.
 
+        # Compute the VDI sizes.
         if self.vdi_type == vhdutil.VDI_TYPE_RAW:
             old_volume_size = self.size
             new_volume_size = LinstorVolumeManager.round_up_volume_size(size)
@@ -1952,8 +1953,10 @@ def resize(self, sr_uuid, vdi_uuid, size) -> str:
         self.sr._ensure_space_available(space_needed)
 
         old_size = self.size
+
+        # Resize the VDI.
         if self.vdi_type == vhdutil.VDI_TYPE_RAW:
-            self._linstor.resize(self.uuid, new_volume_size)
+            self._linstor.resize_volume(self.uuid, new_volume_size)
         else:
             if new_volume_size != old_volume_size:
                 self.sr._vhdutil.inflate(
@@ -1965,6 +1968,7 @@ def resize(self, sr_uuid, vdi_uuid, size) -> str:
         # Reload size attributes.
         self._load_this()
 
+        # Update the XAPI metadata.
         vdi_ref = self.sr.srcmd.params['vdi_ref']
         self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
         self.session.xenapi.VDI.set_physical_utilisation(
diff --git a/drivers/linstorvolumemanager.py b/drivers/linstorvolumemanager.py
index c969d0b48..e769942eb 100755
--- a/drivers/linstorvolumemanager.py
+++ b/drivers/linstorvolumemanager.py
@@ -841,31 +841,33 @@ def resize_volume(self, volume_uuid, new_size):
         self.ensure_volume_is_not_locked(volume_uuid)
 
         new_size = self.round_up_volume_size(new_size) // 1024
-        retry_count = 30
-        while True:
-            result = self._linstor.volume_dfn_modify(
-                rsc_name=volume_name,
-                volume_nr=0,
-                size=new_size
+        # We can't resize anything until DRBD is up to date.
+        # We wait here for 5 min max and raise an easy-to-understand error for the user.
+        # 5 min is an arbitrary limit: no single value fits all situations, and there is
+        # currently no way to know in advance how long we have to wait.
+        # This is mostly an issue for thick provisioning; thin isn't affected.
+        start_time = time.monotonic()
+        try:
+            self._linstor.resource_dfn_wait_synced(volume_name, wait_interval=1.0, timeout=60 * 5)
+        except linstor.LinstorTimeoutError:
+            raise LinstorVolumeManagerError(
+                f"Volume `{volume_uuid}` from SR `{self._group_name}` is busy and can't be resized right now. "
+                + "Please retry later."
             )
+        util.SMlog(f"DRBD is up to date, syncing took {time.monotonic() - start_time}s")
 
-            self._mark_resource_cache_as_dirty()
-
-            error_str = self._get_error_str(result)
-            if not error_str:
-                break
+        result = self._linstor.volume_dfn_modify(
+            rsc_name=volume_name,
+            volume_nr=0,
+            size=new_size
+        )
 
-            # After volume creation, DRBD volume can be unusable during many seconds.
-            # So we must retry the definition change if the device is not up to date.
-            # Often the case for thick provisioning.
-            if retry_count and error_str.find('non-UpToDate DRBD device') >= 0:
-                time.sleep(2)
-                retry_count -= 1
-                continue
+        self._mark_resource_cache_as_dirty()
 
+        error_str = self._get_error_str(result)
+        if error_str:
             raise LinstorVolumeManagerError(
-                'Could not resize volume `{}` from SR `{}`: {}'
-                .format(volume_uuid, self._group_name, error_str)
+                f"Could not resize volume `{volume_uuid}` from SR `{self._group_name}`: {error_str}"
             )
 
     def get_volume_name(self, volume_uuid):
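Reviewer note: the core change in `resize_volume` is moving from a modify-then-retry loop (catching the `non-UpToDate DRBD device` error up to 30 times) to a wait-then-modify flow. The sketch below illustrates the poll-with-deadline pattern that `resource_dfn_wait_synced` is expected to follow; `is_synced` is a hypothetical callable standing in for the actual DRBD state query, so this is an illustration of the pattern, not the python-linstor implementation.

```python
import time

class SyncTimeoutError(Exception):
    """Raised when the resource is still not UpToDate at the deadline."""

def wait_until_synced(is_synced, wait_interval=1.0, timeout=60 * 5):
    # `is_synced` is a hypothetical zero-argument callable returning True
    # once all DRBD replicas are UpToDate. time.monotonic() keeps the
    # deadline immune to wall-clock adjustments (NTP jumps, DST).
    deadline = time.monotonic() + timeout
    while not is_synced():
        if time.monotonic() >= deadline:
            raise SyncTimeoutError('DRBD device still not UpToDate, giving up')
        time.sleep(wait_interval)
```

Waiting up front means `volume_dfn_modify` runs at most once, so any failure it reports is a hard error rather than a possibly transient one, which is why the old retry loop could be deleted.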