@@ -212,7 +212,6 @@ def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1):
212212def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dynamic_method, dynamic_param, verbose, svd_lowrank_niter=2):
213213 max_old_rank = None
214214 new_alpha = None
215-    verbose_str = "\n"
216215 fro_list = []
217216
218217 if dynamic_method :
@@ -285,15 +284,13 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna
285284 if not np .isnan (fro_retained ):
286285 fro_list .append (float (fro_retained ))
287286
288-        verbose_str += f"{block_down_name:75} | "
287+        verbose_str = f"{block_down_name:75} | "
289288 verbose_str += (
290289            f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}"
291290 )
292-
293- if verbose and dynamic_method :
294-            verbose_str += f", dynamic | dim: {param_dict['new_rank']}, alpha: {param_dict['new_alpha']}\n"
295- else :
296- verbose_str += "\n "
291+ if dynamic_method :
292+            verbose_str += f", dynamic | dim: {param_dict['new_rank']}, alpha: {param_dict['new_alpha']}"
293+        tqdm.write(verbose_str)
297294
298295 new_alpha = param_dict ["new_alpha" ]
299296    o_lora_sd[block_down_name + lora_down_name + weight_name] = param_dict["lora_down"].to(save_dtype).contiguous()
@@ -308,7 +305,6 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna
308305 del param_dict
309306
310307 if verbose :
311-        print(verbose_str)
312308        print(f"Average Frobenius norm retention: {np.mean(fro_list):.2%} | std: {np.std(fro_list):0.3f}")
313309 logger .info ("resizing complete" )
314310 return o_lora_sd , max_old_rank , new_alpha
0 commit comments