We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent e453d1b commit 02932be (Copy full SHA for 02932be)
quantllm/api/high_level.py
@@ -755,8 +755,8 @@ def save_quantized_model(
755
# Get original model path from cache if available
756
original_path = None
757
if hasattr(model, 'config') and hasattr(model.config, '_name_or_path'):
758
- from huggingface_hub import HfFolder
759
- cache_dir = os.getenv('HF_HOME', HfFolder.default_cache_path)
+
+ cache_dir = os.getenv('HF_HOME')
760
model_id = model.config._name_or_path
761
if '/' in model_id: # It's a hub model
762
org, model_name = model_id.split('/')
@@ -881,4 +881,4 @@ def save_quantized_model(
881
finally:
882
if torch.cuda.is_available():
883
torch.cuda.empty_cache()
884
-
0 commit comments