Oleg Shulyakov committed · Commit 280ff7d
Parent(s): dceec55
Fix errors
app.py CHANGED
@@ -140,7 +140,7 @@ class HuggingFaceModelProcessor:
         if not os.path.isfile(quant_config.fp16_model):
             raise GGUFConverterError(f"Model file not found: {quant_config.fp16_model}")
 
-        train_data_path = CALIBRATION_FILE
+        train_data_path = self.CALIBRATION_FILE
         if not os.path.isfile(train_data_path):
             raise GGUFConverterError(f"Training data file not found: {train_data_path}")
         print(f"Training data file path: {train_data_path}")
@@ -285,7 +285,7 @@ class HuggingFaceModelProcessor:
 
         print("Model converted to fp16 successfully!")
         print(f"Converted model path: {os.path.abspath(processing_config.quant_config.fp16_model)}")
-        return fp16_model
+        return processing_config.quant_config.fp16_model
 
     def _quantize_model(self, quant_config: QuantizationConfig) -> str:
         """Quantize the GGUF model."""
@@ -593,7 +593,7 @@ class GGUFConverterUI:
         )
 
     @staticmethod
-    def _update_output_repo(model_id: str, oauth_token: gr.OAuthToken
+    def _update_output_repo(model_id: str, oauth_token: Optional[gr.OAuthToken]) -> str:
         """Update output repository name based on model and user."""
         if oauth_token is None or not oauth_token.token:
             return ""
@@ -743,7 +743,7 @@ class GGUFConverterUI:
                        repo_name: str, gguf_name: str, quant_embedding: bool,
                        embedding_tensor_method: str, leave_output: bool,
                        quant_output: bool, output_tensor_method: str,
-                       split_model: bool, split_max_tensors, split_max_size: str) -> Tuple[str, str]:
+                       split_model: bool, split_max_tensors, split_max_size: str, oauth_token: Optional[gr.OAuthToken]) -> Tuple[str, str]:
         """Wrapper for the process_model method to handle the conversion using ModelProcessingConfig."""
         try:
             # Validate token and get token string
@@ -775,10 +775,10 @@ class GGUFConverterUI:
 
         model_name = self.processor._get_model_name(model_id)
 
-        with tempfile.TemporaryDirectory(dir=self.OUTPUT_FOLDER) as outDirObj:
+        with tempfile.TemporaryDirectory(dir=self.processor.OUTPUT_FOLDER) as outDirObj:
             outdir = (
-                self._create_folder(os.path.join(self.OUTPUT_FOLDER, model_name))
-                if self.RUN_LOCALLY == "1"
+                self.processor._create_folder(os.path.join(self.processor.OUTPUT_FOLDER, model_name))
+                if self.processor.RUN_LOCALLY == "1"
                 else Path(outDirObj)
             )
 
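For context, a minimal runnable sketch of the attribute-lookup bug the last hunk fixes: OUTPUT_FOLDER, RUN_LOCALLY, and _create_folder live on the processor object, not on the UI object, so the UI must reach them through self.processor. The class bodies below are assumptions for illustration; only the attribute access mirrors the diff.

import os
import tempfile
from pathlib import Path

class Processor:
    OUTPUT_FOLDER = "./outputs"                    # assumed default
    RUN_LOCALLY = os.getenv("RUN_LOCALLY", "0")    # assumed env-driven flag

    def _create_folder(self, path: str) -> Path:
        os.makedirs(path, exist_ok=True)
        return Path(path)

class UI:
    def __init__(self, processor: Processor):
        # Config lives on the processor; the UI only holds a reference.
        self.processor = processor

    def process(self, model_name: str) -> None:
        # Before the fix this read self.OUTPUT_FOLDER etc., which raises
        # AttributeError because UI never defines those attributes.
        with tempfile.TemporaryDirectory(dir=self.processor.OUTPUT_FOLDER) as tmp:
            outdir = (
                self.processor._create_folder(
                    os.path.join(self.processor.OUTPUT_FOLDER, model_name))
                if self.processor.RUN_LOCALLY == "1"
                else Path(tmp)
            )
            print(f"Working in {outdir}")  # conversion work happens here

if __name__ == "__main__":
    proc = Processor()
    os.makedirs(proc.OUTPUT_FOLDER, exist_ok=True)  # dir= must already exist
    UI(proc).process("demo-model")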
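Similarly, the Optional[gr.OAuthToken] changes in the @@ -593 and @@ -743 hunks follow Gradio's OAuth convention on Spaces: when the visitor is not logged in, Gradio passes None for the token parameter, so the handler must accept Optional and guard before use. Below is a sketch of that guard with a stand-in dataclass so it runs without Gradio; the "-GGUF" repo suffix is an assumption, not taken from the diff.

from dataclasses import dataclass
from typing import Optional

@dataclass
class OAuthToken:
    """Stand-in for gr.OAuthToken, which exposes a .token string."""
    token: str

def update_output_repo(model_id: str, oauth_token: Optional[OAuthToken]) -> str:
    # Mirrors the guard in _update_output_repo: with no usable token there
    # is no username to build a repo name from, so return "".
    if oauth_token is None or not oauth_token.token:
        return ""
    model_name = model_id.split("/")[-1]
    # Hypothetical output format; the real method derives the username
    # from the token via the Hub API.
    return f"someuser/{model_name}-GGUF"

print(update_output_repo("meta-llama/Llama-3-8B", None))             # ""
print(update_output_repo("meta-llama/Llama-3-8B", OAuthToken("x")))  # "someuser/Llama-3-8B-GGUF"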