Update src/translate/Translate.py
src/translate/Translate.py (+18 -14) CHANGED
@@ -74,12 +74,12 @@ def gemma(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
 
 def gemma_direct(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
     # Load model directly
-
+    model_name = model if '/' in model else 'Gargaz/gemma-2b-romanian-better'
     # limit max_new_tokens to 150% of the requestValue
     prompt = f"Translate this text to Romanian: {requestValue}"
 
-    tokenizer = AutoTokenizer.from_pretrained(
-    model = AutoModelForCausalLM.from_pretrained(
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
     input_ids = tokenizer.encode(requestValue, add_special_tokens=True)
     num_tokens = len(input_ids)
     # Estimate output length (e.g., 50% longer)
@@ -88,14 +88,18 @@ def gemma_direct(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
 
     messages = [{"role": "user", "content": prompt}]
 
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        inputs = tokenizer.apply_chat_template(
+            messages,
+            add_generation_prompt=True,
+            tokenize=True,
+            return_dict=True,
+            return_tensors="pt",
+        ).to(device)
+
+        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
+        response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
+        return response
+    except Exception as error:
+        return error
+
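Taken together, the change parameterizes the checkpoint name (falling back to 'Gargaz/gemma-2b-romanian-better' when no repo id is given), loads the tokenizer and model with that name, and generates through the tokenizer's chat template with an output budget of roughly 150% of the input length. The following is a minimal, self-contained sketch of what the updated gemma_direct amounts to. The device setup and the exact max_new_tokens computation sit outside the changed hunks, so those lines are assumptions inferred from the comments; the chat-template and generate calls mirror the added code.

# Sketch only: `device` and the max_new_tokens estimate are assumed,
# since they are defined outside the diff hunks shown above.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

device = "cuda" if torch.cuda.is_available() else "cpu"  # assumed, not shown in the diff

def gemma_direct(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
    # Fall back to the default repo id if the caller passes a bare model name.
    model_name = model if '/' in model else 'Gargaz/gemma-2b-romanian-better'
    prompt = f"Translate this text to Romanian: {requestValue}"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

    # Budget the output at roughly 150% of the input token count
    # (assumed from the "Estimate output length (e.g., 50% longer)" comment).
    input_ids = tokenizer.encode(requestValue, add_special_tokens=True)
    max_new_tokens = int(len(input_ids) * 1.5)

    messages = [{"role": "user", "content": prompt}]
    try:
        inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
        ).to(device)

        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
        # Decode only the newly generated tokens, skipping the prompt portion.
        response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
        return response
    except Exception as error:
        return error

As written, a failure during template application or generation is returned as the exception object rather than raised, so callers expecting a string should check the return type.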