Commit c55d8e2 (verified), committed by TiberiuCristianLeon
Parent(s): 73d63a7

Update src/translate/Translate.py

Files changed (1):
  1. src/translate/Translate.py (+18, -14)
src/translate/Translate.py CHANGED
@@ -74,12 +74,12 @@ def gemma(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
 
 def gemma_direct(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
     # Load model directly
-    model = model if '/' in model else 'Gargaz/gemma-2b-romanian-better'
+    model_name = model if '/' in model else 'Gargaz/gemma-2b-romanian-better'
     # limit max_new_tokens to 150% of the requestValue
     prompt = f"Translate this text to Romanian: {requestValue}"
 
-    tokenizer = AutoTokenizer.from_pretrained("Gargaz/gemma-2b-romanian-better")
-    model = AutoModelForCausalLM.from_pretrained("Gargaz/gemma-2b-romanian-better").to(device)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
     input_ids = tokenizer.encode(requestValue, add_special_tokens=True)
     num_tokens = len(input_ids)
     # Estimate output length (e.g., 50% longer)
@@ -88,14 +88,18 @@ def gemma_direct(requestValue: str, model: str = 'Gargaz/gemma-2b-romanian-better'):
 
     messages = [{"role": "user", "content": prompt}]
 
-    inputs = tokenizer.apply_chat_template(
-        messages,
-        add_generation_prompt=True,
-        tokenize=True,
-        return_dict=True,
-        return_tensors="pt",
-    ).to(device)
-
-    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
-    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
-    return response, model
+    try:
+        inputs = tokenizer.apply_chat_template(
+            messages,
+            add_generation_prompt=True,
+            tokenize=True,
+            return_dict=True,
+            return_tensors="pt",
+        ).to(device)
+
+        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
+        response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
+        return response
+    except Exception as error:
+        return error
+