Commit 8aa99b8
Parent(s): 6fadedd
Update tinyllama_inference.py with DeepSeek integration and debug logging
tinyllama_inference.py  CHANGED  (+4 -6)
@@ -9,7 +9,7 @@ def load_model():
     global tokenizer, model
     if tokenizer is None or model is None:
         # Use a DeepSeek model for code evaluation.
-        model_name = "deepseek-ai/deepseek-coder-1.3b"  # Adjust to your chosen DeepSeek model
+        model_name = "deepseek-ai/deepseek-coder-1.3b"  # Adjust to your chosen DeepSeek model if needed
         tokenizer = AutoTokenizer.from_pretrained(model_name)
         model = AutoModelForCausalLM.from_pretrained(model_name)
     return tokenizer, model
@@ -37,12 +37,10 @@ Solution: "{code}"
         do_sample=False
     )
     response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-
-    # print("Raw model response:", response_text)
-
+    print("Raw model response:", response_text)  # Debug output
+
     # Use regex to extract the JSON object from the response
-    match = re.search(r'\{
+    match = re.search(r'\{.*?\}', response_text)
     if match:
         json_text = match.group(0)
         try: