import json
import re
from transformers import AutoTokenizer, AutoModelForCausalLM
# Global variables for caching the model and tokenizer
tokenizer, model = None, None


def load_model():
    global tokenizer, model
    if tokenizer is None or model is None:
        # Use the DeepSeek instruct model for code evaluation.
        model_name = "deepseek-ai/deepseek-coder-1.3b-instruct"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model


def evaluate_code(question, code):
    # Refined prompt with explicit instructions and a "Your response:" line.
    prompt = f"""You are an expert code evaluator.
Evaluate the following solution for the given problem.
Rate the solution as follows:
- 5 stars: Perfect solution; it is correct, efficient, and follows best practices.
- 4 stars: Correct solution with minor issues or improvements possible.
- 3 stars: Partially correct solution with noticeable issues.
- 2 stars: Incorrect solution with some correct elements.
- 1 star: Mostly incorrect solution.
- 0 stars: Completely incorrect solution.
Respond with exactly one JSON object (with no extra text) that has exactly two keys:
"stars": an integer between 0 and 5,
"feedback": a concise string message explaining your rating.
The JSON must start with '{{' and end with '}}'.
Do not output any additional text.
Question: "{question}"
Solution: "{code}"
Your response:"""
    tokenizer, model = load_model()
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,   # Increase token allowance if needed
        temperature=0.2,      # Allow some creativity, but mostly deterministic
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
    )
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print("Raw model response:", response_text)  # Debug output
    # Extract all JSON objects (non-greedy) and use the last one.
    matches = re.findall(r'\{.*?\}', response_text)
    if matches:
        json_text = matches[-1]  # Pick the last JSON block
        try:
            result = json.loads(json_text)
        except Exception:
            result = {"stars": 0, "feedback": "Evaluation failed. Unable to parse AI response."}
    else:
        result = {"stars": 0, "feedback": "Evaluation failed. Unable to extract JSON from AI response."}
    return result


# For direct command-line testing.
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 3:
        print(json.dumps({"error": "Please provide a question and code as arguments"}))
        sys.exit(1)
    question = sys.argv[1]
    code = sys.argv[2]
    result = evaluate_code(question, code)
    print(json.dumps(result))
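
# Example invocation from the command line (the filename below is illustrative,
# not the actual name of this file in the Space):
#   python evaluator.py "Reverse a string" "def reverse(s): return s[::-1]"
# On success this prints a JSON object such as {"stars": 5, "feedback": "..."}.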