DEADLOCK007X committed
Commit 8b70345 · verified · 1 parent: 200227d

Update app.py

Files changed (1): app.py (+39 -14)
app.py CHANGED
@@ -1,31 +1,59 @@
 import gradio as gr
+import json
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+def load_model():
+    # TinyLlama chat model identifier on the Hugging Face Hub.
+    model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    return tokenizer, model
+
+# Load the model once when the app starts.
+tokenizer, model = load_model()
+
+def evaluate_tinyllama(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt")
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_new_tokens=150)
+    # Decode only the newly generated tokens; outputs[0] also echoes the prompt.
+    prompt_len = inputs["input_ids"].shape[-1]
+    response_text = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
+    try:
+        result = json.loads(response_text.strip())
+    except Exception:
+        result = {"stars": 0, "feedback": "Evaluation failed. Unable to parse AI response."}
+    return result
 
 def evaluate_code(language, question, code):
-    # Check if code is provided
     if not code.strip():
         return "Error: No code provided. Please enter your solution code."
-
-    # Dummy evaluation logic based on language.
-    # Replace this with your TinyLlama integration later.
-    if language.lower() == "c":
-        return "C evaluation: The code meets basic criteria but may need improvements."
-    elif language.lower() == "python":
-        return "Python evaluation: Code is correct and follows best practices."
-    elif language.lower() == "java":
-        return "Java evaluation: Good structure, but consider error handling improvements."
-    else:
-        return "Unsupported language. Please select C, Python, or Java."
+
+    # Build a detailed prompt for the evaluator.
+    prompt = f"""
+You are an expert code evaluator.
+Rate the following solution on a scale of 0-5 (0 = completely incorrect, 5 = excellent) and provide a concise feedback message.
+Language: {language}
+Problem: "{question}"
+Solution: "{code}"
+Return ONLY valid JSON: {{"stars": number, "feedback": string}}.
+Do not include any extra text.
+"""
+    result = evaluate_tinyllama(prompt)
+    # Format the output nicely.
+    return f"Stars: {result.get('stars', 0)}\nFeedback: {result.get('feedback', '')}"
 
 iface = gr.Interface(
     fn=evaluate_code,
     inputs=[
         gr.Dropdown(choices=["C", "Python", "Java"], label="Language"),
         gr.Textbox(lines=2, placeholder="Enter the problem question here...", label="Question"),
-        gr.Code(label="Your Code (if any)")  # No placeholder property here.
+        gr.Code(language="python", label="Your Code")
     ],
     outputs=gr.Textbox(label="Evaluation Result"),
     title="Code Evaluator",
-    description="Enter a coding question and your solution (if available) to get AI-powered feedback. Supports C, Python, and Java."
+    description="Enter a coding question and your solution to get AI-powered feedback. Supports C, Python, and Java."
 )
 
 if __name__ == "__main__":
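
A note on the JSON contract above: small instruction-tuned models often ignore "Return ONLY valid JSON" and wrap the object in extra prose, in which case the bare json.loads in evaluate_tinyllama falls through to the zero-star fallback even when a usable rating is present. A minimal, more forgiving parser is sketched below; the extract_json helper is illustrative rather than part of this commit, and it assumes the flat {"stars": ..., "feedback": ...} schema with no nested braces.

import json
import re

def extract_json(text):
    # Take the first {...} span in the reply; a non-greedy match is safe here
    # because the expected schema is flat and contains no nested braces.
    match = re.search(r"\{.*?\}", text, re.DOTALL)
    if not match:
        return None
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return None

# Example: a reply that disobeys the "JSON only" instruction.
reply = 'Sure! {"stars": 4, "feedback": "Clean solution."} Hope that helps.'
print(extract_json(reply))  # {'stars': 4, 'feedback': 'Clean solution.'}

If extract_json returns None, the caller can keep the existing {"stars": 0, ...} fallback, so the interface degrades gracefully instead of raising.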