Update app.py #1
by DEADLOCK007X - opened
app.py  CHANGED

@@ -1,47 +1,15 @@
-import os
-import json
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from tinyllama_inference import evaluate_code
 
-def load_model():
-    # Use a public, open-source model for code evaluation.
-    model_name = "Salesforce/codegen-350M-mono"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    return tokenizer, model
-
-# Load the model once at startup.
-tokenizer, model = load_model()
-
-def evaluate_model(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt")
-    outputs = model.generate(**inputs, max_new_tokens=150)
-    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    try:
-        result = json.loads(response_text.strip())
-    except Exception as e:
-        result = {"stars": 0, "feedback": "Evaluation failed. Unable to parse AI response."}
-    return result
-
-def evaluate_code(language, question, code):
+def evaluate_interface(language, question, code):
     if not code.strip():
         return "Error: No code provided. Please enter your solution code."
-
-
-    prompt = f"""
-You are an expert code evaluator.
-Rate the following solution on a scale of 0-5 (0 = completely incorrect, 5 = excellent) and provide a concise feedback message.
-Language: {language}
-Problem: "{question}"
-Solution: "{code}"
-Return ONLY valid JSON: {{"stars": number, "feedback": string}}.
-Do not include any extra text.
-"""
-    result = evaluate_model(prompt)
+    # Here you might choose to use the language input to further tailor the prompt if needed.
+    result = evaluate_code(question, code)
     return f"Stars: {result.get('stars', 0)}\nFeedback: {result.get('feedback', '')}"
 
 iface = gr.Interface(
-    fn=evaluate_code,
+    fn=evaluate_interface,
     inputs=[
         gr.Dropdown(choices=["C", "Python", "Java"], label="Language"),
         gr.Textbox(lines=2, placeholder="Enter the problem question here...", label="Question"),
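For context: the new app.py delegates scoring to evaluate_code(question, code) from a tinyllama_inference module that is not shown in this diff. Below is a minimal sketch of what such a module might look like, assuming it reuses the prompt-building and JSON-parsing logic removed from app.py; the checkpoint name TinyLlama/TinyLlama-1.1B-Chat-v1.0 and all internals are assumptions, not part of this PR. Only the two-argument signature and the {"stars", "feedback"} return shape are taken from the diff itself.

# tinyllama_inference.py -- hypothetical sketch, not part of this PR.
# Assumes the module mirrors the logic removed from app.py, with an assumed
# TinyLlama checkpoint standing in for Salesforce/codegen-350M-mono.
import json

from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # assumed checkpoint name

# Load the model once at import time, as the previous app.py did at startup.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

def evaluate_code(question, code):
    """Rate a solution 0-5 and return a dict like {"stars": int, "feedback": str}."""
    prompt = f"""
You are an expert code evaluator.
Rate the following solution on a scale of 0-5 (0 = completely incorrect, 5 = excellent) and provide a concise feedback message.
Problem: "{question}"
Solution: "{code}"
Return ONLY valid JSON: {{"stars": number, "feedback": string}}.
Do not include any extra text.
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=150)
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    try:
        return json.loads(response_text.strip())
    except Exception:
        # Fall back to a zero-star result if the model output is not valid JSON.
        return {"stars": 0, "feedback": "Evaluation failed. Unable to parse AI response."}

Under these assumptions, evaluate_interface in app.py only forwards (question, code) and formats the returned stars and feedback into the string shown to the user.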
