File size: 2,187 Bytes
ef1a205
a8a2d3f
d56b284
 
ef1a205
d56b284
ef1a205
 
 
d56b284
 
 
 
 
 
 
 
ef1a205
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d56b284
ef1a205
 
 
 
 
 
d56b284
 
a8a2d3f
ef1a205
d56b284
 
ef1a205
 
 
d56b284
 
ef1a205
 
a8a2d3f
ef1a205
a8a2d3f
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
from typing import Dict
import gradio as gr
import openai
import os
import json

from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field

MODEL = "gpt-3.5-turbo"

# os.getenv never raises -- it returns None when the variable is unset --
# so the original try/except could never trigger. Check the value instead.
openai.api_key = os.getenv("OPENAI_API_KEY")
if openai.api_key is None:
    print("Set the OPENAI_API_KEY environment variable")
    raise SystemExit(1)

# Grading rubric used for every request; loaded once at startup.
with open('./rubrics/act_rubric.json', 'r') as j:
    act_rubric = json.load(j)

class ScoreDescription(BaseModel):
    """Result for one rubric category: a numeric score and its rationale."""

    score: int = Field(description="The score given")
    description: str = Field(description="Why the score was given")

class ACTScore(BaseModel):
    """Full grading result: per-category scores keyed by rubric category name, plus overall feedback."""

    sub_scores: Dict[str, ScoreDescription] = Field(description="The sub-scores of the essay for each category in the rubric")
    overall_feedback: str = Field(description="Overall feedback for the essay")

# Parses the model's raw text reply into an ACTScore instance. Its format
# instructions are baked into the prompt template below so the model knows
# what JSON shape to emit.
parser = PydanticOutputParser(pydantic_object=ACTScore)

# Prompt sent to the chat model. `format_description` is pre-filled via
# partial_variables with the parser's output-format instructions, so callers
# only supply the rubric, the essay prompt, and the essay itself.
grader_template = PromptTemplate(
    input_variables=['rubric', 'essay_prompt', 'essay'],
    template= """
        You are an essay grader provided with the following grading rubric:\n
        {rubric}
        \n
        The essay writer was given the following instructions to write the essay: \n
        {essay_prompt}
        \n
        Grade the following essay. Provide sub-scores and rationale for each sub-score. \n
        {essay}
        \n

        Format description:
        {format_description}
    """,
    partial_variables={
        'format_description': parser.get_format_instructions()
    }

)
    
def get_prompt(essay, essay_prompt):
    """Render the grading prompt for the given essay and its instructions."""
    template_fields = {
        'rubric': act_rubric,
        'essay_prompt': essay_prompt,
        'essay': essay,
    }
    return grader_template.format(**template_fields)



def grade_essay(essay, essay_prompt):
    """Ask the chat model to grade `essay` and return its raw text reply."""
    request_messages = [
        {"role": "user", "content": get_prompt(essay, essay_prompt)},
    ]
    completion = openai.ChatCompletion.create(
        model=MODEL,
        messages=request_messages,
        temperature=0.0,
        max_tokens=1000,
    )
    # First (and only) choice holds the assistant's text content.
    return completion['choices'][0]['message']['content']

# Two text inputs (essay, then its writing prompt) matching grade_essay's
# parameter order; the model's raw reply is shown as plain text.
demo = gr.Interface(
    fn=grade_essay,
    inputs=[
        gr.Textbox(lines=10, placeholder='Essay'),
        gr.Textbox(lines=10, placeholder='Essay Prompt'),
    ],
    outputs="text",
)

# Launch the web UI only when run as a script, so importing this module
# (e.g. from tests) does not start the server.
if __name__ == "__main__":
    demo.launch()