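"""Quiz app models: quizzes, questions, and free-text, multiple-choice, and LLM-graded answers."""
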
import os
import typing

import openai
from django.contrib.postgres import fields
from django.db import models


class Quiz(models.Model):
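    """A named collection of questions."""
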
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


class Question(models.Model):
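    """A quiz question, answered by exactly one Answer subclass instance."""
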
    quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE)
    prompt = models.CharField(max_length=200)
    rubrics = models.TextField(
        blank=True,
        null=True,
        verbose_name="Grading Rubrics - For LLM-graded questions only. You can leave this empty.",
    )

    def __str__(self):
        return self.prompt

    def get_answer(self) -> typing.Union["Answer", None]:
        # Each concrete Answer subclass exposes a lowercase reverse one-to-one
        # accessor on Question; getattr returns None when the accessor is
        # absent instead of raising RelatedObjectDoesNotExist.
        return (
            getattr(self, "multiplechoiceanswer", None)
            or getattr(self, "freetextanswer", None)
            or getattr(self, "llmgradedanswer", None)
        )


class Answer(models.Model):
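    """Abstract base for all answer types; each links one-to-one to a Question."""
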
    question = models.OneToOneField(Question, on_delete=models.CASCADE)

    class Meta:
        abstract = True

    def __str__(self) -> str:
        # Fall back to the question's rubrics for answer types (e.g.
        # LLMGradedAnswer) that define no correct_answer of their own.
        return (
            getattr(self, "correct_answer", None)
            or getattr(self.question, "rubrics", None)
            or "No answer or rubrics provided"
        )

    def is_correct(self, user_answer) -> bool:
        return user_answer == getattr(self, "correct_answer", None)


class FreeTextAnswer(Answer):
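    """A free-text answer compared against a stored correct answer."""
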
    correct_answer = models.CharField(max_length=200, default="")
    case_sensitive = models.BooleanField(default=False)

    def is_correct(self, user_answer) -> bool:
        if not self.case_sensitive:
            return user_answer.lower() == self.correct_answer.lower()
        return user_answer == self.correct_answer


class LLMGradedAnswer(Answer):
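    """An answer graded by an LLM against the question's rubrics."""
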
    def grade(self, user_answer) -> dict:
        """
        Grade the user's answer by calling the OpenAI API.

        Args:
            user_answer (str): The answer provided by the user.

        Returns:
            dict: The result of the grading.
        """
        try:
            # Note: this targets the pre-1.0 openai SDK (openai.ChatCompletion,
            # openai.error); openai>=1.0 replaced both with a client-object API.
            openai.api_key = os.getenv("OPENAI_API_KEY")
            prompt = (
                "Grade the following answer based on the rubric:\n"
                f"Rubric: {self.question.rubrics}\n"
                f"Answer: {user_answer}"
            )
            response = openai.ChatCompletion.create(
                model="gpt-4o",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": prompt},
                ],
            )
            return {"result": "success", "message": response.choices[0].message["content"]}
        except openai.error.OpenAIError as e:
            print(f"An error occurred: {e}")
            return {"result": "error", "message": str(e)}


class MultipleChoiceAnswer(Answer):
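    """An answer chosen from a fixed list of options (PostgreSQL ArrayField)."""
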
    correct_answer = models.CharField(max_length=200, default="")
    choices = fields.ArrayField(models.CharField(max_length=200, blank=True))

    def __str__(self) -> str:
        return f"{self.correct_answer} from {self.choices}"
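
# Example usage (Django shell) - a minimal sketch; the quiz name, prompt, and
# answers below are illustrative, not part of this module:
#
#   quiz = Quiz.objects.create(name="Python basics")
#   question = Question.objects.create(quiz=quiz, prompt="What does len() return?")
#   FreeTextAnswer.objects.create(question=question, correct_answer="The length")
#   question.get_answer().is_correct("the length")  # True: case_sensitive=False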