Spaces:
Sleeping
Sleeping
Create quiz_generator.py
Browse files- quiz_generator.py +176 -0
quiz_generator.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import aiohttp
|
2 |
+
import asyncio
|
3 |
+
import json
|
4 |
+
import logging
|
5 |
+
from typing import List, Dict, Any
|
6 |
+
from fallback_questions import FallbackQuestions
|
7 |
+
|
8 |
+
logger = logging.getLogger(__name__)
|
9 |
+
|
10 |
+
class QuizGenerator:
|
11 |
+
def __init__(self, api_key: str):
    """Set up the generator with an API key and the predefined-question fallback."""
    self.api_key = api_key
    # Until an AI call succeeds, report the fallback as the active source.
    self.model_used = "fallback"
    self.generation_method = "fallback"
    self.fallback = FallbackQuestions()

    # Model configuration: one preferred model per language, plus a small
    # last-resort model tried when the preferred one fails.
    self.models = {}
    self.models["norwegian"] = "NbAiLab/nb-gpt-j-6B"
    self.models["english"] = "meta-llama/Llama-2-70b-chat-hf"
    self.models["fallback"] = "google/flan-t5-small"
|
23 |
+
|
24 |
+
async def generate_quiz(self, request) -> List[Dict[str, Any]]:
    """Generate quiz questions for *request*, preferring AI generation.

    Tries AI generation when an API key is configured; on any failure the
    error is logged and we fall back to the predefined question bank, so
    callers always receive a list of question dicts.
    """
    # Lazy %-style args: the message is only formatted if the level is enabled.
    logger.info("Starter quiz-generering: %s (%s)", request.tema, request.språk)

    # Try AI generation first (only possible with an API key).
    if self.api_key:
        try:
            questions = await self._try_ai_generation(request)
            if questions:
                logger.info("AI-generering suksess: %d spørsmål", len(questions))
                return questions
        except Exception as e:
            # Broad catch is deliberate: an AI failure must never prevent
            # quiz delivery — we fall through to the predefined questions.
            logger.warning("AI-generering feilet: %s", e)

    # Fall back to predefined questions.
    logger.info("Bruker fallback-spørsmål")
    self.model_used = "fallback"
    self.generation_method = "predefined"

    return self.fallback.get_questions(
        tema=request.tema,
        språk=request.språk,
        antall=request.antall_spørsmål,
        type=request.type,  # `type` is the callee's keyword name (shadows the builtin there)
        vanskelighet=request.vanskelighetsgrad
    )
|
50 |
+
|
51 |
+
async def _try_ai_generation(self, request) -> List[Dict[str, Any]]:
    """Try AI generation: language-appropriate model first, then the fallback model.

    On success, records the winning model in self.model_used and sets
    self.generation_method ("ai" or "ai_fallback") and returns the parsed
    questions. Returns an empty list when every model fails.
    """
    # Pick the primary model by request language.
    if request.språk == "no":
        model = self.models["norwegian"]
    else:
        model = self.models["english"]

    # Lazy %-args throughout: no string formatting when the level is disabled.
    logger.info("Prøver AI-modell: %s", model)

    try:
        questions = await self._call_huggingface_api(model, request)
        if questions:
            self.model_used = model
            self.generation_method = "ai"
            return questions
    except Exception as e:
        logger.warning("Modell %s feilet: %s", model, e)

    # Primary model failed or returned nothing — try the small fallback model.
    try:
        logger.info("Prøver fallback-modell: %s", self.models["fallback"])
        questions = await self._call_huggingface_api(self.models["fallback"], request)
        if questions:
            self.model_used = self.models["fallback"]
            self.generation_method = "ai_fallback"
            return questions
    except Exception as e:
        logger.warning("Fallback-modell feilet: %s", e)

    # Caller (generate_quiz) treats an empty list as "use predefined questions".
    return []
|
83 |
+
|
84 |
+
async def _call_huggingface_api(self, model: str, request) -> List[Dict[str, Any]]:
    """Call the Hugging Face Inference API for *model* and parse quiz questions.

    Raises RuntimeError (still caught by the callers' ``except Exception``,
    but more specific than a bare ``Exception``) on HTTP errors, unexpected
    payload shapes, or unparseable output.
    """
    prompt = self._build_prompt(request, model)

    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"https://api-inference.huggingface.co/models/{model}",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            },
            json={
                "inputs": prompt,
                "parameters": {
                    "max_new_tokens": 1500,
                    "temperature": 0.7,
                    "do_sample": True,
                    "top_p": 0.9
                }
            },
            # Hard cap so a hung model cannot stall the request indefinitely.
            timeout=aiohttp.ClientTimeout(total=30)
        ) as response:

            if response.status != 200:
                error_text = await response.text()
                raise RuntimeError(f"HTTP {response.status}: {error_text}")

            data = await response.json()

            # The inference API returns either a list of generations or a single dict.
            if isinstance(data, list) and data:
                generated_text = data[0].get("generated_text", "")
            elif isinstance(data, dict):
                generated_text = data.get("generated_text", "")
            else:
                raise RuntimeError("Uventet response-format")

            # Extract structured questions from the raw generated text.
            questions = self._parse_quiz_response(generated_text, request.antall_spørsmål)

            if not questions:
                raise RuntimeError("Kunne ikke parse quiz-spørsmål fra AI-respons")

            return questions
|
129 |
+
|
130 |
+
def _build_prompt(self, request, model: str) -> str:
|
131 |
+
"""Bygg prompt for AI-modell"""
|
132 |
+
|
133 |
+
if request.språk == "no":
|
134 |
+
return f"""Generer {request.antall_spørsmål} quiz-spørsmål på norsk om temaet "{request.tema}".
|
135 |
+
|
136 |
+
Format for hvert spørsmål:
|
137 |
+
SPØRSMÅL: [spørsmålstekst]
|
138 |
+
A) [alternativ 1]
|
139 |
+
B) [alternativ 2]
|
140 |
+
C) [alternativ 3]
|
141 |
+
D) [alternativ 4]
|
142 |
+
KORREKT: [A, B, C eller D]
|
143 |
+
FORKLARING: [kort forklaring]
|
144 |
+
|
145 |
+
---
|
146 |
+
|
147 |
+
Tema: {request.tema}
|
148 |
+
Type: {request.type}
|
149 |
+
Vanskelighetsgrad: {request.vanskelighetsgrad}/5
|
150 |
+
|
151 |
+
Start generering:"""
|
152 |
+
else:
|
153 |
+
return f"""Generate {request.antall_spørsmål} quiz questions in English about "{request.tema}".
|
154 |
+
|
155 |
+
Format each question exactly like this:
|
156 |
+
QUESTION: [question text]
|
157 |
+
A) [option 1]
|
158 |
+
B) [option 2]
|
159 |
+
C) [option 3]
|
160 |
+
D) [option 4]
|
161 |
+
CORRECT: [A, B, C, or D]
|
162 |
+
EXPLANATION: [brief explanation]
|
163 |
+
|
164 |
+
---
|
165 |
+
|
166 |
+
Topic: {request.tema}
|
167 |
+
Type: {request.type}
|
168 |
+
Difficulty: {request.vanskelighetsgrad}/5
|
169 |
+
|
170 |
+
Start generating:"""
|
171 |
+
|
172 |
+
def _parse_quiz_response(self, response: str, expected_count: int) -> List[Dict[str, Any]]:
|
173 |
+
"""Parse AI-respons til quiz-spørsmål"""
|
174 |
+
questions = []
|
175 |
+
|
176 |
+
# Spli
|