import gradio as gr
import json
import time
import os
from typing import List, Dict, Any, Optional
import random
import requests

# API key validation
def validate_api_key(api_key: str) -> bool:
    """Validate the API key against the stored secret"""
    expected_key = os.environ.get("SOACTI_API_KEY")
    if not expected_key:
        print("WARNING: SOACTI_API_KEY not set in environment variables")
        return False
    return api_key == expected_key
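# Illustrative usage (assumes the SOACTI_API_KEY secret is configured for the Space):
#   validate_api_key("some-key")  # True only if it matches the stored secret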
# Improved AI Quiz generation
class AIQuizGenerator:
    def __init__(self):
        self.api_key = os.environ.get("HUGGINGFACE_API_KEY")
        self.api_url = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-large"
        # Backup models to try
        self.models = [
            "microsoft/DialoGPT-large",
            "google/flan-t5-large",
            "facebook/blenderbot-400M-distill",
            "microsoft/DialoGPT-medium"
        ]
        print(f"AI Generator initialized. API key available: {bool(self.api_key)}")

    def generate_quiz(self, tema: str, antall: int = 3, språk: str = "no") -> List[Dict[str, Any]]:
        """Generate quiz questions using the Hugging Face Inference API"""
        if not self.api_key:
            print("❌ No Hugging Face API key - using enhanced fallback")
            return self._generate_enhanced_fallback(tema, antall)

        # Try multiple models until one works
        for model in self.models:
            try:
                print(f"🤖 Trying model: {model}")
                questions = self._try_model(model, tema, antall, språk)
                if questions and len(questions) > 0:
                    print(f"✅ Success with model: {model}")
                    return questions
            except Exception as e:
                print(f"❌ Model {model} failed: {str(e)}")
                continue

        print("❌ All AI models failed - using enhanced fallback")
        return self._generate_enhanced_fallback(tema, antall)
    def _try_model(self, model: str, tema: str, antall: int, språk: str) -> List[Dict[str, Any]]:
        """Try a specific model"""
        # Create a very specific prompt
        prompt = self._create_specific_prompt(tema, antall, språk)

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 800,
                "temperature": 0.7,
                "do_sample": True,
                "top_p": 0.9
            }
        }

        api_url = f"https://api-inference.huggingface.co/models/{model}"
        start_time = time.time()
        response = requests.post(api_url, headers=headers, json=payload, timeout=30)
        generation_time = time.time() - start_time

        print(f"API Response Status: {response.status_code}")
        if response.status_code != 200:
            raise Exception(f"API returned {response.status_code}: {response.text}")

        result = response.json()
        if isinstance(result, list) and len(result) > 0:
            generated_text = result[0].get("generated_text", "")
        else:
            generated_text = str(result)

        print(f"Generated text preview: {generated_text[:200]}...")

        # Parse the response
        questions = self._parse_ai_response(generated_text, tema, antall)

        # Add metadata
        for q in questions:
            q["_metadata"] = {
                "model": model,
                "generation_time": generation_time,
                "ai_generated": True
            }
        return questions
    def _create_specific_prompt(self, tema: str, antall: int, språk: str) -> str:
        """Create a very specific prompt for better results"""
        if språk == "no":
            return f"""Lag {antall} quiz-spørsmål om {tema} på norsk.
Format:
SPØRSMÅL: [konkret spørsmål om {tema}]
A) [første alternativ]
B) [andre alternativ]
C) [tredje alternativ]
D) [fjerde alternativ]
SVAR: [A, B, C eller D]
FORKLARING: [kort forklaring]
Eksempel om fotball:
SPØRSMÅL: Hvem vant Ballon d'Or i 2023?
A) Lionel Messi
B) Erling Haaland
C) Kylian Mbappé
D) Karim Benzema
SVAR: A
FORKLARING: Lionel Messi vant sin åttende Ballon d'Or i 2023.
Nå lag {antall} spørsmål om {tema}:"""
        else:
            return f"""Create {antall} quiz questions about {tema} in English.
Format:
QUESTION: [specific question about {tema}]
A) [first option]
B) [second option]
C) [third option]
D) [fourth option]
ANSWER: [A, B, C or D]
EXPLANATION: [brief explanation]
Now create {antall} questions about {tema}:"""
    def _parse_ai_response(self, text: str, tema: str, expected_count: int) -> List[Dict[str, Any]]:
        """Parse AI response into structured questions"""
        questions = []
        # Split into sections
        sections = text.split("SPØRSMÅL:") if "SPØRSMÅL:" in text else text.split("QUESTION:")
        for section in sections[1:]:  # Skip first empty section
            try:
                question = self._parse_single_question(section, tema)
                if question:
                    questions.append(question)
            except Exception as e:
                print(f"Error parsing question section: {e}")
                continue
        return questions[:expected_count]

    def _parse_single_question(self, section: str, tema: str) -> Optional[Dict[str, Any]]:
        """Parse a single question from text"""
        lines = [line.strip() for line in section.split('\n') if line.strip()]
        if not lines:
            return None

        question_text = lines[0].strip()
        options = []
        correct_answer = 0
        explanation = ""

        for line in lines[1:]:
            if line.startswith(('A)', 'B)', 'C)', 'D)')):
                options.append(line[2:].strip())
            elif line.startswith(('SVAR:', 'ANSWER:')):
                answer_part = line.split(':', 1)[1].strip()
                if answer_part in ['A', 'B', 'C', 'D']:
                    correct_answer = ['A', 'B', 'C', 'D'].index(answer_part)
            elif line.startswith(('FORKLARING:', 'EXPLANATION:')):
                explanation = line.split(':', 1)[1].strip()

        if len(options) >= 3 and question_text:
            # Ensure we have 4 options
            while len(options) < 4:
                options.append(f"Alternativ {len(options) + 1}")
            return {
                "spørsmål": question_text,
                "alternativer": options[:4],
                "korrekt_svar": correct_answer,
                "forklaring": explanation or f"Spørsmål om {tema}"
            }
        return None
    def _generate_enhanced_fallback(self, tema: str, antall: int) -> List[Dict[str, Any]]:
        """Generate better fallback questions based on topic analysis"""
        # Analyze topic to create better questions
        tema_lower = tema.lower()
        questions = []

        # Football/Soccer specific
        if any(word in tema_lower for word in ['fotball', 'football', 'soccer', 'messi', 'ronaldo', 'haaland']):
            questions = [
                {
                    "spørsmål": "Hvem regnes som en av verdens beste fotballspillere gjennom tidene?",
                    "alternativer": ["Lionel Messi", "Michael Jordan", "Tiger Woods", "Usain Bolt"],
                    "korrekt_svar": 0,
                    "forklaring": "Lionel Messi regnes som en av de beste fotballspillerne noensinne med 8 Ballon d'Or-priser."
                },
                {
                    "spørsmål": "Hvilket land har vunnet flest VM i fotball?",
                    "alternativer": ["Tyskland", "Argentina", "Brasil", "Frankrike"],
                    "korrekt_svar": 2,
                    "forklaring": "Brasil har vunnet VM i fotball 5 ganger (1958, 1962, 1970, 1994, 2002)."
                },
                {
                    "spørsmål": "Hva kalles den prestisjetunge individuelle prisen i fotball?",
                    "alternativer": ["Golden Boot", "Ballon d'Or", "FIFA Award", "Champions Trophy"],
                    "korrekt_svar": 1,
                    "forklaring": "Ballon d'Or er den mest prestisjetunge individuelle prisen i fotball."
                }
            ]
        # Technology specific
        elif any(word in tema_lower for word in ['teknologi', 'technology', 'ai', 'computer', 'programming']):
            questions = [
                {
                    "spørsmål": f"Hva er en viktig utvikling innen {tema}?",
                    "alternativer": ["Kunstig intelligens", "Dampmaskin", "Hjulet", "Ild"],
                    "korrekt_svar": 0,
                    "forklaring": f"Kunstig intelligens er en av de viktigste utviklingene innen moderne {tema}."
                }
            ]

        # Generic but better questions
        if not questions:
            questions = [
                {
                    "spørsmål": f"Hva er karakteristisk for {tema}?",
                    "alternativer": [f"Viktig egenskap ved {tema}", "Irrelevant faktor", "Tilfeldig element", "Ukjent aspekt"],
                    "korrekt_svar": 0,
                    "forklaring": f"Dette spørsmålet handler om de karakteristiske egenskapene ved {tema}."
                },
                {
                    "spørsmål": f"Hvor er {tema} mest relevant?",
                    "alternativer": ["I relevant kontekst", "I irrelevant sammenheng", "Ingen steder", "Overalt"],
                    "korrekt_svar": 0,
                    "forklaring": f"{tema} er mest relevant i sin naturlige kontekst."
                }
            ]

        # Add metadata to show these are fallbacks
        for q in questions:
            q["_metadata"] = {
                "model": "enhanced_fallback",
                "generation_time": 0.1,
                "ai_generated": False
            }
        return questions[:antall]
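# Illustrative direct usage of the class above (no assumptions beyond the
# environment variables already referenced in this file):
#   gen = AIQuizGenerator()
#   spørsmål = gen.generate_quiz("norsk historie", antall=3, språk="no")
#   # Returns AI-generated questions when HUGGINGFACE_API_KEY is set and a model
#   # responds; otherwise the enhanced fallback questions are returned.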
# Initialize the AI generator
quiz_generator = AIQuizGenerator()

# API endpoint for quiz generation
def generate_quiz_api(tema: str, språk: str = "no", antall_spørsmål: int = 3,
                      type: str = "sted", vanskelighetsgrad: int = 3,
                      api_key: Optional[str] = None) -> Dict[str, Any]:
    """API endpoint for quiz generation"""
    if not validate_api_key(api_key):
        return {
            "success": False,
            "message": "Ugyldig API-nøkkel",
            "questions": []
        }
    if not tema or len(tema.strip()) < 2:
        return {
            "success": False,
            "message": "Vennligst oppgi et tema (minimum 2 tegn)",
            "questions": []
        }
    try:
        start_time = time.time()
        questions = quiz_generator.generate_quiz(tema.strip(), antall_spørsmål, språk)
        total_time = time.time() - start_time

        # Check if we got real AI questions or fallbacks
        ai_generated = any(q.get("_metadata", {}).get("ai_generated", False) for q in questions)
        model_used = questions[0].get("_metadata", {}).get("model", "unknown") if questions else "none"

        return {
            "success": True,
            "questions": questions,
            "metadata": {
                "generation_time": round(total_time, 2),
                "model_used": model_used,
                "topic": tema,
                "ai_generated": ai_generated,
                "fallback_used": not ai_generated
            },
            "message": f"Genererte {len(questions)} spørsmål om '{tema}'" +
                       (" med AI" if ai_generated else " med forbedret fallback")
        }
    except Exception as e:
        print(f"Error in generate_quiz_api: {str(e)}")
        return {
            "success": False,
            "message": f"Feil ved generering av quiz: {str(e)}",
            "questions": []
        }
# Gradio interface
def generate_quiz_gradio(tema, antall, api_key=None):
    """Gradio wrapper"""
    if api_key and not validate_api_key(api_key):
        return "❌ **Ugyldig API-nøkkel**"
    if not tema or len(tema.strip()) < 2:
        return "❌ **Vennligst skriv inn et tema**"
    try:
        # Slider values may arrive as floats; the generator expects an int count
        antall = int(antall)
        result = generate_quiz_api(tema, "no", antall, "sted", 3, api_key)
        if not result["success"]:
            return f"❌ **Feil:** {result['message']}"

        questions = result["questions"]
        metadata = result["metadata"]

        # Show different info based on whether AI was used
        if metadata.get("ai_generated", False):
            status_icon = "🤖"
            status_text = "AI-generert"
        else:
            status_icon = "🔄"
            status_text = "Forbedret fallback"

        output = f"✅ **Genererte {len(questions)} spørsmål om '{tema}'**\n\n"
        output += f"{status_icon} **Type:** {status_text}\n"
        output += f"⚙️ **Modell:** {metadata['model_used']}\n"
        output += f"⏱️ **Tid:** {metadata['generation_time']}s\n\n"

        for i, q in enumerate(questions, 1):
            output += f"📝 **Spørsmål {i}:** {q['spørsmål']}\n"
            for j, alt in enumerate(q['alternativer']):
                marker = "✅" if j == q['korrekt_svar'] else "❌"
                output += f"  {chr(65+j)}) {alt} {marker}\n"
            output += f"💡 **Forklaring:** {q['forklaring']}\n\n"
        return output
    except Exception as e:
        return f"❌ **Feil:** {str(e)}"
# Health check
def health_check():
    return {
        "status": "healthy",
        "timestamp": time.time(),
        "ai_available": bool(os.environ.get("HUGGINGFACE_API_KEY"))
    }
# Gradio UI
with gr.Blocks(title="SoActi AI Quiz API - Forbedret") as demo:
    gr.Markdown("# 🧠 SoActi AI Quiz API - Forbedret")
    gr.Markdown("**🚀 Ekte AI-generering med forbedret fallback**")

    with gr.Row():
        with gr.Column():
            tema_input = gr.Textbox(
                label="Tema",
                value="verdens beste fotballspillere",
                placeholder="Fotball, teknologi, historie, mat, filmer..."
            )
            antall_input = gr.Slider(
                minimum=1,
                maximum=5,
                step=1,
                label="Antall spørsmål",
                value=3
            )
            api_key_input = gr.Textbox(
                label="API-nøkkel",
                placeholder="Skriv inn API-nøkkel...",
                type="password"
            )
            generate_btn = gr.Button("🚀 Generer Forbedret Quiz!", variant="primary")
        with gr.Column():
            output = gr.Textbox(
                label="Generert Quiz",
                lines=20,
                placeholder="Skriv inn et tema og test den forbedrede AI-genereringen!"
            )

    generate_btn.click(
        fn=generate_quiz_gradio,
        inputs=[tema_input, antall_input, api_key_input],
        outputs=output
    )

    gr.Markdown("## 🔗 API Endepunkt")
    gr.Markdown("`POST https://Soacti-soacti-ai-quiz-api.hf.space/generate-quiz`")
# FastAPI setup
from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

app = FastAPI(title="SoActi Quiz API - Forbedret")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class QuizRequest(BaseModel):
    tema: str
    språk: str = "no"
    antall_spørsmål: int = 3
    type: str = "sted"
    vanskelighetsgrad: int = 3
async def get_api_key(authorization: str = Header(None)):
    if not authorization:
        raise HTTPException(status_code=401, detail="API key missing")
    parts = authorization.split()
    if len(parts) != 2 or parts[0].lower() != "bearer":
        raise HTTPException(status_code=401, detail="Invalid authorization header")
    return parts[1]

# Register the REST routes before Gradio is mounted at "/" so they take
# precedence over the catch-all mount.
@app.post("/generate-quiz")
async def api_generate_quiz(request: QuizRequest, api_key: str = Depends(get_api_key)):
    result = generate_quiz_api(
        request.tema,
        request.språk,
        request.antall_spørsmål,
        request.type,
        request.vanskelighetsgrad,
        api_key
    )
    if not result["success"]:
        raise HTTPException(status_code=400, detail=result["message"])
    return result

@app.get("/health")
async def api_health():
    return health_check()
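# Illustrative request against the quiz endpoint above (the bearer token must
# match the SOACTI_API_KEY secret; the JSON body mirrors the QuizRequest model):
#   curl -X POST https://Soacti-soacti-ai-quiz-api.hf.space/generate-quiz \
#     -H "Authorization: Bearer <SOACTI_API_KEY>" \
#     -H "Content-Type: application/json" \
#     -d '{"tema": "fotball", "språk": "no", "antall_spørsmål": 3}'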
# Mount Gradio
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)