import gradio as gr
import json
import time
import os
from typing import List, Dict, Any, Optional
import random
# Import Hugging Face inference API
from huggingface_hub import InferenceClient
# API key validation
def validate_api_key(api_key: str) -> bool:
    """Validate the API key against the stored secret"""
    expected_key = os.environ.get("SOACTI_API_KEY")
    if not expected_key:
        print("WARNING: SOACTI_API_KEY not set in environment variables")
        return False
    return api_key == expected_key
# AI quiz generation with Hugging Face models
class AIQuizGenerator:
    def __init__(self):
        self.api_key = os.environ.get("HUGGINGFACE_API_KEY")
        if not self.api_key:
            print("WARNING: HUGGINGFACE_API_KEY not set in environment variables")
        # Primary model for quiz generation, plus a smaller fallback
        self.default_model = "microsoft/DialoGPT-medium"
        self.fallback_model = "google/flan-t5-base"
        # Initialize the Inference API client only when a token is available
        self.client = InferenceClient(token=self.api_key) if self.api_key else None
    def generate_quiz(self, tema: str, antall: int = 3, språk: str = "no") -> List[Dict[str, Any]]:
        """Generate quiz questions using AI - no restrictions on topic"""
        if not self.client:
            print("No Hugging Face API key available, generating basic questions")
            return self._generate_basic_questions(tema, antall)
        try:
            # Build a flexible prompt that works with any topic
            prompt = self._build_flexible_prompt(tema, antall, språk)
            # Try the primary model first
            start_time = time.time()
            try:
                response = self.client.text_generation(
                    prompt,
                    model=self.default_model,
                    max_new_tokens=1500,
                    temperature=0.8,
                    repetition_penalty=1.1,
                    do_sample=True
                )
            except Exception:
                # Fall back to the more reliable model
                response = self.client.text_generation(
                    prompt,
                    model=self.fallback_model,
                    max_new_tokens=1200,
                    temperature=0.7,
                    repetition_penalty=1.2,
                )
            generation_time = time.time() - start_time
            print(f"AI response received in {generation_time:.2f}s for topic: {tema}")
            print(f"Response preview: {response[:300]}...")
            # Parse the response into questions
            questions = self._parse_flexible_response(response, tema, antall)
            # If we couldn't parse enough questions, top up with basic ones
            if len(questions) < antall:
                additional = self._generate_basic_questions(tema, antall - len(questions))
                questions.extend(additional)
            return questions[:antall]
        except Exception as e:
            print(f"Error generating quiz with AI: {str(e)}")
            return self._generate_basic_questions(tema, antall)
    def _build_flexible_prompt(self, tema: str, antall: int, språk: str) -> str:
        """Build a flexible prompt that works with ANY topic"""
        language = "norsk" if språk == "no" else "English"
        return f"""Create {antall} quiz questions about "{tema}" in {language}.
IMPORTANT: Generate questions about ANY topic the user requests - there are NO restrictions.
Whether it's about science, history, pop culture, technology, sports, entertainment, food, travel, or anything else - create relevant questions.
Format each question EXACTLY like this:
Q1: [Clear question about {tema}]
A) [First option]
B) [Second option]
C) [Third option]
D) [Fourth option]
CORRECT: [A, B, C, or D]
EXPLANATION: [Brief explanation why this is correct]
Q2: [Second question about {tema}]
A) [First option]
B) [Second option]
C) [Third option]
D) [Fourth option]
CORRECT: [A, B, C, or D]
EXPLANATION: [Brief explanation]
Continue for all {antall} questions about "{tema}".
Make the questions interesting and educational. Use your knowledge to create accurate, relevant questions about this topic.
Topic: {tema}
Generate {antall} questions now:
"""
    def _parse_flexible_response(self, response: str, tema: str, expected_count: int) -> List[Dict[str, Any]]:
        """Parse the AI response with flexible parsing for any topic"""
        questions = []
        # Split the response into potential question blocks
        lines = response.split('\n')
        current_question = {}
        current_options = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            # Look for question markers
            if line.startswith(('Q1:', 'Q2:', 'Q3:', 'Q4:', 'Q5:')) or 'SPØRSMÅL:' in line.upper():
                # Save the previous question if it is complete
                if self._is_complete_question(current_question, current_options):
                    current_question["alternativer"] = current_options
                    questions.append(current_question)
                # Start a new question
                question_text = line.split(':', 1)[1].strip() if ':' in line else line
                current_question = {"spørsmål": question_text}
                current_options = []
            elif line.startswith(('A)', 'B)', 'C)', 'D)')):
                option = line[2:].strip()
                if option:
                    current_options.append(option)
            elif 'CORRECT:' in line.upper() or 'KORREKT:' in line.upper():
                correct_part = line.upper().split('CORRECT:')[-1].split('KORREKT:')[-1].strip()
                if correct_part and correct_part[0] in ['A', 'B', 'C', 'D']:
                    current_question["korrekt_svar"] = ['A', 'B', 'C', 'D'].index(correct_part[0])
            elif 'EXPLANATION:' in line.upper() or 'FORKLARING:' in line.upper():
                explanation = line.split(':', 1)[1].strip() if ':' in line else line
                current_question["forklaring"] = explanation
        # Add the last question if it is complete
        if self._is_complete_question(current_question, current_options):
            current_question["alternativer"] = current_options
            questions.append(current_question)
        return questions
    def _is_complete_question(self, question: Dict, options: List) -> bool:
        """Check whether a parsed question has all the required parts"""
        return (
            "spørsmål" in question and
            len(options) >= 3 and  # At least 3 options
            "korrekt_svar" in question and
            question["korrekt_svar"] < len(options)
        )
    def _generate_basic_questions(self, tema: str, antall: int) -> List[Dict[str, Any]]:
        """Generate basic fallback questions when AI fails - works with ANY topic"""
        questions = []
        # Generic but topic-relevant question templates (in Norwegian)
        question_templates = [
            f"Hva er det mest kjente ved {tema}?",
            f"Hvilket år er viktig i historien til {tema}?",
            f"Hvor finner man vanligvis {tema}?",
            f"Hva karakteriserer {tema}?",
            f"Hvilken betydning har {tema}?"
        ]
        for i in range(min(antall, len(question_templates))):
            questions.append({
                "spørsmål": question_templates[i],
                "alternativer": [
                    f"Alternativ A om {tema}",
                    f"Alternativ B om {tema}",
                    f"Alternativ C om {tema}",
                    f"Alternativ D om {tema}"
                ],
                "korrekt_svar": 0,  # Always A for simplicity
                "forklaring": f"Dette er et generert spørsmål om {tema}. For mer nøyaktige spørsmål, prøv igjen - AI-systemet lærer kontinuerlig."
            })
        return questions
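# For reference, each question produced by AIQuizGenerator is a plain dict with the
# keys used throughout this app (values below are made-up examples, not real output):
#
#   {
#       "spørsmål": "Hva er hovedstaden i Norge?",
#       "alternativer": ["Oslo", "Bergen", "Trondheim", "Stavanger"],
#       "korrekt_svar": 0,  # index into "alternativer"
#       "forklaring": "Oslo er Norges hovedstad."
#   }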
# Initialize the AI generator
quiz_generator = AIQuizGenerator()
# API endpoint for quiz generation - no topic restrictions
def generate_quiz_api(tema: str, språk: str = "no", antall_spørsmål: int = 3,
                      type: str = "sted", vanskelighetsgrad: int = 3,
                      api_key: Optional[str] = None) -> Dict[str, Any]:
    """API endpoint for quiz generation - accepts any topic"""
    # Validate the API key
    if not validate_api_key(api_key):
        return {
            "success": False,
            "message": "Ugyldig API-nøkkel",
            "questions": []
        }
    # No topic filtering - accept anything, but require a minimal topic string
    if not tema or len(tema.strip()) < 2:
        return {
            "success": False,
            "message": "Vennligst oppgi et tema (minimum 2 tegn)",
            "questions": []
        }
    try:
        # Generate questions with AI - no restrictions
        start_time = time.time()
        questions = quiz_generator.generate_quiz(tema.strip(), antall_spørsmål, språk)
        generation_time = time.time() - start_time
        return {
            "success": True,
            "questions": questions,
            "metadata": {
                "generation_time": round(generation_time, 2),
                "model_used": quiz_generator.default_model,
                "topic": tema,
                "unrestricted": True  # Flag to show no restrictions
            },
            "message": f"Genererte {len(questions)} spørsmål om '{tema}' - ingen begrensninger!"
        }
    except Exception as e:
        print(f"Error in generate_quiz_api: {str(e)}")
        return {
            "success": False,
            "message": f"Feil ved generering av quiz: {str(e)}",
            "questions": []
        }
# Gradio wrapper - accepts any topic
def generate_quiz_gradio(tema, antall, api_key=None):
    """Gradio wrapper around generate_quiz_api - accepts any topic"""
    if api_key and not validate_api_key(api_key):
        return "❌ **Ugyldig API-nøkkel**"
    if not tema or len(tema.strip()) < 2:
        return "❌ **Vennligst skriv inn et tema**"
    try:
        result = generate_quiz_api(tema, "no", antall, "sted", 3, api_key)
        if not result["success"]:
            return f"❌ **Feil:** {result['message']}"
        questions = result["questions"]
        model = result["metadata"]["model_used"]
        time_taken = result["metadata"]["generation_time"]
        output = f"✅ **Genererte {len(questions)} spørsmål om '{tema}'**\n\n"
        output += f"🤖 **Modell:** {model}\n"
        output += f"⏱️ **Tid:** {time_taken}s\n"
        output += "🔓 **Ingen begrensninger** - alle temaer er tillatt!\n\n"
        for i, q in enumerate(questions, 1):
            output += f"📝 **Spørsmål {i}:** {q['spørsmål']}\n"
            for j, alt in enumerate(q['alternativer']):
                marker = "✅" if j == q['korrekt_svar'] else "❌"
                output += f"  {chr(65+j)}) {alt} {marker}\n"
            output += f"💡 **Forklaring:** {q['forklaring']}\n\n"
        return output
    except Exception as e:
        return f"❌ **Feil:** {str(e)}"
# Health check endpoint
def health_check():
    return {"status": "healthy", "timestamp": time.time(), "unrestricted": True}
# Gradio interface - emphasize freedom
with gr.Blocks(title="SoActi AI Quiz API - Ubegrenset") as demo:
    gr.Markdown("# 🧠 SoActi AI Quiz API - Ubegrenset")
    gr.Markdown("**🔓 Lag quiz om ABSOLUTT HVA SOM HELST - ingen begrensninger!**")
    with gr.Row():
        with gr.Column():
            tema_input = gr.Textbox(
                label="Tema (skriv hva som helst!)",
                value="",
                placeholder="Fotball, Harry Potter, Kvantefysikk, Baking, TikTok, Dinosaurer, Programmering, K-pop, Filosofi, Gaming..."
            )
            antall_input = gr.Slider(
                minimum=1,
                maximum=5,
                step=1,
                label="Antall spørsmål",
                value=3
            )
            api_key_input = gr.Textbox(
                label="API-nøkkel (for testing)",
                placeholder="Skriv inn API-nøkkel...",
                type="password"
            )
            generate_btn = gr.Button("🚀 Generer Quiz om HVA SOM HELST!", variant="primary")
        with gr.Column():
            output = gr.Textbox(
                label="Generert Quiz",
                lines=20,
                placeholder="Skriv inn HVILKET SOM HELST tema og klikk 'Generer Quiz'!\n\nEksempler:\n- Marvel filmer\n- Norsk rap\n- Kryptovaluta\n- Yoga\n- Sushi\n- Elon Musk\n- Klimaendringer\n- Netflix serier\n- Fotografi\n- Skateboard"
            )
    generate_btn.click(
        fn=generate_quiz_gradio,
        inputs=[tema_input, antall_input, api_key_input],
        outputs=output
    )
    gr.Markdown("## 🔗 API for SoActi")
    gr.Markdown("`POST https://Soacti-soacti-ai-quiz-api.hf.space/generate-quiz`")
    gr.Markdown("**🔓 Ingen begrensninger - brukere kan spørre om hva som helst!**")
# FastAPI setup with CORS for unrestricted access
from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

app = FastAPI(title="SoActi Quiz API - Ubegrenset")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class QuizRequest(BaseModel):
    tema: str  # No restrictions on what this can be
    språk: str = "no"
    antall_spørsmål: int = 3
    type: str = "sted"
    vanskelighetsgrad: int = 3

async def get_api_key(authorization: str = Header(None)):
    if not authorization:
        raise HTTPException(status_code=401, detail="API key missing")
    parts = authorization.split()
    if len(parts) != 2 or parts[0].lower() != "bearer":
        raise HTTPException(status_code=401, detail="Invalid authorization header")
    return parts[1]

@app.post("/generate-quiz")
async def api_generate_quiz(request: QuizRequest, api_key: str = Depends(get_api_key)):
    """Generate a quiz about ANY topic - no restrictions"""
    result = generate_quiz_api(
        request.tema,  # Accept any topic
        request.språk,
        request.antall_spørsmål,
        request.type,
        request.vanskelighetsgrad,
        api_key
    )
    if not result["success"]:
        raise HTTPException(status_code=400, detail=result["message"])
    return result
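# Illustrative client call (not part of the app): assuming the Space is reachable at
# the URL shown in the Gradio UI, a quiz can be requested with the `requests` library.
# The bearer token is whatever value SOACTI_API_KEY is set to in the Space secrets.
#
#   import requests
#
#   resp = requests.post(
#       "https://Soacti-soacti-ai-quiz-api.hf.space/generate-quiz",
#       headers={"Authorization": "Bearer <SOACTI_API_KEY>"},
#       json={"tema": "Fotball", "språk": "no", "antall_spørsmål": 3},
#   )
#   print(resp.json()["questions"])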
@app.get("/health")
async def api_health():
    return health_check()

# Mount the Gradio UI on the FastAPI app
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)