from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr
import re

model_path = "prajjwal888/Llama-2-7b-chat-question-generation"

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_path)
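
# Note: torch.float16 assumes accelerator support. A hedged sketch of a CPU-safe
# dtype choice (an assumption, not part of the original Space) would be:
#
#     dtype = torch.float16 if torch.cuda.is_available() else torch.float32
#     model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map="auto")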

def parse_generated_text(text: str) -> dict:
    # Strip Llama-2 [INST]/[/INST] markers and collapse a doubled "Question:" prefix.
    clean_text = re.sub(r"\[/?INST\]", "", text)
    clean_text = re.sub(r"Question:\s*Question:", "Question:", clean_text)
    clean_text = clean_text.strip()

    # Split the output into the question body and an optional hint.
    match = re.search(r"Question:\s*(.*?)(?:\nHint:|Hint:)(.*)", clean_text, re.DOTALL)
    if match:
        question = match.group(1).strip().strip('"').replace("Question:", "").strip()
        hint = match.group(2).strip().strip('"')
    else:
        question = clean_text.strip()
        hint = "No hint available"

    return {
        "question": question,
        "hint": hint
    }
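
# Example (illustrative, not real model output): parsing
#     'Question: "What is inertia?"\nHint: Think of Newton\'s first law.'
# yields {"question": "What is inertia?", "hint": "Think of Newton's first law."}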

def generate_questions(topic, difficulty, types, count):
    print("Received input:", topic, difficulty, types, count)
    try:
        if not types:
            # CheckboxGroup can come back empty; fail fast with a readable message.
            return "Please select at least one question type."

        pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=200,  # cap generated tokens only; max_length would count the prompt too
            temperature=0.7,
            top_p=0.9,
            do_sample=True
        )

        questions = []
        for _ in range(count):
            for q_type in types:
                prompt = (
                    f"Generate a {difficulty} difficulty {q_type} question about {topic}.\n"
                    "Format strictly as follows:\n"
                    "Question: <your question here>\n"
                    "Hint: <your hint here or 'No hint available'>"
                )
                formatted_prompt = f"<s>[INST] {prompt} [/INST]"
                print("Prompt:", formatted_prompt)

                result = pipe(formatted_prompt)
                print("Raw Output:", result)

                # The pipeline echoes the prompt in generated_text; drop it before parsing.
                generated_text = result[0]['generated_text'].replace(formatted_prompt, "").strip()
                parsed = parse_generated_text(generated_text)
                print("Parsed Output:", parsed)

                # Safe fallbacks for unparseable output
                if not parsed['question']:
                    parsed['question'] = "⚠️ Could not parse question."
                if not parsed['hint']:
                    parsed['hint'] = "No hint available"

                formatted = f"**Type**: {q_type}\n\n**Question**: {parsed['question']}\n\n**Hint**: {parsed['hint']}\n\n---"
                questions.append(formatted)

        final_output = "\n\n".join(questions)
        print("Final Output:", final_output)
        return final_output
    except Exception as e:
        print("Error:", e)
        return f"❌ Something went wrong: {e}"

iface = gr.Interface(
    fn=generate_questions,
    inputs=[
        gr.Textbox(label="Topic"),
        gr.Dropdown(choices=["easy", "medium", "hard"], label="Difficulty", value="medium"),
        gr.CheckboxGroup(choices=["Conceptual", "Numerical", "Application"], label="Question Types"),
        gr.Slider(minimum=1, maximum=5, step=1, value=2, label="Number of Questions per Type")
    ],
    outputs=gr.Markdown(label="Generated Questions"),
    title="AI Question Generator",
    description="Enter a topic, select difficulty and question types to generate AI-powered questions."
)
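
# Quick local smoke test (hedged; the topic and type below are illustrative):
#     print(generate_questions("photosynthesis", "easy", ["Conceptual"], 1))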

if __name__ == "__main__":
    iface.queue().launch()