Delete app-BACKUP.py
app-BACKUP.py +0 -137
app-BACKUP.py
DELETED
@@ -1,137 +0,0 @@
-import streamlit as st
-from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
-from langchain_google_genai import ChatGoogleGenerativeAI
-import fitz
-import json
-import docx
-import os
-
-
-# Title
-st.title("📄 File-based MCQ Generator")
-
-# Sidebar
-st.sidebar.title("Upload & Settings")
-
-# Upload file
-uploaded_file = st.sidebar.file_uploader("Upload a file (PDF or Word)", type=["pdf", "docx"])
-
-# Number of questions
-number_of_questions = st.sidebar.slider("Number of questions", min_value=1, max_value=20, value=5)
-
-# Session states
-if "mcqs" not in st.session_state:
-    st.session_state.mcqs = []
-if "current_q" not in st.session_state:
-    st.session_state.current_q = 0
-if "user_answers" not in st.session_state:
-    st.session_state.user_answers = {}
-if "quiz_finished" not in st.session_state:
-    st.session_state.quiz_finished = False
-
-# Gemini setup
-GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
-llm = ChatGoogleGenerativeAI(
-    model="gemini-2.0-flash",
-    google_api_key=GOOGLE_API_KEY,
-    temperature=0.7
-)
-
-template = """
-You are an expert MCQ generator. Generate {number} unique multiple-choice questions from the given text.
-Each question must have exactly 1 correct answer and 3 incorrect options.
-Strictly return output in the following JSON format (no explanations, no markdown):
-
-[
-  {{
-    "question": "What is ...?",
-    "options": ["Option A", "Option B", "Option C", "Option D"],
-    "answer": "Option D"
-  }},
-  ...
-]
-
-TEXT:
-{text}
-"""
-
-
-prompt = PromptTemplate(
-    input_variables=["text", "number"],
-    template=template
-)
-
-mcq_chain = LLMChain(llm=llm, prompt=prompt)
-
-# Extract text from PDF or Word
-def extract_text(file):
-    if file.name.endswith(".pdf"):
-        # Read the entire file content into memory
-        file_bytes = file.read()
-        # Open the PDF from the byte stream
-        doc = fitz.open(stream=file_bytes, filetype="pdf")
-        # Extract text from all pages
-        text = ""
-        for page in doc:
-            text += page.get_text()
-        return text
-    elif file.name.endswith(".docx"):
-        doc = docx.Document(file)
-        return "\n".join([para.text for para in doc.paragraphs])
-    return ""
-
-# Generate MCQs
-if st.sidebar.button("Generate MCQs"):
-    if uploaded_file is None:
-        st.error("Please upload a file.")
-    else:
-        with st.spinner("Extracting text and generating MCQs..."):
-            text = extract_text(uploaded_file)
-            try:
-                response = mcq_chain.run(text=text, number=str(number_of_questions))
-                mcqs_json = json.loads(response[8:-3])
-                st.session_state.mcqs = mcqs_json
-                st.session_state.current_q = 0
-                st.session_state.user_answers = {}
-                st.session_state.quiz_finished = False
-                st.success("✅ MCQs generated successfully!")
-            except Exception as e:
-                st.error(f"Error generating MCQs: {e}")
-
-# Display question
-if st.session_state.mcqs and not st.session_state.quiz_finished:
-    idx = st.session_state.current_q
-    q_data = st.session_state.mcqs[idx]
-
-    st.subheader(f"Question {idx + 1}: {q_data['question']}")
-
-    with st.form(key=f"form_{idx}"):
-        selected_option = st.radio("Choose an answer:", q_data["options"], key=f"radio_{idx}")
-        submitted = st.form_submit_button("Next")
-
-        if submitted:
-            st.session_state.user_answers[idx] = selected_option
-            if idx < len(st.session_state.mcqs) - 1:
-                st.session_state.current_q += 1
-            else:
-                st.session_state.quiz_finished = True
-                st.success("🎉 Quiz completed!")
-
-# Show result
-if st.session_state.quiz_finished:
-    st.header("📊 Quiz Results")
-    score = 0
-    total = len(st.session_state.mcqs)
-
-    for i, q in enumerate(st.session_state.mcqs):
-        user_ans = st.session_state.user_answers.get(i)
-        correct_ans = q["answer"]
-        if user_ans == correct_ans:
-            score += 1
-        st.markdown(f"**Q{i+1}: {q['question']}**")
-        st.markdown(f"- Your answer: {user_ans}")
-        st.markdown(f"- Correct answer: {correct_ans}")
-        st.markdown("---")
-
-    st.success(f"✅ You scored {score} out of {total}")