Update app.py
app.py CHANGED
@@ -1,399 +1,67 @@
 import streamlit as st
 import os
 import requests
-import hashlib
-from typing import List, Dict, Any, Optional, Mapping
 from datetime import datetime
-import json
-import re
-from urllib.parse import quote
 import time
 import random
 import markdown
-
-from crewai import Agent, Task, Crew, Process
-from crewai.tools import BaseTool
 from groq import Groq
-import nltk
-from textstat import flesch_reading_ease, flesch_kincaid_grade
-from bs4 import BeautifulSoup
-import concurrent.futures
-from duckduckgo_search import DDGS
-
-# Download NLTK data
-try:
-    nltk.download('punkt', quiet=True)
-    nltk.download('stopwords', quiet=True)
-    nltk.download('wordnet', quiet=True)
-except:
-    pass
-
-# Custom Tools for Academic Research and Writing
-class AcademicResearchTool(BaseTool):
-    name: str = "academic_research"
-    description: str = "Conduct comprehensive academic research for thesis/synopsis"
-
-    def _run(self, topic: str, research_areas: str) -> str:
-        try:
-            time.sleep(1)
-            search_queries = [
-                f"{topic} research studies",
-                f"{topic} academic papers",
-                f"{topic} recent developments",
-                f"{topic} methodology",
-                f"{topic} literature review"
-            ]
-            all_research = []
-            with DDGS() as ddgs:
-                for query in search_queries:
-                    try:
-                        results = list(ddgs.text(query, max_results=6))
-                        for result in results:
-                            all_research.append({
-                                'query': query,
-                                'title': result.get('title', ''),
-                                'content': result.get('body', ''),
-                                'url': result.get('href', ''),
-                                'relevance_score': self._calculate_relevance(result.get('body', ''), topic)
-                            })
-                        time.sleep(0.5)
-                    except Exception:
-                        continue
-            unique_research = self._remove_duplicates(all_research)
-            unique_research.sort(key=lambda x: x['relevance_score'], reverse=True)
-            return json.dumps(unique_research[:15])
-        except Exception as e:
-            return f"Research failed: {str(e)}"
-
-    def _calculate_relevance(self, content: str, topic: str) -> float:
-        topic_words = set(topic.lower().split())
-        content_words = set(content.lower().split())
-        if not topic_words or not content_words:
-            return 0.0
-        intersection = topic_words.intersection(content_words)
-        return len(intersection) / len(topic_words)
-
-    def _remove_duplicates(self, research_list: List[Dict]) -> List[Dict]:
-        seen_urls = set()
-        unique_research = []
-        for item in research_list:
-            if item['url'] not in seen_urls:
-                seen_urls.add(item['url'])
-                unique_research.append(item)
-        return unique_research
-
-class CitationGeneratorTool(BaseTool):
-    name: str = "citation_generator"
-    description: str = "Generate proper academic citations and references"
-
-    def _run(self, research_data: str) -> str:
-        try:
-            research_items = json.loads(research_data)
-            citations = []
-            for i, item in enumerate(research_items[:10]):
-                title = item.get('title', 'Unknown Title')
-                url = item.get('url', '')
-                domain = url.split('/')[2] if len(url.split('/')) > 2 else 'Unknown'
-                citation = {
-                    'id': f"source_{i+1}",
-                    'title': title,
-                    'url': url,
-                    'domain': domain,
-                    'apa_citation': f"{domain}. ({datetime.now().year}). {title}. Retrieved from {url}",
-                    'in_text': f"({domain}, {datetime.now().year})"
-                }
-                citations.append(citation)
-            return json.dumps(citations)
-        except Exception as e:
-            return f"Citation generation failed: {str(e)}"
-
-class AcademicWritingTool(BaseTool):
-    name: str = "academic_writing"
-    description: str = "Analyze and improve academic writing style"
-
-    def _run(self, text: str, academic_level: str) -> str:
-        try:
-            flesch_score = flesch_reading_ease(text)
-            fk_grade = flesch_kincaid_grade(text)
-            sentences = text.split('.')
-            sentence_lengths = [len(s.split()) for s in sentences if s.strip()]
-            avg_sentence_length = sum(sentence_lengths) / max(len(sentence_lengths), 1)
-            academic_patterns = [
-                "furthermore", "moreover", "additionally", "consequently",
-                "therefore", "thus", "hence", "accordingly", "subsequently"
-            ]
-            pattern_usage = sum(1 for pattern in academic_patterns if pattern in text.lower())
-            level_guidelines = {
-                'undergraduate': {
-                    'target_flesch': (60, 80),
-                    'target_grade': (12, 14),
-                    'sentence_length': (15, 25)
-                },
-                'masters': {
-                    'target_flesch': (50, 70),
-                    'target_grade': (14, 16),
-                    'sentence_length': (18, 30)
-                },
-                'phd': {
-                    'target_flesch': (40, 60),
-                    'target_grade': (16, 18),
-                    'sentence_length': (20, 35)
-                }
-            }
-            guidelines = level_guidelines.get(academic_level.lower(), level_guidelines['masters'])
-            analysis = {
-                'flesch_score': flesch_score,
-                'fk_grade': fk_grade,
-                'avg_sentence_length': avg_sentence_length,
-                'academic_patterns_used': pattern_usage,
-                'target_guidelines': guidelines,
-                'suggestions': []
-            }
-            if flesch_score > guidelines['target_flesch'][1]:
-                analysis['suggestions'].append("Consider more complex sentence structures for academic tone")
-            if avg_sentence_length < guidelines['sentence_length'][0]:
-                analysis['suggestions'].append("Use longer, more detailed sentences")
-            if pattern_usage < 3:
-                analysis['suggestions'].append("Include more academic transition phrases")
-            return json.dumps(analysis)
-        except Exception as e:
-            return f"Academic analysis failed: {str(e)}"
-
-class HumanizationTool(BaseTool):
-    name: str = "humanization_tool"
-    description: str = "Make academic writing sound more human and less AI-like"
-
-    def _run(self, text: str) -> str:
-        try:
-            ai_patterns = [
-                "It is important to note that",
-                "This demonstrates that",
-                "This indicates that",
-                "As previously mentioned",
-                "It should be mentioned that",
-                "This suggests that",
-                "This implies that",
-                "It can be concluded that"
-            ]
-            human_alternatives = [
-                "Notably,",
-                "This shows",
-                "This reveals",
-                "As noted earlier",
-                "It's worth noting",
-                "This suggests",
-                "This implies",
-                "Therefore,"
-            ]
-            humanized_text = text
-            for ai_pattern, human_alt in zip(ai_patterns, human_alternatives):
-                humanized_text = humanized_text.replace(ai_pattern, human_alt)
-            variations = [
-                "Interestingly,",
-                "Surprisingly,",
-                "Remarkably,",
-                "Significantly,",
-                "Importantly,"
-            ]
-            sentences = humanized_text.split('.')
-            for i in range(1, len(sentences), 3):
-                if i < len(sentences) and sentences[i].strip():
-                    variation = random.choice(variations)
-                    sentences[i] = f" {variation} {sentences[i].lstrip()}"
-            humanized_text = '.'.join(sentences)
-            personal_insights = [
-                "Based on the available evidence,",
-                "From the research findings,",
-                "Considering the data,",
-                "In light of these results,"
-            ]
-            if len(sentences) > 5:
-                insight = random.choice(personal_insights)
-                sentences[2] = f" {insight} {sentences[2].lstrip()}"
-            return '.'.join(sentences)
-        except Exception as e:
-            return f"Humanization failed: {str(e)}"
-
-def rate_limit_handler(max_retries=3, base_delay=2):
-    def decorator(func):
-        def wrapper(*args, **kwargs):
-            for attempt in range(max_retries):
-                try:
-                    return func(*args, **kwargs)
-                except Exception as e:
-                    if "rate_limit" in str(e).lower() and attempt < max_retries - 1:
-                        delay = base_delay * (2 ** attempt) + random.uniform(0, 1)
-                        st.warning(f"Rate limit hit. Retrying in {delay:.1f} seconds... (Attempt {attempt + 1}/{max_retries})")
-                        time.sleep(delay)
-                    else:
-                        raise e
-            return None
-        return wrapper
-    return decorator
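Note: the removed rate_limit_handler decorator is defined but never applied to any function in the code shown here. For reference, a hypothetical usage (not part of the original file; the model name is only an example) would look like this:

@rate_limit_handler(max_retries=3, base_delay=2)
def generate_with_retries(client, prompt):
    # Retried with exponential backoff plus jitter whenever the raised
    # exception message contains "rate_limit".
    return client.chat.completions.create(
        model="llama3-70b-8192",
        messages=[{"role": "user", "content": prompt}],
    )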
-
-# Custom LLM class for CrewAI with built-in API
-import litellm
-from langchain.llms.base import LLM
-
-class BuiltInLLM(LLM):
-    model_name: str = "groq/llama-3.3-70b-versatile"
-    api_key: str = "API_KEY"  # <-- Replace with your actual API key

- [old line 242 not shown in this view]
-        super().__init__()
-        os.environ["GROQ_API_KEY"] = self.api_key
-        litellm.set_verbose = False

- [old lines 247-278 not shown in this view]
-    research_agent = Agent(
-        role="Academic Research Specialist",
-        goal="Conduct comprehensive academic research and gather credible sources",
-        backstory="You are a PhD-level researcher with expertise in finding and analyzing academic sources. You understand how to identify credible information and synthesize research findings.",
-        tools=[AcademicResearchTool()],
-        verbose=True,
-        allow_delegation=False,
-        llm=llm
-    )
-    thesis_writer = Agent(
-        role="Academic Thesis Writer",
-        goal="Write sophisticated thesis documents that sound completely human-written",
-        backstory="You are an experienced academic writer who specializes in creating thesis documents. You know how to write in a way that sounds natural and scholarly, avoiding AI patterns while maintaining academic rigor.",
-        tools=[AcademicWritingTool(), CitationGeneratorTool()],
-        verbose=True,
-        allow_delegation=False,
-        llm=llm
-    )
-    humanization_agent = Agent(
-        role="Academic Writing Humanizer",
-        goal="Make academic writing sound completely human and undetectable",
-        backstory="You are an expert editor who specializes in making academic content sound natural and human-written. You know how to eliminate AI patterns and create authentic scholarly writing.",
-        tools=[HumanizationTool()],
-        verbose=True,
-        allow_delegation=False,
-        llm=llm
-    )
-    return research_agent, thesis_writer, humanization_agent
-
-def create_thesis_tasks(topic, document_type, academic_level, research_areas, word_count, agents):
-    research_agent, thesis_writer, humanization_agent = agents
-    research_task = Task(
-        description=f"""
-        Conduct comprehensive academic research for a {document_type} on "{topic}".
-        Research Areas: {research_areas}
-        Academic Level: {academic_level}
-        Target Length: {word_count} words
-        Requirements:
-        - Find 10-15 credible academic sources
-        - Gather recent research and developments
-        - Identify key theories and methodologies
-        - Note different perspectives and debates
-        - Focus on peer-reviewed and scholarly sources
-        - Include both theoretical and practical aspects
-        Provide a detailed research summary with key findings, methodologies, and source analysis.
-        """,
-        agent=research_agent,
-        expected_output="Comprehensive research summary with credible sources and key insights"
-    )
-    thesis_task = Task(
-        description=f"""
-        Write a complete {document_type} on "{topic}" that sounds completely human-written.
-        Academic Level: {academic_level}
-        Target Length: {word_count} words
-        Research Areas: {research_areas}
-        Requirements:
-        - Use the comprehensive research provided
-        - Write in proper academic style for {academic_level} level
-        - Include proper citations and references
-        - Create logical structure with introduction, body, and conclusion
-        - Use varied sentence structures and academic vocabulary
-        - Include critical analysis and original insights
-        - Maintain scholarly tone while sounding natural
-        - Avoid AI-like patterns and formal robotic language
-        - Include methodology, findings, and implications
-        - Make it engaging and intellectually rigorous
-        Structure:
-        1. Introduction and background
-        2. Literature review
-        3. Methodology
-        4. Analysis and findings
-        5. Discussion and implications
-        6. Conclusion and recommendations
-        Important: Write as if you're a human academic expert sharing original research and insights.
-        """,
-        agent=thesis_writer,
-        expected_output="Complete academic thesis document with proper structure and citations",
-        dependencies=[research_task]
-    )
-    humanization_task = Task(
-        description=f"""
-        Polish and humanize the thesis document to make it completely undetectable as AI-written.
-        Requirements:
-        - Remove any remaining AI patterns
-        - Improve natural academic flow
-        - Add authentic human writing touches
-        - Ensure varied sentence structures
-        - Make transitions feel natural and scholarly
-        - Add subtle personal insights and critical thinking
-        - Maintain academic rigor while sounding human
-        - Improve readability without losing sophistication
-        - Ensure proper citation integration
-        - Make it sound like expert human academic writing
-        Focus on making it indistinguishable from high-quality human academic writing.
-        """,
-        agent=humanization_agent,
-        expected_output="Final polished human-sounding academic thesis document",
-        dependencies=[thesis_task]
-    )
-    return [research_task, thesis_task, humanization_task]
-
-def run_thesis_writer(topic, document_type, academic_level, research_areas, word_count):
     try:
- [old lines 382-389 not shown in this view]
         )
-
-        result = crew.kickoff()
-        return result
     except Exception as e:
-        st.error(f"Error
-        return
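The elided spans above (old lines 247-278 and 382-389) are not visible in this view; they presumably instantiated the BuiltInLLM, the agents, and the Crew that crew.kickoff() runs. A purely hypothetical sketch of that wiring, based only on the visible imports and calls and not on the original code:

def run_thesis_writer_crew(topic, document_type, academic_level, research_areas, word_count):
    # Hypothetical reconstruction: create_thesis_agents() is assumed to return the
    # three agents defined above; the real helper name is not shown in the diff.
    agents = create_thesis_agents()
    tasks = create_thesis_tasks(topic, document_type, academic_level,
                                research_areas, word_count, agents)
    crew = Crew(
        agents=list(agents),
        tasks=tasks,
        process=Process.sequential,  # research -> writing -> humanization
        verbose=True,
    )
    return crew.kickoff()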

 def main():
     st.set_page_config(
@@ -465,12 +133,19 @@ def main():
         placeholder="Specific methodology, theoretical framework, case studies, etc...",
         height=100
     )
     if st.button("π Generate Thesis Document", type="primary", use_container_width=True):
         if not topic.strip():
             st.error("Please enter a thesis topic!")
         else:
             research_areas_text = research_areas if research_areas.strip() else "general academic research"
-            result = run_thesis_writer(topic, document_type, academic_level, research_areas_text, word_count)
             if result:
                 st.session_state.generated_thesis = result
                 st.session_state.thesis_info = {
@@ -554,4 +229,4 @@ def main():
     st.info("π Enter a thesis topic and click 'Generate Thesis Document' to create your academic content")

 if __name__ == "__main__":
-    main()
app.py (updated version):

 import streamlit as st
 import os
 import requests
 from datetime import datetime
 import time
 import random
 import markdown
 from groq import Groq

+# --- Streamlit UI and Groq API Integration ---

+def build_thesis_prompt(topic, document_type, academic_level, research_areas, word_count, additional_requirements):
+    prompt = f"""
+You are an expert academic writer. Write a complete {document_type} on the topic: "{topic}".
+Academic Level: {academic_level}
+Target Length: {word_count} words
+Research Areas: {research_areas}
+"""
+    if additional_requirements and additional_requirements.strip():
+        prompt += f"\nAdditional Requirements: {additional_requirements.strip()}\n"
+    prompt += """
+Requirements:
+- Use credible academic sources and reference them in-text (APA style, e.g., (Author, Year)).
+- Write in proper academic style for the specified level.
+- Create logical structure with introduction, body, and conclusion.
+- Use varied sentence structures and academic vocabulary.
+- Include critical analysis and original insights.
+- Maintain scholarly tone while sounding natural and human.
+- Avoid AI-like patterns and robotic language.
+- Include methodology, findings, and implications if relevant.
+- Make it engaging and intellectually rigorous.
+Structure:
+1. Introduction and background
+2. Literature review
+3. Methodology
+4. Analysis and findings
+5. Discussion and implications
+6. Conclusion and recommendations
+Important: Write as if you're a human academic expert sharing original research and insights. Make it indistinguishable from human writing.\n\nBegin the document below:\n\n"""
+    return prompt
+
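For illustration, calling the new prompt builder directly shows the full instruction block that gets sent to the model (the argument values here are made up):

# Quick illustration with hypothetical argument values.
sample_prompt = build_thesis_prompt(
    topic="Impact of microplastics on freshwater ecosystems",
    document_type="thesis",
    academic_level="masters",
    research_areas="environmental toxicology, limnology",
    word_count=3000,
    additional_requirements="Include a short section on sampling methodology.",
)
print(sample_prompt[:500])  # preview the beginning of the assembled prompt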
+def call_groq_llama(prompt, api_key, model_name="llama3-70b-8192"):  # Use the correct Groq model name
+    client = Groq(api_key=api_key)
     try:
+        response = client.chat.completions.create(
+            model=model_name,
+            messages=[
+                {"role": "system", "content": "You are an expert academic writer who creates sophisticated, well-researched thesis documents that sound completely human-written. You avoid AI patterns and create authentic academic content with proper citations and natural flow."},
+                {"role": "user", "content": prompt}
+            ],
+            max_tokens=2500,
+            temperature=0.6,
+            top_p=0.9
         )
+        return response.choices[0].message.content
     except Exception as e:
+        st.error(f"Error from Groq API: {str(e)}")
+        return f"Error: {str(e)}"
+
+def run_thesis_writer(topic, document_type, academic_level, research_areas, word_count, additional_requirements, api_key):
+    prompt = build_thesis_prompt(topic, document_type, academic_level, research_areas, word_count, additional_requirements)
+    with st.spinner("Generating your thesis document with Groq Llama-3..."):
+        result = call_groq_llama(prompt, api_key)
+    return result

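As a rough sanity check, the new Groq-based flow can also be exercised outside the Streamlit UI. A minimal sketch, assuming a valid key is exported in the GROQ_API_KEY environment variable and using placeholder settings:

# Minimal sketch: drive build_thesis_prompt / call_groq_llama without the Streamlit UI.
# Assumes GROQ_API_KEY is set; topic and settings below are placeholders.
import os

api_key = os.environ["GROQ_API_KEY"]
prompt = build_thesis_prompt(
    topic="Impact of microplastics on freshwater ecosystems",
    document_type="synopsis",
    academic_level="undergraduate",
    research_areas="environmental science",
    word_count=1500,
    additional_requirements="",
)
print(call_groq_llama(prompt, api_key))  # returns an "Error: ..." string on failure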
 def main():
     st.set_page_config(
... (lines 68-132 unchanged, not shown)
|
133 |
placeholder="Specific methodology, theoretical framework, case studies, etc...",
|
134 |
height=100
|
135 |
)
|
136 |
+
api_key = st.text_input(
|
137 |
+
"Enter your Groq API Key",
|
138 |
+
type="password",
|
139 |
+
help="Your API key is used only to generate your document and is never stored."
|
140 |
+
)
|
141 |
if st.button("π Generate Thesis Document", type="primary", use_container_width=True):
|
142 |
if not topic.strip():
|
143 |
st.error("Please enter a thesis topic!")
|
144 |
+
elif not api_key.strip():
|
145 |
+
st.error("Please enter your Groq API key!")
|
146 |
else:
|
147 |
research_areas_text = research_areas if research_areas.strip() else "general academic research"
|
148 |
+
result = run_thesis_writer(topic, document_type, academic_level, research_areas_text, word_count, additional_requirements, api_key)
|
149 |
if result:
|
150 |
st.session_state.generated_thesis = result
|
151 |
st.session_state.thesis_info = {
... (lines 152-228 unchanged, not shown)

     st.info("π Enter a thesis topic and click 'Generate Thesis Document' to create your academic content")

 if __name__ == "__main__":
+    main()