Naz786 committed on
Commit
2dab4a2
Β·
verified Β·
1 Parent(s): 6b7df50

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +712 -0
app.py ADDED
@@ -0,0 +1,712 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ import requests
4
+ import hashlib
5
+ from typing import List, Dict, Any
6
+ from datetime import datetime
7
+ import json
8
+ import re
9
+ from urllib.parse import quote
10
+ import time
11
+ import random
12
+ import markdown
13
+
14
+ # Import required libraries
15
+ from crewai import Agent, Task, Crew, Process
16
+ from crewai.tools import BaseTool
17
+ from groq import Groq
18
+ import nltk
19
+ from textstat import flesch_reading_ease, flesch_kincaid_grade
20
+ from bs4 import BeautifulSoup
21
+ import concurrent.futures
22
+ from duckduckgo_search import DDGS
23
+
24
+ # Download NLTK data
25
# Download NLTK data (best effort: the app can still run if the corpora are
# already cached locally or the network is unavailable).
try:
    nltk.download('punkt', quiet=True)
    nltk.download('stopwords', quiet=True)
    nltk.download('wordnet', quiet=True)
except Exception:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
    # `Exception` keeps the best-effort intent without masking shutdown.
    pass
31
+
32
+ # Custom Tools for Academic Research and Writing
33
class AcademicResearchTool(BaseTool):
    """Gather web research for a thesis topic via DuckDuckGo text search."""

    # Tool metadata consumed by the agent framework.
    name: str = "academic_research"
    description: str = "Conduct comprehensive academic research for thesis/synopsis"

    def _run(self, topic: str, research_areas: str) -> str:
        """Search several query variants of *topic*, then score, dedupe and rank.

        Returns a JSON list of the top 15 sources, or an error string on
        total failure. NOTE(review): *research_areas* is currently unused
        here — confirm whether it should influence the queries.
        """
        try:
            time.sleep(1)  # brief pause before hitting the search backend

            # Query variants broaden coverage of the topic.
            search_queries = [
                f"{topic} research studies",
                f"{topic} academic papers",
                f"{topic} recent developments",
                f"{topic} methodology",
                f"{topic} literature review",
            ]

            all_research = []
            with DDGS() as ddgs:
                for query in search_queries:
                    try:
                        # Iterate the generator directly; no need to
                        # materialize a throwaway list first.
                        for result in ddgs.text(query, max_results=6):
                            body = result.get('body', '')
                            all_research.append({
                                'query': query,
                                'title': result.get('title', ''),
                                'content': body,
                                'url': result.get('href', ''),
                                'relevance_score': self._calculate_relevance(body, topic),
                            })
                        time.sleep(0.5)  # Rate limiting
                    except Exception:
                        # One failed query must not abort the whole search;
                        # the exception object itself was never used.
                        continue

            # Sort by relevance and remove duplicates
            unique_research = self._remove_duplicates(all_research)
            unique_research.sort(key=lambda x: x['relevance_score'], reverse=True)

            return json.dumps(unique_research[:15])  # Top 15 most relevant sources
        except Exception as e:
            return f"Research failed: {str(e)}"

    def _calculate_relevance(self, content: str, topic: str) -> float:
        """Return the fraction of the topic's words present in *content* (0.0-1.0)."""
        topic_words = set(topic.lower().split())
        content_words = set(content.lower().split())

        if not topic_words or not content_words:
            return 0.0

        intersection = topic_words.intersection(content_words)
        return len(intersection) / len(topic_words)

    def _remove_duplicates(self, research_list: List[Dict]) -> List[Dict]:
        """Keep only the first entry seen for each URL, preserving order."""
        seen_urls = set()
        unique_research = []

        for item in research_list:
            if item['url'] not in seen_urls:
                seen_urls.add(item['url'])
                unique_research.append(item)

        return unique_research
99
+
100
class CitationGeneratorTool(BaseTool):
    """Build APA-style citation records from the research JSON."""

    # Tool metadata consumed by the agent framework.
    name: str = "citation_generator"
    description: str = "Generate proper academic citations and references"

    def _run(self, research_data: str) -> str:
        """Turn research JSON (a list of {title, url, ...}) into citation dicts.

        Returns a JSON list of citation records, or an error string on failure.
        """
        from urllib.parse import urlparse  # local import keeps the fix self-contained

        try:
            research_items = json.loads(research_data)
            year = datetime.now().year  # hoisted: one consistent year per run
            citations = []

            for i, item in enumerate(research_items[:10]):  # Top 10 sources
                # Generate citation in APA format
                title = item.get('title', 'Unknown Title')
                url = item.get('url', '')

                # Use the URL's host as the author/organization. urlparse is
                # robust to malformed or scheme-less URLs, unlike the old
                # `url.split('/')[2]` indexing.
                domain = urlparse(url).netloc or 'Unknown'

                citations.append({
                    'id': f"source_{i+1}",
                    'title': title,
                    'url': url,
                    'domain': domain,
                    'apa_citation': f"{domain}. ({year}). {title}. Retrieved from {url}",
                    'in_text': f"({domain}, {year})",
                })

            return json.dumps(citations)
        except Exception as e:
            return f"Citation generation failed: {str(e)}"
131
+
132
class AcademicWritingTool(BaseTool):
    """Score a draft against readability targets for a given academic level."""

    # Tool metadata consumed by the agent framework.
    name: str = "academic_writing"
    description: str = "Analyze and improve academic writing style"

    def _run(self, text: str, academic_level: str) -> str:
        """Analyze academic writing quality and suggest improvements.

        Returns a JSON object with readability metrics, the target guidelines
        for *academic_level*, and suggestions — or an error string on failure.
        """
        try:
            # Readability metrics from textstat.
            flesch_score = flesch_reading_ease(text)
            fk_grade = flesch_kincaid_grade(text)

            # Rough sentence stats; a naive '.' split is fine for a heuristic.
            sentences = text.split('.')
            sentence_lengths = [len(s.split()) for s in sentences if s.strip()]
            avg_sentence_length = sum(sentence_lengths) / max(len(sentence_lengths), 1)

            # Transition words typical of formal academic prose.
            academic_patterns = [
                "furthermore", "moreover", "additionally", "consequently",
                "therefore", "thus", "hence", "accordingly", "subsequently"
            ]

            pattern_usage = sum(1 for pattern in academic_patterns if pattern in text.lower())

            # BUG FIX: these targets were written as `60-80` etc., which Python
            # evaluates as integer subtraction (-20). The later [0]/[1]
            # indexing therefore raised TypeError on every call and this tool
            # always returned "Academic analysis failed". Ranges are now
            # proper (low, high) tuples.
            level_guidelines = {
                'undergraduate': {
                    'target_flesch': (60, 80),
                    'target_grade': (12, 14),
                    'sentence_length': (15, 25),
                },
                'masters': {
                    'target_flesch': (50, 70),
                    'target_grade': (14, 16),
                    'sentence_length': (18, 30),
                },
                'phd': {
                    'target_flesch': (40, 60),
                    'target_grade': (16, 18),
                    'sentence_length': (20, 35),
                },
            }

            # Case-normalize so UI values like "Masters"/"PhD" match the keys;
            # unknown levels still fall back to the masters profile.
            guidelines = level_guidelines.get(academic_level.lower(), level_guidelines['masters'])

            analysis = {
                'flesch_score': flesch_score,
                'fk_grade': fk_grade,
                'avg_sentence_length': avg_sentence_length,
                'academic_patterns_used': pattern_usage,
                'target_guidelines': guidelines,
                'suggestions': []
            }

            # Generate suggestions against the (low, high) targets.
            if flesch_score > guidelines['target_flesch'][1]:
                analysis['suggestions'].append("Consider more complex sentence structures for academic tone")
            if avg_sentence_length < guidelines['sentence_length'][0]:
                analysis['suggestions'].append("Use longer, more detailed sentences")
            if pattern_usage < 3:
                analysis['suggestions'].append("Include more academic transition phrases")

            return json.dumps(analysis)
        except Exception as e:
            return f"Academic analysis failed: {str(e)}"
197
+
198
class HumanizationTool(BaseTool):
    """Rewrite academic text so it reads less like machine-generated prose.

    NOTE(review): the sentence edits below split/rejoin on '.', which also
    splits abbreviations and decimals — confirm this is acceptable for the
    expected inputs. Output is nondeterministic (uses `random.choice`).
    """

    # Tool metadata consumed by the agent framework.
    name: str = "humanization_tool"
    description: str = "Make academic writing sound more human and less AI-like"

    def _run(self, text: str) -> str:
        """Apply humanization techniques to academic writing"""
        try:
            # Stock phrases that commonly signal machine-generated prose.
            ai_patterns = [
                "It is important to note that",
                "This demonstrates that",
                "This indicates that",
                "As previously mentioned",
                "It should be mentioned that",
                "This suggests that",
                "This implies that",
                "It can be concluded that"
            ]

            # Replacements, paired index-by-index with ai_patterns via zip().
            human_alternatives = [
                "Notably,",
                "This shows",
                "This reveals",
                "As noted earlier",
                "It's worth noting",
                "This suggests",
                "This implies",
                "Therefore,"
            ]

            # Apply replacements (every occurrence of each pattern).
            humanized_text = text
            for ai_pattern, human_alt in zip(ai_patterns, human_alternatives):
                humanized_text = humanized_text.replace(ai_pattern, human_alt)

            # Add natural variations
            variations = [
                "Interestingly,",
                "Surprisingly,",
                "Remarkably,",
                "Significantly,",
                "Importantly,"
            ]

            # Insert a randomly chosen variation at the start of every 3rd
            # sentence (indices 1, 4, 7, ...).
            sentences = humanized_text.split('.')
            for i in range(1, len(sentences), 3):  # Every 3rd sentence
                if i < len(sentences) and sentences[i].strip():
                    variation = random.choice(variations)
                    sentences[i] = f" {variation} {sentences[i].lstrip()}"

            humanized_text = '.'.join(sentences)

            # Add personal insights (subtle)
            personal_insights = [
                "Based on the available evidence,",
                "From the research findings,",
                "Considering the data,",
                "In light of these results,"
            ]

            # Insert one insight into the third sentence of longer texts.
            # NOTE(review): `sentences` is the already-modified list, so the
            # final join below supersedes `humanized_text` computed above.
            if len(sentences) > 5:
                insight = random.choice(personal_insights)
                sentences[2] = f" {insight} {sentences[2].lstrip()}"

            return '.'.join(sentences)
        except Exception as e:
            return f"Humanization failed: {str(e)}"
268
+
269
+ # Rate limit handling decorator
270
# Rate limit handling decorator
def rate_limit_handler(max_retries=3, base_delay=2):
    """Decorator factory: retry the wrapped call on rate-limit errors.

    Retries up to *max_retries* times with exponential backoff plus random
    jitter. Non-rate-limit exceptions — and a rate-limit error on the final
    attempt — are re-raised; returns None only if the loop exhausts without
    either returning or raising.
    """
    import functools  # local import: avoids touching the file's import block

    def decorator(func):
        @functools.wraps(func)  # preserve name/docstring of the wrapped func
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    # Only retry errors that look like rate limiting, and
                    # only while attempts remain.
                    if "rate_limit" in str(e).lower() and attempt < max_retries - 1:
                        delay = base_delay * (2 ** attempt) + random.uniform(0, 1)
                        st.warning(f"Rate limit hit. Retrying in {delay:.1f} seconds... (Attempt {attempt + 1}/{max_retries})")
                        time.sleep(delay)
                    else:
                        raise e
            return None
        return wrapper
    return decorator
286
+
287
+ # Custom LLM class for CrewAI with built-in API
288
+ import os
289
+ from langchain.llms.base import LLM
290
+ from typing import Optional, List, Mapping, Any
291
+ import litellm
292
+
293
class BuiltInLLM(LLM):
    """LangChain `LLM` shim that routes completions to Groq through litellm.

    Prompts longer than ~1500 words are truncated before the API call, and
    calls are retried on rate-limit errors via `rate_limit_handler`.
    """

    # Groq model addressed through litellm's provider-prefixed name.
    model_name: str = "groq/llama-3.3-70b-versatile"
    # Declared as a field so pydantic (LangChain's LLM base) accepts the
    # assignment performed in __init__.
    api_key: str = ""

    def __init__(self):
        super().__init__()
        # SECURITY FIX: never hard-code API keys in source. Read the key from
        # the environment; the old placeholder remains the fallback so
        # behavior is unchanged when GROQ_API_KEY is unset.
        self.api_key = os.environ.get("GROQ_API_KEY", "API_KEY")
        os.environ["GROQ_API_KEY"] = self.api_key
        litellm.set_verbose = False

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this LLM implementation."""
        return "groq"

    @rate_limit_handler(max_retries=3, base_delay=2)
    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        """Call API with rate limiting; returns the completion text."""
        try:
            # Truncate very long prompts so thesis-sized inputs fit the model
            # (split once instead of twice as before).
            words = prompt.split()
            if len(words) > 1500:
                prompt = ' '.join(words[:1500]) + "..."

            response = litellm.completion(
                model=self.model_name,
                messages=[
                    {"role": "system", "content": "You are an expert academic writer who creates sophisticated, well-researched thesis documents that sound completely human-written. You avoid AI patterns and create authentic academic content with proper citations and natural flow."},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=2500,
                temperature=0.6,  # Balanced creativity and consistency
                top_p=0.9,
                api_key=self.api_key
            )

            time.sleep(2)  # simple pacing between successive API calls
            return response.choices[0].message.content
        except Exception as e:
            st.error(f"Error in processing: {str(e)}")
            return f"Error: {str(e)}"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters that uniquely identify this LLM configuration."""
        return {"model_name": self.model_name}
337
+
338
+ # Specialized agents for thesis writing
339
def create_thesis_agents(llm):
    """Create specialized agents for thesis/synopsis writing.

    Returns a 3-tuple: (research agent, thesis writer, humanization agent).
    """

    def _build(role, goal, backstory, tools):
        # All three agents share the same settings: verbose logging,
        # no delegation, and the common LLM instance.
        return Agent(
            role=role,
            goal=goal,
            backstory=backstory,
            tools=tools,
            verbose=True,
            allow_delegation=False,
            llm=llm,
        )

    researcher = _build(
        "Academic Research Specialist",
        "Conduct comprehensive academic research and gather credible sources",
        "You are a PhD-level researcher with expertise in finding and analyzing academic sources. You understand how to identify credible information and synthesize research findings.",
        [AcademicResearchTool()],
    )

    writer = _build(
        "Academic Thesis Writer",
        "Write sophisticated thesis documents that sound completely human-written",
        "You are an experienced academic writer who specializes in creating thesis documents. You know how to write in a way that sounds natural and scholarly, avoiding AI patterns while maintaining academic rigor.",
        [AcademicWritingTool(), CitationGeneratorTool()],
    )

    humanizer = _build(
        "Academic Writing Humanizer",
        "Make academic writing sound completely human and undetectable",
        "You are an expert editor who specializes in making academic content sound natural and human-written. You know how to eliminate AI patterns and create authentic scholarly writing.",
        [HumanizationTool()],
    )

    return researcher, writer, humanizer
376
+
377
def create_thesis_tasks(topic, document_type, academic_level, research_areas, word_count, agents):
    """Create tasks for thesis/synopsis writing.

    Returns [research_task, thesis_task, humanization_task]; each later task
    declares a dependency on the previous one, matching the sequential crew.
    """
    # Unpack in the same order create_thesis_agents returns them.
    research_agent, thesis_writer, humanization_agent = agents

    # Task 1: Comprehensive Research
    research_task = Task(
        description=f"""
        Conduct comprehensive academic research for a {document_type} on "{topic}".

        Research Areas: {research_areas}
        Academic Level: {academic_level}
        Target Length: {word_count} words

        Requirements:
        - Find 10-15 credible academic sources
        - Gather recent research and developments
        - Identify key theories and methodologies
        - Note different perspectives and debates
        - Focus on peer-reviewed and scholarly sources
        - Include both theoretical and practical aspects

        Provide a detailed research summary with key findings, methodologies, and source analysis.
        """,
        agent=research_agent,
        expected_output="Comprehensive research summary with credible sources and key insights"
    )

    # Task 2: Thesis Writing
    thesis_task = Task(
        description=f"""
        Write a complete {document_type} on "{topic}" that sounds completely human-written.

        Academic Level: {academic_level}
        Target Length: {word_count} words
        Research Areas: {research_areas}

        Requirements:
        - Use the comprehensive research provided
        - Write in proper academic style for {academic_level} level
        - Include proper citations and references
        - Create logical structure with introduction, body, and conclusion
        - Use varied sentence structures and academic vocabulary
        - Include critical analysis and original insights
        - Maintain scholarly tone while sounding natural
        - Avoid AI-like patterns and formal robotic language
        - Include methodology, findings, and implications
        - Make it engaging and intellectually rigorous

        Structure:
        1. Introduction and background
        2. Literature review
        3. Methodology
        4. Analysis and findings
        5. Discussion and implications
        6. Conclusion and recommendations

        Important: Write as if you're a human academic expert sharing original research and insights.
        """,
        agent=thesis_writer,
        expected_output="Complete academic thesis document with proper structure and citations",
        # Runs after the research task so its output is available as context.
        dependencies=[research_task]
    )

    # Task 3: Humanization and Polish
    humanization_task = Task(
        description=f"""
        Polish and humanize the thesis document to make it completely undetectable as AI-written.

        Requirements:
        - Remove any remaining AI patterns
        - Improve natural academic flow
        - Add authentic human writing touches
        - Ensure varied sentence structures
        - Make transitions feel natural and scholarly
        - Add subtle personal insights and critical thinking
        - Maintain academic rigor while sounding human
        - Improve readability without losing sophistication
        - Ensure proper citation integration
        - Make it sound like expert human academic writing

        Focus on making it indistinguishable from high-quality human academic writing.
        """,
        agent=humanization_agent,
        expected_output="Final polished human-sounding academic thesis document",
        # Runs last, on top of the drafted thesis.
        dependencies=[thesis_task]
    )

    return [research_task, thesis_task, humanization_task]
465
+
466
def run_thesis_writer(topic, document_type, academic_level, research_areas, word_count):
    """Run the thesis writing pipeline end to end.

    Builds the LLM, agents, tasks and crew, then kicks the crew off under a
    Streamlit spinner. Returns the crew result, or None on any failure
    (the error is surfaced in the UI via st.error).
    """
    try:
        # LLM -> agents -> tasks -> crew, wired sequentially.
        language_model = BuiltInLLM()
        thesis_agents = create_thesis_agents(language_model)
        thesis_tasks = create_thesis_tasks(
            topic, document_type, academic_level, research_areas, word_count, thesis_agents
        )

        writing_crew = Crew(
            agents=list(thesis_agents),
            tasks=thesis_tasks,
            process=Process.sequential,
            verbose=True,
        )

        # Execute while showing progress in the UI.
        with st.spinner("Creating comprehensive thesis document with AI agents..."):
            return writing_crew.kickoff()
    except Exception as e:
        st.error(f"Error in thesis writing: {str(e)}")
        return None
494
+
495
+ # Streamlit UI
496
def main():
    """Streamlit entry point.

    Renders the sidebar, the request form (left column) and the generated
    thesis panel (right column), and wires the form to run_thesis_writer().
    Generated output is kept in st.session_state so it survives reruns.
    """
    st.set_page_config(
        page_title="Thesis Writer Bot - Academic Document Creator",
        page_icon="πŸŽ“",
        layout="wide"
    )

    st.title("πŸŽ“ Thesis Writer Bot")
    st.markdown("*Create sophisticated, human-like thesis and synopsis documents that pass any AI detection*")

    # Sidebar configuration
    with st.sidebar:
        st.header("ℹ️ About")

        st.success("βœ… Ready to generate your thesis!")

        st.markdown("---")
        st.markdown("### 🎯 What This Tool Does")
        st.markdown("- Creates original, human-like thesis documents")
        st.markdown("- Conducts comprehensive academic research")
        st.markdown("- Generates proper citations and references")
        st.markdown("- Ensures content passes AI detection")
        st.markdown("- No plagiarism - completely original content")

        st.markdown("---")
        st.markdown("### πŸ“š Document Types")
        st.markdown("- **Thesis**: Complete research thesis")
        st.markdown("- **Synopsis**: Research proposal/synopsis")
        st.markdown("- **Dissertation**: PhD-level document")
        st.markdown("- **Research Paper**: Academic paper")
        st.markdown("- **Literature Review**: Comprehensive review")

        st.markdown("---")
        st.markdown("### πŸŽ“ Academic Levels")
        st.markdown("- **Undergraduate**: Bachelor's level")
        st.markdown("- **Masters**: Graduate level")
        st.markdown("- **PhD**: Doctoral level")

        st.markdown("---")
        st.markdown("### πŸ”₯ Features")
        st.markdown("- **No Plagiarism**: Original research")
        st.markdown("- **Human-like**: Natural academic writing")
        st.markdown("- **AI Undetectable**: Passes detection")
        st.markdown("- **Proper Citations**: Academic references")
        st.markdown("- **Research-based**: Credible sources")
        st.markdown("- **No Word Limits**: Any length needed")

    # Main content area: form on the left, output on the right.
    col1, col2 = st.columns([1, 1])

    with col1:
        st.header("πŸ“ Thesis Request")

        # Topic input
        topic = st.text_input(
            "What is your thesis/synopsis topic?",
            placeholder="e.g., Impact of artificial intelligence on healthcare delivery systems"
        )

        # Document type selection
        document_types = [
            "Thesis", "Synopsis", "Dissertation", "Research Paper",
            "Literature Review", "Research Proposal", "Academic Report"
        ]
        document_type = st.selectbox("Document Type", document_types)

        # Academic level
        academic_levels = ["Undergraduate", "Masters", "PhD"]
        academic_level = st.selectbox("Academic Level", academic_levels)

        # Research areas
        research_areas = st.text_area(
            "Specific Research Areas/Focus (Optional)",
            placeholder="e.g., methodology, recent developments, case studies, theoretical frameworks...",
            height=80
        )

        # Word count (no limit)
        word_count = st.number_input(
            "Target Word Count",
            min_value=1000,
            max_value=50000,
            value=5000,
            step=500,
            help="No strict limit - write as much as needed"
        )

        # Additional requirements (stored in session info but not passed to
        # the pipeline — NOTE(review): confirm whether this is intentional).
        additional_requirements = st.text_area(
            "Additional Requirements (Optional)",
            placeholder="Specific methodology, theoretical framework, case studies, etc...",
            height=100
        )

        # Generate button
        if st.button("πŸš€ Generate Thesis Document", type="primary", use_container_width=True):
            if not topic.strip():
                st.error("Please enter a thesis topic!")
            else:
                # Prepare research areas (fall back to a generic focus).
                research_areas_text = research_areas if research_areas.strip() else "general academic research"

                # Run thesis generation
                result = run_thesis_writer(topic, document_type, academic_level, research_areas_text, word_count)

                if result:
                    # Persist across Streamlit reruns via session state.
                    st.session_state.generated_thesis = result
                    st.session_state.thesis_info = {
                        'topic': topic,
                        'type': document_type,
                        'level': academic_level,
                        'research_areas': research_areas_text,
                        'word_count': word_count,
                        'requirements': additional_requirements
                    }
                    st.success("βœ… Thesis document generated successfully!")

    with col2:
        st.header("πŸ“„ Generated Thesis")

        if "generated_thesis" in st.session_state:
            thesis = st.session_state.generated_thesis
            info = st.session_state.thesis_info

            # Display thesis info
            st.subheader("πŸ“Š Document Information")
            col_info1, col_info2 = st.columns(2)
            with col_info1:
                st.metric("Topic", info['topic'])
                st.metric("Type", info['type'])
                st.metric("Level", info['level'])
            with col_info2:
                st.metric("Generated Words", len(str(thesis).split()))
                st.metric("Research Areas", info['research_areas'][:20] + "..." if len(info['research_areas']) > 20 else info['research_areas'])
                st.metric("Quality", "βœ… Human-like")

            # Display the thesis
            st.subheader("πŸ“ Your Thesis Document")

            # Format the thesis nicely
            formatted_thesis = str(thesis)

            st.text_area(
                "Generated Thesis:",
                value=formatted_thesis,
                height=400,
                help="This is your human-like thesis document"
            )

            # Download options
            col_dl1, col_dl2 = st.columns(2)
            with col_dl1:
                st.download_button(
                    label="πŸ“₯ Download as TXT",
                    data=formatted_thesis,
                    file_name=f"{info['topic'].replace(' ', '_')}_{info['type']}.txt",
                    mime="text/plain"
                )

            with col_dl2:
                # Create markdown version with academic formatting
                markdown_content = f"""# {info['topic']}

**Document Type:** {info['type']}
**Academic Level:** {info['level']}
**Research Areas:** {info['research_areas']}
**Word Count:** {len(str(thesis).split())}
**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

---

{formatted_thesis}

---

*This document was generated using advanced AI technology and is designed to be indistinguishable from human academic writing.*
"""
                st.download_button(
                    label="πŸ“₯ Download as MD",
                    data=markdown_content,
                    file_name=f"{info['topic'].replace(' ', '_')}_{info['type']}.md",
                    mime="text/markdown"
                )

            # Document analysis
            st.subheader("πŸ” Document Analysis")

            # Quick stats (naive '.' and blank-line splits — heuristics only).
            actual_words = len(str(thesis).split())
            actual_sentences = len(str(thesis).split('.'))
            paragraphs = len(str(thesis).split('\n\n'))

            col_stats1, col_stats2, col_stats3 = st.columns(3)
            with col_stats1:
                st.metric("Words", actual_words)
            with col_stats2:
                st.metric("Sentences", actual_sentences)
            with col_stats3:
                st.metric("Paragraphs", paragraphs)

            # Academic quality indicators
            st.success("βœ… Document optimized for academic writing")
            st.info("πŸ’‘ This thesis is designed to pass AI detection tools and academic scrutiny")
            st.warning("⚠️ Remember to review and customize the content for your specific requirements")

            # Remove technical details
            st.markdown("---")
            st.markdown("### πŸ”’ Privacy & Security")
            st.markdown("- Your content is processed securely")
            st.markdown("- No data is stored or shared")
            st.markdown("- All research is conducted privately")

        else:
            st.info("πŸ‘ˆ Enter a thesis topic and click 'Generate Thesis Document' to create your academic content")
710
+
711
# Script entry point: launch the Streamlit UI.
if __name__ == "__main__":
    main()