Kuberwastaken committed on
Commit 7402600 · 1 Parent(s): 270bfd7

better script

Files changed (2):
  1. app.py +408 -241
  2. gradio_app.py +502 -465
app.py CHANGED
@@ -1,41 +1,58 @@
  import gradio as gr
  import json
- import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
  import logging
- from pathlib import Path
  import tempfile
- import time

  # Setup logging
- logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

  class SyllabusFormatter:
      def __init__(self, model_name="microsoft/Phi-3-mini-4k-instruct"):
          self.model_name = model_name
          self.tokenizer = None
          self.model = None
          self.pipe = None
-         self.processed_count = 0
-         self.total_count = 0

-     def setup_model(self):
-         """Download and setup the Phi model"""
          try:
              # Load tokenizer
              self.tokenizer = AutoTokenizer.from_pretrained(
                  self.model_name,
                  trust_remote_code=True
              )

-             # Load model with 8-bit quantization for efficiency
              self.model = AutoModelForCausalLM.from_pretrained(
                  self.model_name,
-                 torch_dtype=torch.float16,
-                 device_map="auto",
                  trust_remote_code=True,
-                 load_in_8bit=True
              )

              # Create pipeline
@@ -43,23 +60,21 @@ class SyllabusFormatter:
              "text-generation",
              model=self.model,
              tokenizer=self.tokenizer,
-             max_new_tokens=2048,
-             temperature=0.1,
-             do_sample=True,
-             top_p=0.9,
-             repetition_penalty=1.1
          )

-         logger.info("Model setup complete!")
          return True

      except Exception as e:
-         logger.error(f"Error setting up model: {str(e)}")
          return False
-
      def create_formatting_prompt(self, unit_content: str, unit_name: str, subject_name: str = "") -> str:
-         """Create a very clear, focused prompt for formatting syllabus content"""
-         prompt = f"""<|system|>You are a professional academic syllabus formatter. Your ONLY job is to take badly formatted syllabus content and make it beautifully organized and readable.

  RULES:
  1. PRESERVE every single word, topic, and concept from the original
@@ -83,254 +98,406 @@ Unit: {unit_name}
  Original content (poorly formatted):
  {unit_content}

- Task: Reformat this content to be beautifully organized and readable. Do NOT add any new information - only restructure what's already there. Make it professional and easy to scan.<|end|>

  <|assistant|>"""
          return prompt

-     def format_unit_content(self, unit_content: str, unit_name: str, subject_name: str = "", progress=None) -> str:
-         """Format a single unit's content using the AI model"""
          try:
-             # Create prompt
-             prompt = self.create_formatting_prompt(unit_content, unit_name, subject_name)
-
-             # Generate formatted content
-             response = self.pipe(prompt)
-
-             # Extract formatted content
-             generated_text = response[0]['generated_text']
-             assistant_start = generated_text.find("<|assistant|>")
-             if assistant_start != -1:
-                 formatted_content = generated_text[assistant_start + len("<|assistant|>"):].strip()
-             else:
-                 formatted_content = generated_text.strip()
-
-             # Clean up and validate
-             formatted_content = self.clean_generated_content(formatted_content)
-             if not self.validate_formatted_content(unit_content, formatted_content):
-                 return unit_content

-             return formatted_content
-
          except Exception as e:
-             logger.error(f"Error formatting content: {str(e)}")
-             return unit_content
-
      def validate_formatted_content(self, original: str, formatted: str) -> bool:
-         """Validate that formatted content preserves all important information"""
-         # Basic validation
          if len(formatted) < len(original) * 0.4:
              return False
          return True
-
-     def clean_generated_content(self, content: str) -> str:
-         """Clean up generated content"""
-         # Remove special tokens
-         for token in ["<|system|>", "<|user|>", "<|assistant|>"]:
-             content = content.replace(token, "")
-
-         # Clean up extra whitespace
-         content = "\n".join(line.strip() for line in content.split("\n") if line.strip())

-         return content
-
-     def process_syllabus_file(self, syllabus_data: dict, progress=gr.Progress()) -> dict:
-         """Process the entire syllabus file with progress updates"""
-         try:
-             # Count total units
-             total_units = 0
-             processed = 0
-
-             def count_units(data):
-                 count = 0
-                 if isinstance(data, dict):
-                     for value in data.values():
-                         if isinstance(value, dict):
-                             count += count_units(value)
-                         elif isinstance(value, str) and "Unit" in str(value):
-                             count += 1
-                 return count
-
-             total_units = count_units(syllabus_data.get("syllabus", {}))
-             logger.info(f"Total units to process: {total_units}")
-
-             # Process each branch
-             for branch_name, branch_data in syllabus_data.get("syllabus", {}).items():
-                 if not isinstance(branch_data, dict):
                      continue

-                 # Process each semester
-                 for sem_name, sem_data in branch_data.items():
-                     if not isinstance(sem_data, dict):
                          continue

-                     # Process each subject
-                     for subject_name, subject_data in sem_data.items():
-                         if not isinstance(subject_data, dict) or "content" not in subject_data:
-                             continue
-
-                         content = subject_data["content"]
-                         if not isinstance(content, dict):
-                             continue
-
-                         # Format each unit
-                         for unit_name, unit_content in content.items():
-                             if not unit_name.startswith("Unit") or not isinstance(unit_content, str):
-                                 continue
-
-                             processed += 1
-                             progress(processed / total_units, desc=f"Processing {unit_name} in {subject_name}...")
-
-                             # Format the unit content
-                             formatted_content = self.format_unit_content(
-                                 unit_content,
-                                 unit_name,
-                                 subject_name
-                             )
-
-                             # Update the content
-                             syllabus_data["syllabus"][branch_name][sem_name][subject_name]["content"][unit_name] = formatted_content

-             # Add formatting metadata
-             if "metadata" not in syllabus_data:
-                 syllabus_data["metadata"] = {}

-             syllabus_data["metadata"].update({
-                 "lastFormatted": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
-                 "formattingModel": "Phi-3 Mini",
-                 "unitsProcessed": processed,
-                 "version": "1.0"
-             })
-
-             return syllabus_data
-
-         except Exception as e:
-             logger.error(f"Error processing syllabus: {str(e)}")
-             raise gr.Error(f"Error processing syllabus: {str(e)}")
-
- # Initialize the formatter
- formatter = None

- def setup_formatter():
-     global formatter
-     if formatter is None:
-         formatter = SyllabusFormatter()
-         return formatter.setup_model()
-     return True

- def process_file(file):
-     """Process the uploaded syllabus file"""
      try:
-         # Setup formatter if needed
-         if not setup_formatter():
-             raise gr.Error("Failed to setup the formatting model. Please try again.")

-         # Read and parse JSON
-         content = file.read()
-         syllabus_data = json.loads(content)

-         # Process syllabus
-         formatted_data = formatter.process_syllabus_file(syllabus_data)

          # Save to temporary file
-         with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp:
-             json.dump(formatted_data, tmp, indent=2)
-             return tmp.name
-
-     except json.JSONDecodeError:
-         raise gr.Error("Invalid JSON file. Please check your syllabus file format.")
      except Exception as e:
-         raise gr.Error(f"Error processing file: {str(e)}")
-
- # Custom theme
- theme = gr.themes.Soft(
-     primary_hue="indigo",
-     secondary_hue="blue",
- ).set(
-     body_background_fill="#fafafa",
-     body_background_fill_dark="#1a1a1a",
-     button_primary_background_fill="*primary_500",
-     button_primary_background_fill_hover="*primary_600"
- )
-
- # Gradio interface
- title = "📚 Syllabus Formatter"
- description = """
- Transform your syllabus into a beautifully formatted, easy-to-read document using AI.
-
- ### Features:
- - Preserves all original content
- - Improves readability and organization
- - Creates logical grouping and sections
- - Adds professional formatting
-
- Simply upload your JSON syllabus file and get a formatted version back!
- """
-
- css = """
- .feedback {
-     margin-top: 20px;
-     padding: 10px;
-     border-radius: 8px;
-     background-color: #f0f9ff;
-     border: 1px solid #bae6fd;
- }
- .dark .feedback {
-     background-color: #082f49;
-     border-color: #075985;
- }
- """

- with gr.Blocks(theme=theme, css=css) as iface:
-     gr.Markdown(f"# {title}")
-     gr.Markdown(description)

-     with gr.Row():
-         with gr.Column():
-             file_input = gr.File(
-                 label="Upload Syllabus JSON",
-                 file_types=[".json"],
-                 type="binary"  # Use binary type for file upload
-             )
-             process_btn = gr.Button("🪄 Format Syllabus", variant="primary")

-             output_file = gr.File(
-                 label="Download Formatted Syllabus"
-             )
-
-     with gr.Row():
-         feedback = gr.Markdown(
-             value="Upload a JSON syllabus file to begin...",
-             elem_classes=["feedback"]
          )

-     def update_feedback(file):
-         return "Processing your syllabus... This may take a few minutes depending on the size."
-
-     # Setup click event
-     process_btn.click(
-         fn=update_feedback,
-         inputs=[file_input],
-         outputs=[feedback],
-         queue=False
-     ).then(
-         fn=process_file,
-         inputs=[file_input],
-         outputs=[output_file]
-     ).success(
-         fn=lambda: "✨ Syllabus formatting complete! You can now download the formatted file.",
-         outputs=[feedback]
-     )
-
-     gr.Markdown("""
-     ### 📝 Notes:
-     - The formatter preserves all original content while improving organization
-     - Processing time depends on the size of your syllabus
-     - For large files, please be patient as the AI processes each section
-
-     Made with ❤️ using Microsoft's Phi-3 Mini model
-     """)

- # Launch in dev mode
  if __name__ == "__main__":
-     iface.launch()
  import gradio as gr
  import json
+ import time
  import logging
+ import re
+ from typing import Dict, Any, List, Tuple
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ import threading
+ from datetime import datetime
+ import os
  import tempfile
+
+ # Hugging Face Transformers
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ import gc

  # Setup logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s'
+ )
  logger = logging.getLogger(__name__)

  class SyllabusFormatter:
      def __init__(self, model_name="microsoft/Phi-3-mini-4k-instruct"):
+         """Initialize the formatter with Phi-3 model"""
          self.model_name = model_name
          self.tokenizer = None
          self.model = None
          self.pipe = None
+         self.is_model_loaded = False
+         self.processing_lock = threading.Lock()

+     def load_model(self):
+         """Load the Phi-3 model with optimizations"""
+         if self.is_model_loaded:
+             return True
+
          try:
+             logger.info(f"Loading model: {self.model_name}")
+
              # Load tokenizer
              self.tokenizer = AutoTokenizer.from_pretrained(
                  self.model_name,
                  trust_remote_code=True
              )

+             # Load model with optimizations
              self.model = AutoModelForCausalLM.from_pretrained(
                  self.model_name,
+                 torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+                 device_map="auto" if torch.cuda.is_available() else None,
                  trust_remote_code=True,
+                 low_cpu_mem_usage=True
              )

              # Create pipeline
              self.pipe = pipeline(
                  "text-generation",
                  model=self.model,
                  tokenizer=self.tokenizer,
+                 device=0 if torch.cuda.is_available() else -1,
+                 torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
              )

+             self.is_model_loaded = True
+             logger.info("Model loaded successfully!")
              return True

          except Exception as e:
+             logger.error(f"Error loading model: {str(e)}")
              return False
+
      def create_formatting_prompt(self, unit_content: str, unit_name: str, subject_name: str = "") -> str:
+         """Create a focused prompt for formatting syllabus content"""
+         prompt = f"""<|system|>You are a professional academic syllabus formatter. Your job is to take poorly formatted syllabus content and make it beautifully organized and readable.

  RULES:
  1. PRESERVE every single word, topic, and concept from the original
  2. NEVER add explanations, examples, or new content
  3. ONLY restructure and format the existing text
  4. Use clear headings, bullet points, and logical grouping
  5. Separate different topics with proper spacing
  6. Make it scannable and easy to read

  FORMAT STYLE:
  - Use main topic headings with proper capitalization
  - Group related subtopics under main topics
  - Use bullet points (•) for lists of concepts
  - Use sub-bullets (◦) for details under main bullets
  - Separate major sections with line breaks
  - Keep technical terms exactly as written<|end|>

  <|user|>Subject: {subject_name}
  Unit: {unit_name}

  Original content (poorly formatted):
  {unit_content}

+ Task: Reformat this content to be beautifully organized and readable. Do NOT add any new information - only restructure what's already there.<|end|>

  <|assistant|>"""
          return prompt

+     def format_single_unit(self, unit_data: Tuple[str, str, str, str, str]) -> Tuple[str, str, str, str, str]:
+         """Format a single unit's content"""
+         branch, semester, subject, unit_name, unit_content = unit_data
+
          try:
+             with self.processing_lock:
+                 # Create prompt
+                 prompt = self.create_formatting_prompt(unit_content, unit_name, subject)

+                 # Generate formatted content
+                 response = self.pipe(
+                     prompt,
+                     max_new_tokens=2048,
+                     temperature=0.1,
+                     do_sample=True,
+                     top_p=0.9,
+                     repetition_penalty=1.1,
+                     pad_token_id=self.tokenizer.eos_token_id,
+                     eos_token_id=self.tokenizer.eos_token_id
+                 )
+
+                 # Extract formatted content
+                 generated_text = response[0]['generated_text']
+                 assistant_start = generated_text.find("<|assistant|>")
+
+                 if assistant_start != -1:
+                     formatted_content = generated_text[assistant_start + len("<|assistant|>"):].strip()
+                 else:
+                     formatted_content = generated_text[len(prompt):].strip()
+
+                 # Clean up the content
+                 formatted_content = self.clean_generated_content(formatted_content)
+
+                 # Validate content
+                 if self.validate_formatted_content(unit_content, formatted_content):
+                     return (branch, semester, subject, unit_name, formatted_content)
+                 else:
+                     logger.warning(f"Validation failed for {subject} - {unit_name}")
+                     return (branch, semester, subject, unit_name, unit_content)
+
          except Exception as e:
+             logger.error(f"Error formatting {subject} - {unit_name}: {str(e)}")
+             return (branch, semester, subject, unit_name, unit_content)
+
+     def clean_generated_content(self, content: str) -> str:
+         """Clean up generated content"""
+         # Remove special tokens
+         content = re.sub(r'<\|.*?\|>', '', content)
+
+         # Remove AI commentary
+         lines = content.split('\n')
+         cleaned_lines = []
+
+         for line in lines:
+             line = line.strip()
+             if (line.startswith("Here") and ("formatted" in line.lower() or "organized" in line.lower())) or \
+                line.startswith("I have") or line.startswith("The content has been") or \
+                line.startswith("Note:") or line.startswith("This formatted version"):
+                 continue
+             if line:
+                 cleaned_lines.append(line)
+
+         content = '\n'.join(cleaned_lines)
+
+         # Fix spacing
+         content = re.sub(r'\n\s*\n\s*\n+', '\n\n', content)
+         content = re.sub(r'\n([A-Z][^:\n]*:)\n', r'\n\n\1\n', content)
+
+         return content.strip()
+
      def validate_formatted_content(self, original: str, formatted: str) -> bool:
+         """Validate that formatted content preserves important information"""
          if len(formatted) < len(original) * 0.4:
              return False
+
+         # Check for preservation of key terms
+         original_words = set(re.findall(r'\b[A-Z][a-z]*(?:[A-Z][a-z]*)*\b', original))
+         formatted_words = set(re.findall(r'\b[A-Z][a-z]*(?:[A-Z][a-z]*)*\b', formatted))
+
+         missing_terms = original_words - formatted_words
+         if len(missing_terms) > len(original_words) * 0.3:
+             return False
+
          return True
+
+     def extract_units_for_processing(self, syllabus_data: Dict[str, Any]) -> List[Tuple[str, str, str, str, str]]:
+         """Extract all units for concurrent processing"""
+         units = []

+         for branch_name, branch_data in syllabus_data.get("syllabus", {}).items():
+             if not isinstance(branch_data, dict):
+                 continue
+
+             for sem_name, sem_data in branch_data.items():
+                 if not isinstance(sem_data, dict):
                      continue

+                 for subject_name, subject_data in sem_data.items():
+                     if not isinstance(subject_data, dict) or "content" not in subject_data:
                          continue

+                     content = subject_data["content"]
+                     if not isinstance(content, dict):
+                         continue
+
+                     for unit_name, unit_content in content.items():
+                         if unit_name.startswith("Unit") and isinstance(unit_content, str):
+                             units.append((branch_name, sem_name, subject_name, unit_name, unit_content))
+
+         return units
+
+     def format_syllabus_concurrent(self, syllabus_data: Dict[str, Any], progress_callback=None, max_workers=4) -> Dict[str, Any]:
+         """Format syllabus using concurrent processing"""
+         if not self.is_model_loaded:
+             if not self.load_model():
+                 raise Exception("Failed to load model")
+
+         # Extract units for processing
+         units = self.extract_units_for_processing(syllabus_data)
+         total_units = len(units)
+
+         logger.info(f"Processing {total_units} units with {max_workers} workers")
+
+         # Process units concurrently
+         processed_units = {}
+         completed_count = 0
+
+         with ThreadPoolExecutor(max_workers=max_workers) as executor:
+             # Submit all tasks
+             future_to_unit = {executor.submit(self.format_single_unit, unit): unit for unit in units}

+             # Process completed tasks
+             for future in as_completed(future_to_unit):
+                 try:
+                     branch, semester, subject, unit_name, formatted_content = future.result()
+
+                     # Store the result
+                     key = f"{branch}|{semester}|{subject}|{unit_name}"
+                     processed_units[key] = formatted_content
+
+                     completed_count += 1
+                     progress = (completed_count / total_units) * 100
+
+                     if progress_callback:
+                         progress_callback(progress, f"Processed {subject} - {unit_name}")
+
+                     logger.info(f"Completed {completed_count}/{total_units} ({progress:.1f}%)")
+
+                 except Exception as e:
+                     logger.error(f"Error processing unit: {str(e)}")
+
+         # Update the syllabus data with formatted content
+         for branch_name, branch_data in syllabus_data.get("syllabus", {}).items():
+             if not isinstance(branch_data, dict):
+                 continue

+             for sem_name, sem_data in branch_data.items():
+                 if not isinstance(sem_data, dict):
+                     continue
+
+                 for subject_name, subject_data in sem_data.items():
+                     if not isinstance(subject_data, dict) or "content" not in subject_data:
+                         continue
+
+                     content = subject_data["content"]
+                     if not isinstance(content, dict):
+                         continue
+
+                     for unit_name in content.keys():
+                         if unit_name.startswith("Unit"):
+                             key = f"{branch_name}|{sem_name}|{subject_name}|{unit_name}"
+                             if key in processed_units:
+                                 syllabus_data["syllabus"][branch_name][sem_name][subject_name]["content"][unit_name] = processed_units[key]
+
+         # Add metadata
+         if "metadata" not in syllabus_data:
+             syllabus_data["metadata"] = {}
+
+         syllabus_data["metadata"]["lastFormatted"] = datetime.now().isoformat()
+         syllabus_data["metadata"]["formattingNote"] = "Content formatted using Phi-3 AI for enhanced readability"
+         syllabus_data["metadata"]["originalContentPreserved"] = True
+         syllabus_data["metadata"]["unitsProcessed"] = completed_count
+         syllabus_data["metadata"]["formattingModel"] = self.model_name
+         syllabus_data["metadata"]["version"] = "2.0"
+         syllabus_data["metadata"]["processedConcurrently"] = True
+         syllabus_data["metadata"]["maxWorkers"] = max_workers
+
+         return syllabus_data

+ # Global formatter instance
+ formatter = SyllabusFormatter()

+ def format_syllabus_file(file_path, max_workers=4, progress=gr.Progress()):
+     """Main function to format syllabus file"""
      try:
+         # Load JSON file
+         with open(file_path, 'r', encoding='utf-8') as f:
+             syllabus_data = json.load(f)
+
+         # Count units
+         units = formatter.extract_units_for_processing(syllabus_data)
+         total_units = len(units)

+         progress(0, f"Found {total_units} units to process")

+         # Progress callback
+         def update_progress(percent, message):
+             progress(percent/100, message)
+
+         # Format the syllabus
+         formatted_data = formatter.format_syllabus_concurrent(
+             syllabus_data,
+             progress_callback=update_progress,
+             max_workers=max_workers
+         )

          # Save to temporary file
+         with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False, encoding='utf-8') as f:
+             json.dump(formatted_data, f, indent=2, ensure_ascii=False)
+             temp_path = f.name
+
+         progress(1.0, f"Completed! Processed {total_units} units")
+
+         return temp_path, f"✅ Successfully formatted {total_units} units!"
+
      except Exception as e:
+         error_msg = f"❌ Error: {str(e)}"
+         logger.error(error_msg)
+         return None, error_msg

+ def create_sample_json():
+     """Create a sample JSON file for testing"""
+     sample_data = {
+         "metadata": {
+             "totalFiles": 1,
+             "generatedAt": datetime.now().isoformat(),
+             "source": "Sample syllabus for testing",
+             "description": "Sample syllabus content"
+         },
+         "syllabus": {
+             "CSE": {
+                 "SEM1": {
+                     "Mathematics": {
+                         "extractedFrom": {
+                             "path": "CSE > SEM1 > Mathematics",
+                             "branch": "CSE",
+                             "semester": "SEM1",
+                             "subject": "Mathematics"
+                         },
+                         "content": {
+                             "Unit I": "Differential Calculus: Limits, continuity, derivatives, applications of derivatives, maxima and minima, curve sketching, related rates, optimization problems, L'Hospital's rule, Taylor series, Partial derivatives, total differential, chain rule, implicit differentiation, Jacobians.",
+                             "Unit II": "Integral Calculus: Integration techniques, definite integrals, applications of integrals, area under curves, volume of solids, arc length, surface area, Multiple integrals, double integrals, triple integrals, change of variables, applications in geometry and physics."
+                         }
+                     }
+                 }
+             }
+         }
+     }

+     with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False, encoding='utf-8') as f:
+         json.dump(sample_data, f, indent=2, ensure_ascii=False)
+         return f.name
+
+ # Gradio Interface
+ def create_interface():
+     with gr.Blocks(
+         title="Syllabus Formatter - AI-Powered JSON Syllabus Formatter",
+         theme=gr.themes.Soft(
+             primary_hue="blue",
+             secondary_hue="purple",
+             neutral_hue="gray"
+         )
+     ) as interface:
+
+         gr.HTML("""
+         <div style="text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px; margin-bottom: 20px;">
+             <h1 style="font-size: 2.5em; margin-bottom: 10px;">🎓 Syllabus Formatter</h1>
+             <p style="font-size: 1.2em; opacity: 0.9;">AI-Powered JSON Syllabus Content Formatter using Phi-3</p>
+             <p style="font-size: 1em; opacity: 0.8;">Upload your JSON syllabus file and get beautifully formatted content with concurrent processing for speed!</p>
+         </div>
+         """)
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 gr.HTML("""
+                 <div style="background: #f8f9fa; padding: 15px; border-radius: 8px; margin-bottom: 15px;">
+                     <h3>📋 Instructions:</h3>
+                     <ol>
+                         <li>Upload your JSON syllabus file</li>
+                         <li>Choose number of concurrent workers (1-8)</li>
+                         <li>Click "Format Syllabus" to start processing</li>
+                         <li>Download the formatted JSON file</li>
+                     </ol>
+                     <p><strong>Note:</strong> Only syllabus content will be formatted, metadata remains unchanged.</p>
+                 </div>
+                 """)
+
+                 file_input = gr.File(
+                     label="📁 Upload JSON Syllabus File",
+                     file_types=[".json"],
+                     type="filepath"
+                 )
+
+                 workers_slider = gr.Slider(
+                     minimum=1,
+                     maximum=8,
+                     value=4,
+                     step=1,
+                     label="🔄 Concurrent Workers",
+                     info="More workers = faster processing (but more memory usage)"
+                 )
+
+                 format_btn = gr.Button(
+                     "🚀 Format Syllabus",
+                     variant="primary",
+                     size="lg"
+                 )
+
+                 sample_btn = gr.Button(
+                     "📝 Download Sample JSON",
+                     variant="secondary"
+                 )

+             with gr.Column(scale=1):
+                 status_output = gr.Textbox(
+                     label="📊 Status",
+                     lines=3,
+                     interactive=False
+                 )
+
+                 download_output = gr.File(
+                     label="📥 Download Formatted JSON",
+                     visible=False
+                 )
+
+                 gr.HTML("""
+                 <div style="background: #e3f2fd; padding: 15px; border-radius: 8px; margin-top: 15px;">
+                     <h3>✨ Features:</h3>
+                     <ul>
+                         <li>🤖 Powered by Microsoft Phi-3 AI model</li>
+                         <li>⚡ Concurrent processing for speed</li>
+                         <li>🔒 Preserves all original content</li>
+                         <li>📊 Real-time progress tracking</li>
+                         <li>🎯 Formats only syllabus content, not metadata</li>
+                         <li>✅ Validation to ensure content integrity</li>
+                     </ul>
+                 </div>
+                 """)
+
+         # Event handlers
+         def format_handler(file_path, max_workers):
+             if file_path is None:
+                 return "❌ Please upload a JSON file first.", gr.update(visible=False)
+
+             try:
+                 result_path, message = format_syllabus_file(file_path, int(max_workers))
+                 if result_path:
+                     return message, gr.update(visible=True, value=result_path)
+                 else:
+                     return message, gr.update(visible=False)
+             except Exception as e:
+                 return f"❌ Error: {str(e)}", gr.update(visible=False)
+
+         def sample_handler():
+             sample_path = create_sample_json()
+             return gr.update(visible=True, value=sample_path)
+
+         format_btn.click(
+             format_handler,
+             inputs=[file_input, workers_slider],
+             outputs=[status_output, download_output]
          )
+
+         sample_btn.click(
+             sample_handler,
+             outputs=[gr.File(label="📥 Sample JSON File", visible=True)]
+         )
+
+         gr.HTML("""
+         <div style="text-align: center; padding: 15px; margin-top: 20px; border-top: 1px solid #ddd;">
+             <p style="color: #666;">
+                 Built with ❤️ using Hugging Face Spaces |
+                 Powered by Microsoft Phi-3 |
+                 Optimized for concurrent processing
+             </p>
+         </div>
+         """)

+     return interface

+ # Launch the app
  if __name__ == "__main__":
+     interface = create_interface()
+     interface.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         share=True
+     )
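
For reference, a minimal sketch of driving the rewritten formatter outside the Gradio UI; the input path is hypothetical, while the class and method names come from the diff above. Note that because format_single_unit acquires processing_lock around generation, the thread pool mainly overlaps bookkeeping - model generation itself still runs one unit at a time:

import json
from app import SyllabusFormatter  # class introduced in this commit

formatter = SyllabusFormatter()  # defaults to microsoft/Phi-3-mini-4k-instruct
with open("syllabus.json", encoding="utf-8") as f:  # hypothetical input file
    data = json.load(f)

# load_model() is invoked lazily inside format_syllabus_concurrent()
formatted = formatter.format_syllabus_concurrent(
    data,
    progress_callback=lambda pct, msg: print(f"{pct:.0f}% {msg}"),  # pct is 0-100
    max_workers=4,
)

with open("syllabus_formatted.json", "w", encoding="utf-8") as f:
    json.dump(formatted, f, indent=2, ensure_ascii=False)
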
gradio_app.py CHANGED
@@ -1,466 +1,503 @@
- import gradio as gr
- from model.analyzer import analyze_content
- import asyncio
- import time
- import httpx
- import subprocess
- import atexit
-
- # Start the API server
- def start_api_server():
-     # Start uvicorn in a subprocess
-     process = subprocess.Popen(["uvicorn", "script_search_api:app", "--reload"])
-     return process
-
- # Stop the API server
- def stop_api_server(process):
-     process.terminate()
-
- # Register the exit handler
- api_process = start_api_server()
- atexit.register(stop_api_server, api_process)
-
-
- custom_css = """
- * {
-     font-family: 'Inter', system-ui, sans-serif;
-     transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
- }
-
- .gradio-container {
-     background: #0a0a0a !important;
-     color: #fff !important;
-     min-height: 100vh;
-     position: relative;
-     overflow: hidden;
- }
-
- /* Subtle Animated Background */
- .gradio-container::before {
-     content: '';
-     position: fixed;
-     top: 0;
-     left: 0;
-     right: 0;
-     bottom: 0;
-     background:
-         linear-gradient(125deg,
-             #0a0a0a 0%,
-             rgba(218, 165, 32, 0.03) 30%,
-             rgba(218, 165, 32, 0.05) 50%,
-             rgba(218, 165, 32, 0.03) 70%,
-             #0a0a0a 100%);
-     animation: subtleGradient 20s ease infinite;
-     background-size: 400% 400%;
-     z-index: 0;
- }
-
- /* Premium Particles Effect */
- .gradio-container::after {
-     content: '';
-     position: fixed;
-     top: 0;
-     left: 0;
-     width: 100%;
-     height: 100%;
-     background: radial-gradient(circle at center, transparent 0%, #0a0a0a 70%),
-                 url("data:image/svg+xml,%3Csvg width='100' height='100' viewBox='0 0 100 100' xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='50' cy='50' r='1' fill='rgba(218, 165, 32, 0.08)'/%3E%3C/svg%3E");
-     opacity: 0.3;
-     animation: floatingParticles 30s linear infinite;
-     z-index: 1;
- }
-
- /* Luxurious Header */
- .treat-title {
-     text-align: center;
-     padding: 3rem 1rem;
-     position: relative;
-     overflow: hidden;
-     z-index: 2;
-     background: linear-gradient(180deg,
-         rgba(218, 165, 32, 0.05),
-         transparent 70%);
- }
-
- .treat-title::before {
-     content: '';
-     position: absolute;
-     top: 0;
-     left: 50%;
-     width: 60%;
-     height: 1px;
-     background: linear-gradient(90deg,
-         transparent,
-         rgba(218, 165, 32, 0.3),
-         transparent);
-     transform: translateX(-50%);
-     animation: luxuryScan 5s ease-in-out infinite;
- }
-
- .treat-title h1 {
-     font-size: 4.5rem;
-     font-weight: 800;
-     background: linear-gradient(135deg,
-         #996515 0%,
-         #DAA520 25%,
-         #FFD700 50%,
-         #DAA520 75%,
-         #996515 100%);
-     background-size: 200% auto;
-     -webkit-background-clip: text;
-     -webkit-text-fill-color: transparent;
-     margin-bottom: 0.5rem;
-     letter-spacing: -0.05em;
-     animation: goldenFlow 8s ease infinite;
-     position: relative;
- }
-
- .treat-title h1::after {
-     content: attr(data-text);
-     position: absolute;
-     left: 0;
-     top: 0;
-     width: 100%;
-     height: 100%;
-     background: linear-gradient(135deg,
-         transparent 0%,
-         rgba(218, 165, 32, 0.4) 50%,
-         transparent 100%);
-     background-size: 200% auto;
-     -webkit-background-clip: text;
-     -webkit-text-fill-color: transparent;
-     opacity: 0.5;
-     animation: premiumGlow 4s ease-in-out infinite;
- }
-
- /* Premium Content Areas */
- .tabs {
-     background: rgba(10, 10, 10, 0.95);
-     border: 1px solid rgba(218, 165, 32, 0.15);
-     border-radius: 16px;
-     padding: 1rem;
-     margin: 0 1rem 2rem 1rem;
-     position: relative;
-     z-index: 2;
-     backdrop-filter: blur(10px);
-     box-shadow: 0 0 30px rgba(218, 165, 32, 0.05);
- }
-
- .tabs::before {
-     content: '';
-     position: absolute;
-     top: -1px;
-     left: -1px;
-     right: -1px;
-     bottom: -1px;
-     background: linear-gradient(45deg,
-         rgba(218, 165, 32, 0.1),
-         transparent,
-         rgba(218, 165, 32, 0.1));
-     border-radius: 16px;
-     z-index: -1;
-     animation: luxuryBorder 6s ease-in-out infinite;
- }
-
- .content-area {
-     background: rgba(10, 10, 10, 0.95) !important;
-     border: 1px solid rgba(218, 165, 32, 0.15) !important;
-     border-radius: 12px !important;
-     padding: 1.5rem !important;
-     backdrop-filter: blur(10px);
-     position: relative;
- }
-
- /* Premium Input Fields */
- .gradio-textbox textarea {
-     background: rgba(10, 10, 10, 0.9) !important;
-     border: 1px solid rgba(218, 165, 32, 0.2) !important;
-     border-radius: 8px !important;
-     color: rgba(255, 255, 255, 0.9) !important;
-     font-size: 0.95rem !important;
-     line-height: 1.6 !important;
-     padding: 1rem !important;
- }
-
- .gradio-textbox textarea:focus {
-     border-color: #DAA520 !important;
-     box-shadow: 0 0 20px rgba(218, 165, 32, 0.1) !important;
-     background: rgba(10, 10, 10, 0.95) !important;
-     transform: translateY(-2px);
- }
-
- /* Luxury Buttons */
- .gradio-button {
-     background: linear-gradient(45deg,
-         #996515,
-         #DAA520,
-         #FFD700,
-         #DAA520) !important;
-     background-size: 200% auto !important;
-     border: none !important;
-     border-radius: 8px !important;
-     color: #0a0a0a !important;
-     font-weight: 600 !important;
-     font-size: 0.95rem !important;
-     padding: 0.75rem 1.5rem !important;
-     letter-spacing: 0.025em !important;
-     position: relative;
-     overflow: hidden;
-     transition: all 0.3s ease !important;
-     animation: goldenFlow 4s ease infinite;
- }
-
- .gradio-button:hover {
-     transform: translateY(-2px);
-     box-shadow: 0 5px 20px rgba(218, 165, 32, 0.2) !important;
- }
-
- /* Premium Footer */
- .footer {
-     text-align: center;
-     padding: 2rem 0;
-     margin-top: 3rem;
-     font-size: 1.0rem;
-     position: relative;
-     z-index: 2;
- }
-
- .footer p {
-     color: rgba(218, 165, 32, 0.8);
- }
-
- .footer .heart {
-     color: #DAA520;
-     animation: luxuryPulse 2s ease infinite;
- }
-
- .footer .name {
-     color: #DAA520;
-     text-decoration: none;
-     transition: all 0.3s ease;
-     padding: 0 4px;
- }
-
- .footer .name:hover {
-     color: #FFD700;
- }
-
- footer {
-     visibility: hidden;
- }
-
- /* Premium Animations */
- @keyframes subtleGradient {
-     0% { background-position: 0% 50%; }
-     50% { background-position: 100% 50%; }
-     100% { background-position: 0% 50%; }
- }
-
- @keyframes floatingParticles {
-     0% { transform: translateY(0); }
-     100% { transform: translateY(-100%); }
- }
-
- @keyframes luxuryScan {
-     0% { transform: translateX(-150%) scaleX(0.3); opacity: 0; }
-     50% { transform: translateX(-50%) scaleX(1); opacity: 0.5; }
-     100% { transform: translateX(50%) scaleX(0.3); opacity: 0; }
- }
-
- @keyframes goldenFlow {
-     0% { background-position: 0% 50%; }
-     50% { background-position: 100% 50%; }
-     100% { background-position: 0% 50%; }
- }
-
- @keyframes premiumGlow {
-     0% { opacity: 0.2; transform: scale(1); }
-     50% { opacity: 0.4; transform: scale(1.01); }
-     100% { opacity: 0.2; transform: scale(1); }
- }
-
- @keyframes luxuryBorder {
-     0% { opacity: 0.3; }
-     50% { opacity: 0.7; }
-     100% { opacity: 0.3; }
- }
-
- @keyframes luxuryPulse {
-     0% { transform: scale(1); opacity: 0.8; }
-     50% { transform: scale(1.1); opacity: 1; }
-     100% { transform: scale(1); opacity: 0.8; }
- }
- """
- # Start the API server
- def start_api_server():
-     # Start uvicorn in a subprocess
-     process = subprocess.Popen(["uvicorn", "script_search_api:app", "--reload"])
-     return process
-
- # Stop the API server
- def stop_api_server(process):
-     process.terminate()
-
- # Register the exit handler
- api_process = start_api_server()
- atexit.register(stop_api_server, api_process)
-
- async def analyze_with_progress(movie_name, progress=gr.Progress()):
-     """Handle analysis with progress updates in Gradio"""
-     try:
-         async with httpx.AsyncClient(timeout=60.0) as client:
-             # Start the analysis
-             response = await client.get(
-                 "http://localhost:8000/api/start_analysis",
-                 params={"movie_name": movie_name}
-             )
-             response.raise_for_status()
-             task_id = response.json()["task_id"]
-
-             # Poll for progress
-             while True:
-                 progress_response = await client.get(
-                     f"http://localhost:8000/api/progress/{task_id}"
-                 )
-                 progress_response.raise_for_status()
-                 status = progress_response.json()
-
-                 # Update Gradio progress
-                 progress(status["progress"], desc=status["status"])
-
-                 if status["is_complete"]:
-                     if status["error"]:
-                         return f"Error: {status['error']}"
-                     elif status["result"]:
-                         triggers = status["result"].get("detected_triggers", [])
-                         if not triggers or triggers == ["None"]:
-                             return "✓ No triggers detected in the content."
-                         else:
-                             trigger_list = "\n".join([f"• {trigger}" for trigger in triggers])
-                             return f"⚠ Triggers Detected:\n{trigger_list}"
-                     break
-
-                 await asyncio.sleep(0.5)
-
-     except Exception as e:
-         return f"Error: {str(e)}"
-
- def analyze_with_loading(text, progress=gr.Progress()):
-     """
-     Synchronous wrapper for the async analyze_content function with smooth progress updates
-     """
-     # Initialize progress
-     progress(0, desc="Starting analysis...")
-
-     # Initial setup phase - smoother progression
-     for i in range(25):
-         time.sleep(0.04)  # Slightly longer sleep for smoother animation
-         progress((i + 1) / 100, desc="Initializing analysis...")
-
-     # Pre-processing phase
-     for i in range(25, 45):
-         time.sleep(0.03)
-         progress((i + 1) / 100, desc="Pre-processing content...")
-
-     # Perform analysis
-     progress(0.45, desc="Analyzing content...")
-     try:
-         result = asyncio.run(analyze_content(text))
-
-         # Analysis progress simulation
-         for i in range(45, 75):
-             time.sleep(0.03)
-             progress((i + 1) / 100, desc="Processing results...")
-
-     except Exception as e:
-         return f"Error during analysis: {str(e)}"
-
-     # Final processing with smooth progression
-     for i in range(75, 100):
-         time.sleep(0.02)
-         progress((i + 1) / 100, desc="Finalizing results...")
-
-     # Format the results
-     triggers = result["detected_triggers"]
-     if triggers == ["None"]:
-         return "✓ No triggers detected in the content."
-     else:
-         trigger_list = "\n".join([f"• {trigger}" for trigger in triggers])
-         return f"⚠ Triggers Detected:\n{trigger_list}"
-
- # Update the Gradio interface with new styling
- import gradio as gr
- from model.analyzer import analyze_content
- import asyncio
- import time
- import httpx
- import subprocess
- import atexit
-
- # Keep your existing CSS and server setup code...
- # [Previous code until the interface definition remains the same]
-
- # Update the Gradio interface with fixed button handling
- with gr.Blocks(css=custom_css, theme=gr.themes.Monochrome()) as iface:
-     # Title section
-     gr.HTML("""
-         <div class="treat-title">
-             <h1 data-text="TREAT R1">TREAT R1</h1>
-             <p>Trigger Recognition for Enjoyable and Appropriate Television</p>
-         </div>
-     """)
-
-     with gr.Tabs() as tabs:
-         with gr.Tab("Content Analysis"):  # Changed from TabItem to Tab
-             with gr.Column():
-                 input_text = gr.Textbox(
-                     label="ANALYZE CONTENT",
-                     placeholder="Enter the content you want to analyze...",
-                     lines=8
-                 )
-                 analyze_btn = gr.Button("✨ Analyze")
-
-         with gr.Tab("Movie Search"):  # Changed from TabItem to Tab
-             with gr.Column():
-                 search_query = gr.Textbox(
-                     label="SEARCH MOVIES",
-                     placeholder="Type a movie title to search...",
-                     lines=1
-                 )
-                 search_button = gr.Button("🔍 Search")
-
-     output_text = gr.Textbox(
-         label="ANALYSIS RESULTS",
-         lines=5,
-         interactive=False
-     )
-
-     status_text = gr.Markdown(
-         value=""
-     )
-
-     # Define click events
-     analyze_btn.click(
-         fn=analyze_with_loading,
-         inputs=input_text,
-         outputs=output_text
-     )
-
-     search_button.click(
-         fn=analyze_with_progress,
-         inputs=search_query,
-         outputs=output_text
-     )
-
-     gr.HTML("""
-         <div class="footer">
-             <p>Made with <span class="heart">💖</span> by <a href="https://www.linkedin.com/in/kubermehta/" target="_blank">Kuber Mehta</a></p>
-         </div>
-     """)
-
- if __name__ == "__main__":
-     iface.launch(
-         share=False,
-         debug=True,
-         show_error=True
      )

+ import gradio as gr
+ import json
+ import time
+ import logging
+ import re
+ from typing import Dict, Any, List, Tuple
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ import threading
+ from datetime import datetime
+ import os
+ import tempfile
+
+ # Hugging Face Transformers
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ import gc
+
+ # Setup logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ class SyllabusFormatter:
+     def __init__(self, model_name="microsoft/Phi-3-mini-4k-instruct"):
+         """Initialize the formatter with Phi-3 model"""
+         self.model_name = model_name
+         self.tokenizer = None
+         self.model = None
+         self.pipe = None
+         self.is_model_loaded = False
+         self.processing_lock = threading.Lock()
+
+     def load_model(self):
+         """Load the Phi-3 model with optimizations"""
+         if self.is_model_loaded:
+             return True
+
+         try:
+             logger.info(f"Loading model: {self.model_name}")
+
+             # Load tokenizer
+             self.tokenizer = AutoTokenizer.from_pretrained(
+                 self.model_name,
+                 trust_remote_code=True
+             )
+
+             # Load model with optimizations
+             self.model = AutoModelForCausalLM.from_pretrained(
+                 self.model_name,
+                 torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+                 device_map="auto" if torch.cuda.is_available() else None,
+                 trust_remote_code=True,
+                 low_cpu_mem_usage=True
+             )
+
+             # Create pipeline
+             self.pipe = pipeline(
+                 "text-generation",
+                 model=self.model,
+                 tokenizer=self.tokenizer,
+                 device=0 if torch.cuda.is_available() else -1,
+                 torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
+             )
+
+             self.is_model_loaded = True
+             logger.info("Model loaded successfully!")
+             return True
+
+         except Exception as e:
+             logger.error(f"Error loading model: {str(e)}")
+             return False
+
+     def create_formatting_prompt(self, unit_content: str, unit_name: str, subject_name: str = "") -> str:
+         """Create a focused prompt for formatting syllabus content"""
+         prompt = f"""<|system|>You are a professional academic syllabus formatter. Your job is to take poorly formatted syllabus content and make it beautifully organized and readable.
+
+ RULES:
+ 1. PRESERVE every single word, topic, and concept from the original
+ 2. NEVER add explanations, examples, or new content
+ 3. ONLY restructure and format the existing text
+ 4. Use clear headings, bullet points, and logical grouping
+ 5. Separate different topics with proper spacing
+ 6. Make it scannable and easy to read
+
+ FORMAT STYLE:
+ - Use main topic headings with proper capitalization
+ - Group related subtopics under main topics
+ - Use bullet points (•) for lists of concepts
+ - Use sub-bullets (◦) for details under main bullets
+ - Separate major sections with line breaks
+ - Keep technical terms exactly as written<|end|>
+
+ <|user|>Subject: {subject_name}
+ Unit: {unit_name}
+
+ Original content (poorly formatted):
+ {unit_content}
+
+ Task: Reformat this content to be beautifully organized and readable. Do NOT add any new information - only restructure what's already there.<|end|>
+
+ <|assistant|>"""
+         return prompt
+
+     def format_single_unit(self, unit_data: Tuple[str, str, str, str, str]) -> Tuple[str, str, str, str, str]:
+         """Format a single unit's content"""
+         branch, semester, subject, unit_name, unit_content = unit_data
+
+         try:
+             with self.processing_lock:
+                 # Create prompt
+                 prompt = self.create_formatting_prompt(unit_content, unit_name, subject)
+
+                 # Generate formatted content
+                 response = self.pipe(
+                     prompt,
+                     max_new_tokens=2048,
+                     temperature=0.1,
+                     do_sample=True,
+                     top_p=0.9,
+                     repetition_penalty=1.1,
+                     pad_token_id=self.tokenizer.eos_token_id,
+                     eos_token_id=self.tokenizer.eos_token_id
+                 )
+
+                 # Extract formatted content
+                 generated_text = response[0]['generated_text']
+                 assistant_start = generated_text.find("<|assistant|>")
+
+                 if assistant_start != -1:
+                     formatted_content = generated_text[assistant_start + len("<|assistant|>"):].strip()
+                 else:
+                     formatted_content = generated_text[len(prompt):].strip()
+
+                 # Clean up the content
+                 formatted_content = self.clean_generated_content(formatted_content)
+
+                 # Validate content
+                 if self.validate_formatted_content(unit_content, formatted_content):
+                     return (branch, semester, subject, unit_name, formatted_content)
+                 else:
+                     logger.warning(f"Validation failed for {subject} - {unit_name}")
+                     return (branch, semester, subject, unit_name, unit_content)
+
+         except Exception as e:
+             logger.error(f"Error formatting {subject} - {unit_name}: {str(e)}")
+             return (branch, semester, subject, unit_name, unit_content)
+
+     def clean_generated_content(self, content: str) -> str:
+         """Clean up generated content"""
+         # Remove special tokens
+         content = re.sub(r'<\|.*?\|>', '', content)
+
+         # Remove AI commentary
+         lines = content.split('\n')
+         cleaned_lines = []
+
+         for line in lines:
+             line = line.strip()
+             if (line.startswith("Here") and ("formatted" in line.lower() or "organized" in line.lower())) or \
+                line.startswith("I have") or line.startswith("The content has been") or \
+                line.startswith("Note:") or line.startswith("This formatted version"):
+                 continue
+             if line:
+                 cleaned_lines.append(line)
+
+         content = '\n'.join(cleaned_lines)
+
+         # Fix spacing
+         content = re.sub(r'\n\s*\n\s*\n+', '\n\n', content)
+         content = re.sub(r'\n([A-Z][^:\n]*:)\n', r'\n\n\1\n', content)
+
+         return content.strip()
+
+     def validate_formatted_content(self, original: str, formatted: str) -> bool:
+         """Validate that formatted content preserves important information"""
+         if len(formatted) < len(original) * 0.4:
+             return False
+
+         # Check for preservation of key terms
+         original_words = set(re.findall(r'\b[A-Z][a-z]*(?:[A-Z][a-z]*)*\b', original))
+         formatted_words = set(re.findall(r'\b[A-Z][a-z]*(?:[A-Z][a-z]*)*\b', formatted))
+
+         missing_terms = original_words - formatted_words
+         if len(missing_terms) > len(original_words) * 0.3:
+             return False
+
+         return True
+
+     def extract_units_for_processing(self, syllabus_data: Dict[str, Any]) -> List[Tuple[str, str, str, str, str]]:
+         """Extract all units for concurrent processing"""
+         units = []
+
+         for branch_name, branch_data in syllabus_data.get("syllabus", {}).items():
+             if not isinstance(branch_data, dict):
+                 continue
+
+             for sem_name, sem_data in branch_data.items():
+                 if not isinstance(sem_data, dict):
+                     continue
+
+                 for subject_name, subject_data in sem_data.items():
+                     if not isinstance(subject_data, dict) or "content" not in subject_data:
+                         continue
+
+                     content = subject_data["content"]
+                     if not isinstance(content, dict):
+                         continue
+
+                     for unit_name, unit_content in content.items():
+                         if unit_name.startswith("Unit") and isinstance(unit_content, str):
+                             units.append((branch_name, sem_name, subject_name, unit_name, unit_content))
+
+         return units
+
+     def format_syllabus_concurrent(self, syllabus_data: Dict[str, Any], progress_callback=None, max_workers=4) -> Dict[str, Any]:
+         """Format syllabus using concurrent processing"""
+         if not self.is_model_loaded:
+             if not self.load_model():
+                 raise Exception("Failed to load model")
+
+         # Extract units for processing
+         units = self.extract_units_for_processing(syllabus_data)
+         total_units = len(units)
+
+         logger.info(f"Processing {total_units} units with {max_workers} workers")
+
+         # Process units concurrently
+         processed_units = {}
+         completed_count = 0
+
+         with ThreadPoolExecutor(max_workers=max_workers) as executor:
+             # Submit all tasks
+             future_to_unit = {executor.submit(self.format_single_unit, unit): unit for unit in units}
+
+             # Process completed tasks
+             for future in as_completed(future_to_unit):
+                 try:
+                     branch, semester, subject, unit_name, formatted_content = future.result()
+
+                     # Store the result
+                     key = f"{branch}|{semester}|{subject}|{unit_name}"
+                     processed_units[key] = formatted_content
+
+                     completed_count += 1
+                     progress = (completed_count / total_units) * 100
+
+                     if progress_callback:
+                         progress_callback(progress, f"Processed {subject} - {unit_name}")
+
+                     logger.info(f"Completed {completed_count}/{total_units} ({progress:.1f}%)")
+
+                 except Exception as e:
+                     logger.error(f"Error processing unit: {str(e)}")
+
+         # Update the syllabus data with formatted content
+         for branch_name, branch_data in syllabus_data.get("syllabus", {}).items():
+             if not isinstance(branch_data, dict):
+                 continue
+
+             for sem_name, sem_data in branch_data.items():
+                 if not isinstance(sem_data, dict):
+                     continue
+
+                 for subject_name, subject_data in sem_data.items():
+                     if not isinstance(subject_data, dict) or "content" not in subject_data:
+                         continue
+
+                     content = subject_data["content"]
+                     if not isinstance(content, dict):
+                         continue
+
+                     for unit_name in content.keys():
+                         if unit_name.startswith("Unit"):
+                             key = f"{branch_name}|{sem_name}|{subject_name}|{unit_name}"
+                             if key in processed_units:
+                                 syllabus_data["syllabus"][branch_name][sem_name][subject_name]["content"][unit_name] = processed_units[key]
+
+         # Add metadata
+         if "metadata" not in syllabus_data:
+             syllabus_data["metadata"] = {}
+
+         syllabus_data["metadata"]["lastFormatted"] = datetime.now().isoformat()
+         syllabus_data["metadata"]["formattingNote"] = "Content formatted using Phi-3 AI for enhanced readability"
+         syllabus_data["metadata"]["originalContentPreserved"] = True
+         syllabus_data["metadata"]["unitsProcessed"] = completed_count
+         syllabus_data["metadata"]["formattingModel"] = self.model_name
+         syllabus_data["metadata"]["version"] = "2.0"
+         syllabus_data["metadata"]["processedConcurrently"] = True
+         syllabus_data["metadata"]["maxWorkers"] = max_workers
+
+         return syllabus_data
+
+ # Global formatter instance
+ formatter = SyllabusFormatter()
+
+ def format_syllabus_file(file_path, max_workers=4, progress=gr.Progress()):
+     """Main function to format syllabus file"""
+     try:
+         # Load JSON file
+         with open(file_path, 'r', encoding='utf-8') as f:
+             syllabus_data = json.load(f)
+
+         # Count units
+         units = formatter.extract_units_for_processing(syllabus_data)
+         total_units = len(units)
+
+         progress(0, f"Found {total_units} units to process")
+
+         # Progress callback
+         def update_progress(percent, message):
+             progress(percent/100, message)
+
+         # Format the syllabus
+         formatted_data = formatter.format_syllabus_concurrent(
+             syllabus_data,
+             progress_callback=update_progress,
+             max_workers=max_workers
+         )
+
+         # Save to temporary file
+         with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False, encoding='utf-8') as f:
+             json.dump(formatted_data, f, indent=2, ensure_ascii=False)
+             temp_path = f.name
+
+         progress(1.0, f"Completed! Processed {total_units} units")
+
+         return temp_path, f"✅ Successfully formatted {total_units} units!"
+
+     except Exception as e:
+         error_msg = f"❌ Error: {str(e)}"
+         logger.error(error_msg)
+         return None, error_msg
+
+ def create_sample_json():
+     """Create a sample JSON file for testing"""
+     sample_data = {
+         "metadata": {
+             "totalFiles": 1,
+             "generatedAt": datetime.now().isoformat(),
+             "source": "Sample syllabus for testing",
+             "description": "Sample syllabus content"
+         },
+         "syllabus": {
+             "CSE": {
+                 "SEM1": {
+                     "Mathematics": {
+                         "extractedFrom": {
+                             "path": "CSE > SEM1 > Mathematics",
+                             "branch": "CSE",
+                             "semester": "SEM1",
+                             "subject": "Mathematics"
+                         },
+                         "content": {
+                             "Unit I": "Differential Calculus: Limits, continuity, derivatives, applications of derivatives, maxima and minima, curve sketching, related rates, optimization problems, L'Hospital's rule, Taylor series, Partial derivatives, total differential, chain rule, implicit differentiation, Jacobians.",
+                             "Unit II": "Integral Calculus: Integration techniques, definite integrals, applications of integrals, area under curves, volume of solids, arc length, surface area, Multiple integrals, double integrals, triple integrals, change of variables, applications in geometry and physics."
+                         }
+                     }
+                 }
+             }
+         }
+     }
+
+     with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False, encoding='utf-8') as f:
+         json.dump(sample_data, f, indent=2, ensure_ascii=False)
+         return f.name
+
+ # Gradio Interface
+ def create_interface():
+     with gr.Blocks(
+         title="Syllabus Formatter - AI-Powered JSON Syllabus Formatter",
+         theme=gr.themes.Soft(
+             primary_hue="blue",
+             secondary_hue="purple",
+             neutral_hue="gray"
+         )
+     ) as interface:
+
+         gr.HTML("""
+         <div style="text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px; margin-bottom: 20px;">
+             <h1 style="font-size: 2.5em; margin-bottom: 10px;">🎓 Syllabus Formatter</h1>
+             <p style="font-size: 1.2em; opacity: 0.9;">AI-Powered JSON Syllabus Content Formatter using Phi-3</p>
+             <p style="font-size: 1em; opacity: 0.8;">Upload your JSON syllabus file and get beautifully formatted content with concurrent processing for speed!</p>
+         </div>
+         """)
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 gr.HTML("""
+                 <div style="background: #f8f9fa; padding: 15px; border-radius: 8px; margin-bottom: 15px;">
+                     <h3>📋 Instructions:</h3>
+                     <ol>
+                         <li>Upload your JSON syllabus file</li>
+                         <li>Choose number of concurrent workers (1-8)</li>
+                         <li>Click "Format Syllabus" to start processing</li>
+                         <li>Download the formatted JSON file</li>
+                     </ol>
+                     <p><strong>Note:</strong> Only syllabus content will be formatted, metadata remains unchanged.</p>
+                 </div>
+                 """)
+
+                 file_input = gr.File(
+                     label="📁 Upload JSON Syllabus File",
+                     file_types=[".json"],
+                     type="filepath"
+                 )
+
+                 workers_slider = gr.Slider(
+                     minimum=1,
+                     maximum=8,
+                     value=4,
+                     step=1,
+                     label="🔄 Concurrent Workers",
+                     info="More workers = faster processing (but more memory usage)"
+                 )
+
+                 format_btn = gr.Button(
+                     "🚀 Format Syllabus",
+                     variant="primary",
+                     size="lg"
+                 )
+
+                 sample_btn = gr.Button(
+                     "📝 Download Sample JSON",
+                     variant="secondary"
+                 )
+
+             with gr.Column(scale=1):
+                 status_output = gr.Textbox(
+                     label="📊 Status",
+                     lines=3,
+                     interactive=False
+                 )
+
+                 download_output = gr.File(
+                     label="📥 Download Formatted JSON",
+                     visible=False
+                 )
+
+                 gr.HTML("""
+                 <div style="background: #e3f2fd; padding: 15px; border-radius: 8px; margin-top: 15px;">
+                     <h3>✨ Features:</h3>
+                     <ul>
+                         <li>🤖 Powered by Microsoft Phi-3 AI model</li>
+                         <li>⚡ Concurrent processing for speed</li>
+                         <li>🔒 Preserves all original content</li>
+                         <li>📊 Real-time progress tracking</li>
+                         <li>🎯 Formats only syllabus content, not metadata</li>
+                         <li>✅ Validation to ensure content integrity</li>
+                     </ul>
+                 </div>
+                 """)
+
+         # Event handlers
+         def format_handler(file_path, max_workers):
+             if file_path is None:
+                 return "❌ Please upload a JSON file first.", gr.update(visible=False)
+
+             try:
+                 result_path, message = format_syllabus_file(file_path, int(max_workers))
+                 if result_path:
+                     return message, gr.update(visible=True, value=result_path)
+                 else:
+                     return message, gr.update(visible=False)
+             except Exception as e:
+                 return f"❌ Error: {str(e)}", gr.update(visible=False)
+
+         def sample_handler():
+             sample_path = create_sample_json()
+             return gr.update(visible=True, value=sample_path)
+
+         format_btn.click(
+             format_handler,
+             inputs=[file_input, workers_slider],
+             outputs=[status_output, download_output]
          )
+
+         sample_btn.click(
+             sample_handler,
+             outputs=[gr.File(label="📥 Sample JSON File", visible=True)]
+         )
+
+         gr.HTML("""
+         <div style="text-align: center; padding: 15px; margin-top: 20px; border-top: 1px solid #ddd;">
+             <p style="color: #666;">
+                 Built with ❤️ using Hugging Face Spaces |
+                 Powered by Microsoft Phi-3 |
+                 Optimized for concurrent processing
+             </p>
+         </div>
+         """)
+
+     return interface
+
+ # Launch the app
+ if __name__ == "__main__":
+     interface = create_interface()
+     interface.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         share=True
      )