import gradio as gr
import openai
from langdetect import detect
from transformers import pipeline
from keybert import KeyBERT
from fpdf import FPDF
import os
import re
import unicodedata
# --- SETUP ---
openai.api_key = os.getenv("OPENAI_API_KEY") # Set in HF Space Secrets
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
kw_model = KeyBERT()
FONT_PATH = "DejaVuSans.ttf" # Must be uploaded to Space root!
BRANDS = [
"Apple", "Google", "Microsoft", "Amazon", "Coca-Cola", "Pepsi", "Samsung", "Nike", "ICICI",
"Meta", "Facebook", "Instagram", "YouTube", "Netflix", "Reliance", "Tata", "Airtel", "Jio",
"Motilal", "Wipro", "Paytm", "Zomato", "Swiggy", "OLA", "Uber"
]
def extract_brands(text):
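    """Return every brand from BRANDS found (case-insensitively) in the text, or a placeholder."""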
found = [brand for brand in BRANDS if brand.lower() in text.lower()]
return found if found else ["None detected"]
def extract_topics(text, top_n=5):
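    """Extract up to top_n keyword topics from the text with KeyBERT."""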
keywords = kw_model.extract_keywords(text, top_n=top_n, stop_words='english')
topics = [kw for kw, score in keywords]
return topics if topics else ["None extracted"]
def make_bullets(summary):
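    """Turn a summary paragraph into a simple bullet list, one sentence per bullet."""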
sentences = summary.replace("\n", " ").split('. ')
bullets = [f"- {s.strip()}" for s in sentences if s.strip()]
return "\n".join(bullets)
def make_str(val):
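    """Coerce any value (None, scalar, list, dict) to a string; lists are joined with newlines."""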
try:
if val is None:
return ""
if isinstance(val, (bool, int, float)):
return str(val)
if isinstance(val, list):
return "\n".join([make_str(v) for v in val])
if isinstance(val, dict):
return str(val)
return str(val)
except Exception:
return ""
def very_safe_multicell(pdf, text, w=0, h=8, maxlen=50):
"""Force-break lines so no line/word exceeds maxlen chars, avoiding fpdf2 crash."""
if not isinstance(text, str):
text = str(text)
# Remove unprintable chars (e.g. control characters)
text = "".join(ch for ch in text if unicodedata.category(ch)[0] != "C")
    # Step 1: split any single word longer than maxlen into maxlen-sized chunks,
    # keeping the rest of each paragraph on one line
    def break_long_words(t):
        out_lines = []
        for paragraph in t.split('\n'):
            words = []
            for word in paragraph.split(' '):
                while len(word) > maxlen:
                    words.append(word[:maxlen])
                    word = word[maxlen:]
                words.append(word)
            out_lines.append(' '.join(words))
        return '\n'.join(out_lines)
text = break_long_words(text)
# Step 2: ensure no line is too long (wrap at maxlen)
wrapped = []
for line in text.splitlines():
while len(line) > maxlen:
wrapped.append(line[:maxlen])
line = line[maxlen:]
wrapped.append(line)
safe_text = '\n'.join(wrapped)
pdf.multi_cell(w, h, safe_text)
def create_pdf_report(language, transcript_en, brands, topics, key_takeaways):
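    """Assemble the Unicode PDF report (DejaVu font) and return the generated file path."""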
pdf = FPDF()
pdf.set_auto_page_break(auto=True, margin=10)
pdf.set_margins(left=10, top=10, right=10)
pdf.add_font("DejaVu", style="", fname=FONT_PATH, uni=True)
pdf.add_font("DejaVu", style="B", fname=FONT_PATH, uni=True)
pdf.add_page()
pdf.set_font("DejaVu", "B", 16)
pdf.cell(0, 10, "Audio Transcript & Analysis Report", ln=True, align="C")
pdf.set_font("DejaVu", size=12)
pdf.ln(5)
pdf.cell(0, 10, f"Detected Language: {language}", ln=True)
pdf.ln(5)
pdf.set_font("DejaVu", "B", 12)
pdf.cell(0, 10, "English Transcript:", ln=True)
pdf.set_font("DejaVu", size=12)
very_safe_multicell(pdf, transcript_en or "", maxlen=50)
pdf.ln(3)
pdf.set_font("DejaVu", "B", 12)
pdf.cell(0, 10, "Brands Detected:", ln=True)
pdf.set_font("DejaVu", size=12)
very_safe_multicell(pdf, ", ".join(brands), maxlen=50)
pdf.set_font("DejaVu", "B", 12)
pdf.cell(0, 10, "Key Topics:", ln=True)
pdf.set_font("DejaVu", size=12)
very_safe_multicell(pdf, ", ".join(topics), maxlen=50)
pdf.set_font("DejaVu", "B", 12)
pdf.cell(0, 10, "Summary (Bulleted):", ln=True)
pdf.set_font("DejaVu", size=10)
for takeaway in key_takeaways.split('\n'):
very_safe_multicell(pdf, takeaway, maxlen=50)
pdf_file = "/tmp/analysis_report.pdf"
pdf.output(pdf_file)
return pdf_file
def process_audio(audio_path):
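    """Transcribe the uploaded audio with Whisper, translate to English if the detected
    language is not English, then summarize, detect brands/topics, and build the PDF report.
    Returns the 7-tuple consumed by the Gradio output components below."""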
if not audio_path or not isinstance(audio_path, str):
return ("No audio file provided.", "", "", "", "", "", None)
try:
with open(audio_path, "rb") as audio_file:
transcript = openai.audio.transcriptions.create(
model="whisper-1",
file=audio_file,
response_format="text"
)
transcript = make_str(transcript).strip()
except Exception as e:
return (f"Error in transcription: {e}", "", "", "", "", "", None)
try:
detected_lang = detect(transcript)
lang_text = {'en': 'English', 'hi': 'Hindi', 'ta': 'Tamil'}.get(detected_lang, detected_lang)
    except Exception:
        # Fall back to sentinels so the translation check below never hits an undefined name
        detected_lang = "unknown"
        lang_text = "unknown"
transcript_en = transcript
if detected_lang != "en":
try:
with open(audio_path, "rb") as audio_file:
transcript_en = openai.audio.translations.create(
model="whisper-1",
file=audio_file,
response_format="text"
)
transcript_en = make_str(transcript_en).strip()
except Exception as e:
transcript_en = f"Error translating: {e}"
try:
        # truncation=True keeps long transcripts within the model's maximum input length
        summary_obj = summarizer(transcript_en, max_length=100, min_length=30, do_sample=False, truncation=True)
        if isinstance(summary_obj, list) and summary_obj and "summary_text" in summary_obj[0]:
            summary = summary_obj[0]["summary_text"]
        else:
            summary = make_str(summary_obj)
except Exception as e:
summary = f"Error summarizing: {e}"
brands = extract_brands(transcript_en)
topics = extract_topics(transcript_en)
key_takeaways = make_bullets(summary)
pdf_file = create_pdf_report(lang_text, transcript_en, brands, topics, key_takeaways)
return (
lang_text,
transcript,
transcript_en,
", ".join(brands),
", ".join(topics),
key_takeaways,
pdf_file
)
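# Gradio UI: the seven output components below must match, in order, the tuple returned by process_audio.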
iface = gr.Interface(
fn=process_audio,
inputs=gr.Audio(type="filepath", label="Upload MP3/WAV Audio"),
outputs=[
gr.Textbox(label="Detected Language"),
gr.Textbox(label="Original Transcript"),
gr.Textbox(label="English Transcript (if translated)"),
gr.Textbox(label="Brands Detected"),
gr.Textbox(label="Key Topics"),
gr.Textbox(label="Bulleted Key Takeaways"),
gr.File(label="Download PDF Report")
],
title="Audio Transcript, Brand & Topic Analysis (OpenAI Whisper + Unicode PDF Download)",
description="Upload your audio file (MP3/WAV). Get transcript, summary, brand & topic detection, and download PDF. Unicode (Indian language/emoji) supported."
)
iface.launch()
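# To run locally (assuming OPENAI_API_KEY is exported and DejaVuSans.ttf sits next to this file),
# install the dependencies (roughly: gradio, openai, langdetect, transformers plus a backend such
# as torch, keybert, fpdf2) and start the app with `python app.py`.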