Create app.py
app.py
ADDED
@@ -0,0 +1,188 @@
import re

import contractions
import gradio as gr
import joblib
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, pipeline

# Download the intent classifier and its matching TF-IDF vectorizer from the
# Hugging Face Hub, then deserialize both with joblib.
repo_id = "DevBhojani/Classification-SamsumDataset"
model_filename = "random_forest_classifier_model.joblib"
vectorizer_filename = "tfidf_vectorizer.joblib"

model_path = hf_hub_download(repo_id=repo_id, filename=model_filename)
vectorizer_path = hf_hub_download(repo_id=repo_id, filename=vectorizer_filename)

loaded_classifier_model = joblib.load(model_path)
loaded_tfidf_vectorizer = joblib.load(vectorizer_path)
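# Note: joblib can only unpickle these artifacts with a scikit-learn version
# compatible with the one that produced them; pinning scikit-learn in
# requirements.txt (assumed to be part of this Space) avoids unpickling errors.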
def remove_html_tags(text):
    # Strip HTML tags. Not currently called by clean_text below; the original
    # pattern removed every non-alphanumeric character, which contradicted
    # the function's name.
    return re.sub(r'<[^>]+>', '', str(text))

def remove_url(text):
    pattern = re.compile(r'https?://\S+|www\.\S+')
    return pattern.sub(r'', str(text))

def remove_emojis(text):
    emoji_pattern = re.compile(
        "["
        "\U0001F600-\U0001F64F"  # emoticons
        "\U0001F300-\U0001F5FF"  # symbols & pictographs
        "\U0001F680-\U0001F6FF"  # transport & map symbols
        "\U0001F1E0-\U0001F1FF"  # flags
        "\U00002700-\U000027BF"  # miscellaneous symbols
        "\U0001F900-\U0001F9FF"  # supplemental symbols
        "\U00002600-\U000026FF"  # weather & other symbols
        "\U0001FA70-\U0001FAFF"  # extended symbols
        "]+",
        flags=re.UNICODE
    )
    return emoji_pattern.sub(r'', str(text))

def expand_contractions(text):
    return contractions.fix(text)

def remove_special_and_numbers(text):
    # Not currently called by clean_text below.
    return re.sub(r'[^a-zA-Z\s]', '', str(text))

def clean_text(text):
    text = remove_url(text)
    text = remove_emojis(text)
    text = expand_contractions(text)
    text = text.lower()
    return text

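# Illustrative behaviour of the pipeline above (an assumed example, not from
# the original file): clean_text("I'm at https://x.co \U0001F600") returns
# roughly "i am at" -- URL and emoji stripped, contraction expanded, lowercased.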
summarizer = pipeline("summarization", model="luisotorres/bart-finetuned-samsum")
# summarizer2 = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
tokenizer = AutoTokenizer.from_pretrained("luisotorres/bart-finetuned-samsum")

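# The summarizer weights are fetched from the Hub and cached on first run.
# The tokenizer is loaded from the same checkpoint, so the token counts used
# for truncation below match what the summarization model actually sees.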
def split_into_chunks(conversation, n=15):
    # Split the dialogue into roughly n line-based chunks.
    lines = conversation.strip().split('\n')
    chunk_size = max(1, len(lines) // n)
    return ['\n'.join(lines[i:i+chunk_size]) for i in range(0, len(lines), chunk_size)]

def truncate_chunk(text, max_tokens=1024):
    tokens = tokenizer.encode(text, truncation=True, max_length=max_tokens)
    return tokenizer.decode(tokens, skip_special_tokens=True)

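# BART's encoder accepts at most 1024 positions, so truncating each chunk to
# 1024 tokens keeps the input within the model's context window.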
def summarize_chunks(chunks, model):
    summaries = []
    for chunk in chunks:
        chunk = chunk.strip()
        if not chunk:
            continue
        try:
            truncated_chunk = truncate_chunk(chunk)
            summary = model(truncated_chunk, max_length=1024, min_length=20, do_sample=False)[0]['summary_text']
            summaries.append(summary)
        except Exception as e:
            print(f"Error summarizing chunk: {e}")
    return summaries

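# Chunks that fail to summarize are logged and skipped, so the combined
# summary can silently omit parts of the conversation.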
def combine_summaries(summaries):
    return ' '.join(summaries)

def summarize_dialogue(conversation, model):
    chunks = split_into_chunks(conversation, n=1)
    summaries = summarize_chunks(chunks, model)
    final_summary = combine_summaries(summaries)
    return final_summary

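# With n=1, split_into_chunks returns the whole dialogue as a single chunk
# (chunk_size equals the number of lines), so the conversation is summarized
# in one pass; raising n would switch to chunk-wise, map-then-combine
# summarization using the helpers above.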
def analyze_meeting_transcript(user_input):
    if not user_input.strip():
        return "Please enter some text to summarize.", ""

    cleaned_input = clean_text(user_input)
    summary1 = summarize_dialogue(cleaned_input, summarizer)

    # Use the loaded vectorizer to transform the input
    cleaned_input_vectorized = loaded_tfidf_vectorizer.transform([cleaned_input])
    intent_classification = loaded_classifier_model.predict(cleaned_input_vectorized)[0]

    # Format the intent classification output
    formatted_intent = intent_classification.replace("__label__", "").replace("_", " ")

    return summary1, formatted_intent

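# Illustrative call (the returned intent string depends on the trained
# classifier's label set):
#   summary, intent = analyze_meeting_transcript("Amanda: guess what!\nChris: hey ;)")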
interface = gr.Interface(
    fn=analyze_meeting_transcript,
    inputs=gr.Textbox(label="Enter dialogue here", lines=12, placeholder="Paste your meeting transcript..."),
    outputs=[
        gr.Textbox(label="Summary (Luis Torres BART)"),
        # gr.Textbox(label="Summary 2 (KN Karthick MEETING_SUMMARY)"),
        gr.Textbox(label="Intent Classification")
    ],
    title="Meeting Transcript Analyzer",
    description="Summarizes meeting dialogues and classifies the intent.",
    allow_flagging="never",
    examples=[
        [
            '''
Amanda: guess what!
Chris: hey ;) ur pregnant!
Amanda: noo ;) but close enough! I'm so proud of myself! Remember I go to these dancing classes with Michael?
Chris: Yeah?
Amanda: So we went yesterday and the instructor needed a partner to show the steps we had so far
Chris: so there's only one guy teaching you? without a female partner?
Amanda: Well, this time he was alone, BUT THAT'S NOT THE POINT! Listen!
Chris: yeah, sorry :D tell me!
Amanda: So he needed a partner and noone really knew the steps like perfectly
Amanda: and obviously noone wanted to be mocked
Amanda: so I thought, aaaah :D
Chris: u volunteered? really? you??
Amanda: yeah!
Chris: whooa! that's so great! #therapy #worthit :D
Amanda: yeah i know :D maybe one day i'll actually stop being so shy
Chris: that's definitely the first step! :D congrats!
Amanda: tx ^_^
Chris: what dance was it?
Amanda: English waltz
Chris: isn't it, like, SO difficult?
Amanda: yeah it is! but everyone said I looked like a pro :D
Chris: Well done!!
'''
        ],
        ["I have some exciting news to share!"],
        [
            '''
Beryl: Hello guys! How are you doing? We've lost contact for a few months now. Hope you are well.
Anton: A happy hello to you Beryl! Great to hear from you. We are fine, thanks. And yourself?
Beryl: I'm very well indeed. Thank you. Any changes in your setup?
Anton: Not really. SOS. Same Old Soup ;) But we are happy for that.
Beryl: Are you still running your lovely airbnb?
Anton: Oh yes, we are. We had a few months off during summer, our summer, but now bookings start flowing in. Well... Are you planning to visit us? You two are always welcome!
Beryl: You caught me here. I'm vaguely considering going down to Onrus again, most likely in January. What does it look like with vacancies then?
Anton: Perfect! Just give me your dates and I'll keep it booked for you.
Beryl: Would you prefer me to do it via airbnb website or just like this directly with you?
Anton: I think it'll be more advantageous for both of us to do it directly. Do you know exactly when you'll be coming?
Beryl: Not so much. Can I get back to you in 2, 3 days' time?
Anton: ASAP really. As I say we've been receiving bookings daily now.
Beryl: Well, no big deal. I'll be staying in Cape Town for a longer time and am quite flexible in my dates.
Anton: Will you be coming with Tino, if I may ask?
Beryl: No. I am single again. Hurray! So pls make it single occupancy any week in January, Anton.
Anton: Great! 4th till 12th?
Beryl: Very good. I'll call you beforehand from Cape Town. Greetings to you both!
Anton: Take care!'''
        ],
    ]
)

if __name__ == "__main__":
    interface.launch(debug=True, share=True)
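# share=True asks Gradio to open a temporary public *.gradio.live tunnel and
# debug=True keeps the process attached so errors surface in the console;
# neither flag is needed when the app runs as a hosted Hugging Face Space.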