DevBhojani committed
Commit c19fef6 · verified · 1 Parent(s): 6b6c19e

Create app.py

Files changed (1)
  1. app.py +180 -0
app.py ADDED
@@ -0,0 +1,180 @@
import gradio as gr
from transformers import pipeline, AutoTokenizer
import re
import contractions
import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from huggingface_hub import hf_hub_download

# Download the pre-trained intent classifier and its TF-IDF vectorizer from the Hugging Face Hub
repo_id = "DevBhojani/Classification-SamsumDataset"
model_filename = "random_forest_classifier_model.joblib"

model_path = hf_hub_download(repo_id=repo_id, filename=model_filename)
loaded_classifier_model = joblib.load(model_path)

vectorizer_filename = "tfidf_vectorizer.joblib"

vectorizer_path = hf_hub_download(repo_id=repo_id, filename=vectorizer_filename)
loaded_tfidf_vectorizer = joblib.load(vectorizer_path)

# Note: despite its name, this helper strips all non-alphanumeric characters
# rather than HTML tags; it is defined but not called in clean_text below.
def remove_html_tags(text):
    pattern = r'[^a-zA-Z0-9\s]'
    text = re.sub(pattern, '', str(text))
    return text

def remove_url(text):
    pattern = re.compile(r'https?://\S+|www\.\S+')
    return pattern.sub(r'', str(text))

def remove_emojis(text):
    emoji_pattern = re.compile(
        "["
        u"\U0001F600-\U0001F64F"  # emoticons
        u"\U0001F300-\U0001F5FF"  # symbols & pictographs
        u"\U0001F680-\U0001F6FF"  # transport & map symbols
        u"\U0001F1E0-\U0001F1FF"  # flags
        u"\U00002700-\U000027BF"  # miscellaneous symbols
        u"\U0001F900-\U0001F9FF"  # supplemental symbols
        u"\U00002600-\U000026FF"  # weather & other symbols
        u"\U0001FA70-\U0001FAFF"  # extended symbols
        "]+",
        flags=re.UNICODE
    )
    return emoji_pattern.sub(r'', str(text))

def expand_contractions(text):
    return contractions.fix(text)

# Defined but not used in clean_text below.
def remove_special_and_numbers(text):
    return re.sub(r'[^a-zA-Z\s]', '', str(text))

# Normalization applied to the transcript before summarization and classification.
def clean_text(text):
    text = remove_url(text)
    text = remove_emojis(text)
    text = expand_contractions(text)
    text = text.lower()
    return text

# Summarization model fine-tuned on the SAMSum dialogue dataset
summarizer = pipeline("summarization", model="luisotorres/bart-finetuned-samsum")
# summarizer2 = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
tokenizer = AutoTokenizer.from_pretrained("luisotorres/bart-finetuned-samsum")

# Split the conversation into up to n chunks of whole lines.
def split_into_chunks(conversation, n=15):
    lines = conversation.strip().split('\n')
    chunk_size = max(1, len(lines) // n)
    return ['\n'.join(lines[i:i+chunk_size]) for i in range(0, len(lines), chunk_size)]

# Keep each chunk within the model's input limit.
def truncate_chunk(text, max_tokens=1024):
    tokens = tokenizer.encode(text, truncation=True, max_length=max_tokens)
    return tokenizer.decode(tokens, skip_special_tokens=True)

# Summarize each chunk independently, skipping empty chunks and logging failures.
def summarize_chunks(chunks, model):
    summaries = []
    for chunk in chunks:
        chunk = chunk.strip()
        if not chunk:
            continue
        try:
            truncated_chunk = truncate_chunk(chunk)
            summary = model(truncated_chunk, max_length=1024, min_length=20, do_sample=False)[0]['summary_text']
            summaries.append(summary)
        except Exception as e:
            print(f"Error summarizing chunk: {e}")
    return summaries

def combine_summaries(summaries):
    return ' '.join(summaries)

# n=1 keeps the whole dialogue in a single chunk; raise it for very long transcripts.
def summarize_dialogue(conversation, model):
    chunks = split_into_chunks(conversation, n=1)
    summaries = summarize_chunks(chunks, model)
    final_summary = combine_summaries(summaries)
    return final_summary

def analyze_meeting_transcript(user_input):
    if not user_input.strip():
        return "Please enter some text to summarize.", ""

    cleaned_input = clean_text(user_input)
    summary1 = summarize_dialogue(cleaned_input, summarizer)

    # Use the loaded vectorizer to transform the input, then predict the intent label
    cleaned_input_vectorized = loaded_tfidf_vectorizer.transform([cleaned_input])
    intent_classification = loaded_classifier_model.predict(cleaned_input_vectorized)[0]

    # Format the intent classification output
    formatted_intent = intent_classification.replace("__label__", "").replace("_", " ")

    return summary1, formatted_intent

interface = gr.Interface(
    fn=analyze_meeting_transcript,
    inputs=gr.Textbox(label="Enter dialogue here", lines=12, placeholder="Paste your meeting transcript..."),
    outputs=[
        gr.Textbox(label="Summary (Luis Torres BART)"),
        # gr.Textbox(label="Summary 2 (KN Karthick MEETING_SUMMARY)"),
        gr.Textbox(label="Intent Classification")
    ],
    title="Meeting Transcript Analyzer",
    description="Summarizes meeting dialogues and classifies the intent.",
    allow_flagging="never",
    examples=[
        [
            '''
Amanda: guess what!
Chris: hey ;) ur pregnant!
Amanda: noo ;) but close enough! I'm so proud of myself! Remember I go to these dancing classes with Michael?
Chris: Yeah?
Amanda: So we went yesterday and the instructor needed a partner to show the steps we had so far
Chris: so there's only one guy teaching you? without a female partner?
Amanda: Well, this time he was alone, BUT THAT'S NOT THE POINT! Listen!
Chris: yeah, sorry :D tell me!
Amanda: So he needed a partner and noone really knew the steps like perfectly
Amanda: and obviously noone wanted to be mocked
Amanda: so I thought, aaaah :D
Chris: u volunteered? really? you??
Amanda: yeah!
Chris: whooa! that's so great! #therapy #worthit :D
Amanda: yeah i know :D maybe one day i'll actually stop being so shy
Chris: that's definitely the first step! :D congrats!
Amanda: tx ^_^
Chris: what dance was it?
Amanda: English waltz
Chris: isn't it, like, SO difficult?
Amanda: yeah it is! but everyone said I looked like a pro :D
Chris: Well done!!
'''
        ],
        ["I have some exciting news to share!"],
        [
            '''
Beryl: Hello guys! How are you doing? We've lost contact for a few months now. Hope you are well.
Anton: A happy hello to you Beryl! Great to hear from you. We are fine, thanks. And yourself?
Beryl: I'm very well indeed. Thank you. Any changes in your setup?
Anton: Not really. SOS. Same Old Soup ;) But we are happy for that.
Beryl: Are you still running your lovely airbnb?
Anton: Oh yes, we are. We had a few months off during summer, our summer, but now bookings start flowing in. Well... Are you planning to visit us? You two are always welcome!
Beryl: You caught me here. I'm vaguely considering going down to Onrus again, most likely in January. What does it look like with vacancies then?
Anton: Perfect! Just give me your dates and I'll keep it booked for you.
Beryl: Would you prefer me to do it via airbnb website or just like this directly with you?
Anton: I think it'll be more advantageous for both of us to do it directly. Do you know exactly when you'll be coming?
Beryl: Not so much. Can I get back to you in 2, 3 days' time?
Anton: ASAP really. As I say we've been receiving bookings daily now.
Beryl: Well, no big deal. I'll be staying in Cape Town for a longer time and am quite flexible in my dates.
Anton: Will you be coming with Tino, if I may ask?
Beryl: No. I am single again. Hurray! So pls make it single occupancy any week in January, Anton.
Anton: Great! 4th till 12th?
Beryl: Very good. I'll call you beforehand from Cape Town. Greetings to you both!
Anton: Take care!'''
        ],
    ]
)

if __name__ == "__main__":
    interface.launch(debug=True, share=True)
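
For a quick check outside the Gradio UI, the handler can be exercised directly. The sketch below is not part of this commit: it assumes app.py is importable from the working directory (importing it triggers the Hub downloads and model loading, so the first run is slow), and the file name smoke_test.py is only for illustration; the sample text is taken from the app's first example.

# smoke_test.py: a minimal sketch, not part of this commit.
# Importing app runs the module-level downloads and model setup defined above.
from app import analyze_meeting_transcript

sample_dialogue = (
    "Amanda: guess what!\n"
    "Chris: hey ;) ur pregnant!\n"
    "Amanda: noo ;) but close enough!"
)

summary, intent = analyze_meeting_transcript(sample_dialogue)
print("Summary:", summary)
print("Intent:", intent)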