Update model.py
model.py CHANGED

@@ -1,8 +1,10 @@
 import os
 os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf-cache"
 os.environ["HF_HOME"] = "/tmp/hf-home"
+
 import nltk
 nltk.download("punkt", download_dir="/tmp/nltk_data")
+
 from sklearn.feature_extraction.text import TfidfVectorizer
 from sklearn.cluster import KMeans
 from sklearn.metrics.pairwise import cosine_similarity
@@ -10,26 +12,42 @@ from nltk.tokenize import sent_tokenize
 from transformers import pipeline
 import numpy as np
 
+# Load summarizer model
 summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
 
 def summarize_review(text):
+    """Standard transformer-based summarization"""
     return summarizer(text, max_length=60, min_length=10, do_sample=False)[0]["summary_text"]
 
 def smart_summarize(text, n_clusters=1):
+    """
+    Clustering + cosine similarity-based summarization
+    Selects most representative sentence(s) from each cluster
+    """
     sentences = sent_tokenize(text)
     if len(sentences) <= 1:
         return text
 
-
+    tfidf = TfidfVectorizer(stop_words="english")
+    tfidf_matrix = tfidf.fit_transform(sentences)
+
     if len(sentences) <= n_clusters:
         return " ".join(sentences)
 
     kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(tfidf_matrix)
-
+    summary_sentences = []
+
     for i in range(n_clusters):
         idx = np.where(kmeans.labels_ == i)[0]
-        if not len(idx):
-
-
-
-
+        if not len(idx):
+            continue
+        # Average vector from cluster, converted to ndarray
+        avg_vector = np.asarray(tfidf_matrix[idx].mean(axis=0))
+        # Similarity computation (ensure both inputs are arrays)
+        sim = cosine_similarity(avg_vector, tfidf_matrix[idx].toarray())
+        # Select sentence with highest similarity to avg
+        most_representative = sentences[idx[np.argmax(sim)]]
+        summary_sentences.append(most_representative)
+
+    # Preserve original sentence order
+    return " ".join(sorted(summary_sentences, key=sentences.index))
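One caveat worth flagging, not part of the commit itself: nltk.download("punkt", download_dir="/tmp/nltk_data") stores the tokenizer outside NLTK's default search path, so sent_tokenize can raise a LookupError unless that directory is registered. A minimal sketch of the usual fix follows; whether this Space already exposes the directory via the NLTK_DATA environment variable is an assumption, so the append may be redundant:

import nltk

# Download punkt to a writable location, as the committed file does...
nltk.download("punkt", download_dir="/tmp/nltk_data")
# ...and make sure NLTK actually searches that directory.
# Hypothetical addition -- not in the committed file.
nltk.data.path.append("/tmp/nltk_data")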
model.py after this change:

import os
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf-cache"
os.environ["HF_HOME"] = "/tmp/hf-home"

import nltk
nltk.download("punkt", download_dir="/tmp/nltk_data")

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from nltk.tokenize import sent_tokenize
from transformers import pipeline
import numpy as np

# Load summarizer model
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")

def summarize_review(text):
    """Standard transformer-based summarization"""
    return summarizer(text, max_length=60, min_length=10, do_sample=False)[0]["summary_text"]

def smart_summarize(text, n_clusters=1):
    """
    Clustering + cosine similarity-based summarization
    Selects most representative sentence(s) from each cluster
    """
    sentences = sent_tokenize(text)
    if len(sentences) <= 1:
        return text

    tfidf = TfidfVectorizer(stop_words="english")
    tfidf_matrix = tfidf.fit_transform(sentences)

    if len(sentences) <= n_clusters:
        return " ".join(sentences)

    kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(tfidf_matrix)
    summary_sentences = []

    for i in range(n_clusters):
        idx = np.where(kmeans.labels_ == i)[0]
        if not len(idx):
            continue
        # Average vector from cluster, converted to ndarray
        avg_vector = np.asarray(tfidf_matrix[idx].mean(axis=0))
        # Similarity computation (ensure both inputs are arrays)
        sim = cosine_similarity(avg_vector, tfidf_matrix[idx].toarray())
        # Select sentence with highest similarity to avg
        most_representative = sentences[idx[np.argmax(sim)]]
        summary_sentences.append(most_representative)

    # Preserve original sentence order
    return " ".join(sorted(summary_sentences, key=sentences.index))
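For reference, a minimal usage sketch of the two entry points. The review text below is invented for illustration; any multi-sentence string works:

# Hypothetical input -- three short review sentences.
review = (
    "The battery lasts two full days. "
    "The screen is sharp and bright. "
    "Shipping took longer than promised."
)

# Abstractive summary via the DistilBART pipeline.
print(summarize_review(review))

# Extractive summary: one representative sentence per TF-IDF cluster,
# returned in original sentence order.
print(smart_summarize(review, n_clusters=2))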