import os

# Point the Hugging Face caches at writable locations (useful on read-only hosts).
# These must be set before transformers is imported.
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf-cache"
os.environ["HF_HOME"] = "/tmp/hf-home"

import nltk

# download_dir alone is not enough: NLTK only searches the paths listed in
# nltk.data.path, so the custom directory must be registered as well.
nltk.download("punkt", download_dir="/tmp/nltk_data")
nltk.data.path.append("/tmp/nltk_data")

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from nltk.tokenize import sent_tokenize
from transformers import pipeline
import numpy as np

# Abstractive summarizer: a distilled BART checkpoint fine-tuned on CNN/DailyMail.
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")

def summarize_review(text):
    """Abstractive summary of a single review via the DistilBART pipeline."""
    return summarizer(text, max_length=60, min_length=10, do_sample=False)[0]["summary_text"]

def smart_summarize(text, n_clusters=1):
    """Extractive summary: cluster sentences by TF-IDF and keep, per cluster,
    the sentence closest to the cluster centroid."""
    sentences = sent_tokenize(text)
    if len(sentences) <= 1:
        return text
    # Too few sentences to cluster: return them all, skipping the model work.
    if len(sentences) <= n_clusters:
        return " ".join(sentences)

    tfidf_matrix = TfidfVectorizer(stop_words="english").fit_transform(sentences)
    # n_init pinned explicitly; its default changed in recent scikit-learn releases.
    kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42).fit(tfidf_matrix)

    representatives = []
    for i in range(n_clusters):
        idx = np.where(kmeans.labels_ == i)[0]
        if idx.size == 0:
            continue
        # Sparse .mean() returns a numpy matrix; convert so cosine_similarity accepts it.
        centroid = np.asarray(tfidf_matrix[idx].mean(axis=0))
        sim = cosine_similarity(centroid, tfidf_matrix[idx])
        representatives.append(sentences[idx[np.argmax(sim)]])
    # Re-emit the picked sentences in their original order within the text.
    return " ".join(sorted(representatives, key=sentences.index))