# churnsight-ai / model.py
import os

# Redirect Hugging Face caches to writable /tmp locations (e.g. on Spaces).
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf-cache"
os.environ["HF_HOME"] = "/tmp/hf-home"

import nltk

# Download the sentence tokenizer to /tmp and make sure NLTK looks for it there.
nltk.download("punkt", download_dir="/tmp/nltk_data")
nltk.data.path.append("/tmp/nltk_data")

import numpy as np
from nltk.tokenize import sent_tokenize
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import pipeline

# Load the summarization model once at import time.
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")


def summarize_review(text):
    """Standard transformer-based summarization."""
    return summarizer(text, max_length=60, min_length=10, do_sample=False)[0]["summary_text"]
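
# Illustrative usage sketch (the sample review below is a made-up example,
# not data from the original project):
#
#     review = ("The onboarding was confusing and support took days to reply. "
#               "I'm considering cancelling my subscription next month.")
#     print(summarize_review(review))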


def smart_summarize(text, n_clusters=1):
    """
    Clustering + cosine similarity-based summarization.
    Selects the most representative sentence(s) from each cluster.
    """
    sentences = sent_tokenize(text)
    if len(sentences) <= 1:
        return text

    tfidf = TfidfVectorizer(stop_words="english")
    tfidf_matrix = tfidf.fit_transform(sentences)

    # Not enough sentences to cluster meaningfully; return them all.
    if len(sentences) <= n_clusters:
        return " ".join(sentences)

    kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(tfidf_matrix)

    summary_sentences = []
    for i in range(n_clusters):
        idx = np.where(kmeans.labels_ == i)[0]
        if len(idx) == 0:
            continue
        # Average TF-IDF vector of the cluster, converted to a dense ndarray.
        avg_vector = np.asarray(tfidf_matrix[idx].mean(axis=0))
        # Cosine similarity between the cluster average and each member sentence.
        sim = cosine_similarity(avg_vector, tfidf_matrix[idx].toarray())
        # Keep the sentence closest to the cluster average.
        most_representative = sentences[idx[np.argmax(sim)]]
        summary_sentences.append(most_representative)

    # Preserve the original sentence order in the summary.
    return " ".join(sorted(summary_sentences, key=sentences.index))