File size: 925 Bytes
37fc8cc
 
 
 
 
 
 
 
 
24f12b3
 
 
 
 
db3725f
 
 
 
 
24f12b3
fe58823
72c6f28
 
 
 
 
 
 
 
 
 
24f12b3
 
72c6f28
24f12b3
72c6f28
24f12b3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 26 16:16:37 2021

@author: PC
"""

import streamlit as st

from transformers import PegasusForConditionalGeneration, PegasusTokenizer
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch  


# Run inference on GPU when one is available; otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# PEGASUS fine-tuned on XSum for single-sentence abstractive summaries.
model_name_xsum = 'google/pegasus-xsum'
    
# Download (on first run) and load the tokenizer and model weights,
# moving the model to the selected device. NOTE(review): this runs at
# module import time on every Streamlit rerun unless cached upstream.
tokenizer = AutoTokenizer.from_pretrained(model_name_xsum)
model = PegasusForConditionalGeneration.from_pretrained(model_name_xsum).to(device)

@st.cache
def pegasus_abs_summarize(src_text):
    """Generate an abstractive summary of *src_text* with PEGASUS-XSum.

    Parameters
    ----------
    src_text : str or list[str]
        Input text(s); anything beyond the model's maximum input
        length is truncated by the tokenizer.

    Returns
    -------
    list[str]
        One decoded summary per input text (special tokens removed).
    """
    # Tokenize and pad to the longest sequence in the batch, then move
    # the tensors to the same device as the model.
    batch = tokenizer(src_text, truncation=True, padding='longest',
                      return_tensors="pt").to(device)

    # Inference only: disabling autograd avoids building the gradient
    # graph, reducing memory use and speeding up generation.
    with torch.no_grad():
        summary_ids = model.generate(**batch)

    return tokenizer.batch_decode(summary_ids, skip_special_tokens=True)


# Text box whose value is stored in st.session_state under key "input".
st.text_input("Input:", key="input")

# Summarize whatever the user typed (empty string on first load).
abs_output = pegasus_abs_summarize(st.session_state.input)

# Render the summary — previously the result was computed but never
# displayed, so the app showed no output at all.
st.write(abs_output)