File size: 4,277 Bytes
716037e
 
 
 
71754ec
716037e
 
 
 
 
 
 
 
 
 
 
 
 
71754ec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
716037e
71754ec
 
 
 
 
716037e
71754ec
 
 
 
 
 
 
 
 
 
 
 
716037e
71754ec
 
 
 
716037e
71754ec
716037e
 
71754ec
 
 
 
 
 
716037e
 
71754ec
 
716037e
71754ec
 
 
 
716037e
71754ec
 
 
 
 
 
 
 
 
 
 
 
716037e
 
 
 
71754ec
 
716037e
 
71754ec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
716037e
71754ec
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
import gradio as gr
import logging
import sys
import os
import gc

# Configure logging
# Root logger emits INFO+ to stdout so messages appear in container /
# Spaces log collectors (which sometimes drop or separate stderr).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler(sys.stdout)]
)
logger = logging.getLogger(__name__)

# Log startup information
logger.info("Starting StudAI Summarization Service with Gradio")
logger.info(f"Python version: {sys.version}")

# Force garbage collection
# Free import-time garbage before the memory-heavy model load below.
gc.collect()

# Create a simple function for summarization that doesn't use ML in case model loading fails
def simple_summarize(text, max_length=150, min_length=30):
    """Fallback extractive summarizer: first, middle and last sentence.

    ``max_length`` and ``min_length`` are accepted only so the signature
    mirrors the model-based summarizer; they are not used here.
    """
    import re

    # Split on whitespace that follows sentence-ending punctuation.
    parts = re.split(r'(?<=[.!?])\s+', text)

    # Too short to condense -- hand the input back untouched.
    if len(parts) < 4:
        return text

    mid = len(parts) // 2
    return " ".join([parts[0], parts[mid], parts[-1]])

# Set a flag for model availability
# Flipped to True only after the pipeline below loads successfully;
# summarize_text() consults it to choose model vs. extractive fallback.
model_available = False

# Try to import and load the model with memory optimizations
try:
    # Heavy deps imported lazily inside the try so an ImportError is
    # caught and degrades to the fallback instead of crashing at startup.
    # NOTE(review): AutoTokenizer and AutoModelForSeq2SeqLM are imported
    # but never used -- only `pipeline` is needed here.
    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
    import torch
    
    logger.info("Loading small model for summarization...")
    
    # NOTE(review): contrary to the log message and the original comment,
    # facebook/bart-large-cnn is a large (~400M-parameter) model, not a
    # tiny one -- confirm it fits the host's memory budget or swap in a
    # distilled variant.
    model_name = "facebook/bart-large-cnn"
    
    # Enable memory optimization
    # transformers pipeline device convention: 0 = first GPU, -1 = CPU.
    if torch.cuda.is_available():
        logger.info("CUDA available, using GPU")
        device = 0
    else:
        logger.info("CUDA not available, using CPU")
        device = -1
    
    # Enable memory-efficient loading
    summarizer = pipeline(
        "summarization",
        model=model_name,
        device=device,
        framework="pt"  # force the PyTorch backend
    )
    
    logger.info("Model loaded successfully!")
    model_available = True
    
    # Force garbage collection after model loading
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        
except Exception as e:
    # Broad catch is deliberate: any failure (missing deps, OOM, download
    # error) must not kill the app -- we degrade to simple_summarize().
    logger.error(f"Failed to load model: {str(e)}")
    logger.info("Will use simple extractive summarization instead")

def summarize_text(text, max_length=150, min_length=30):
    """Summarize *text*, preferring the ML model with a safe fallback.

    Args:
        text: Input text. Empty/None or inputs shorter than 50 characters
            (after stripping) are returned unchanged.
        max_length: Upper bound on summary length (tokens for the model).
        min_length: Lower bound on summary length (tokens for the model).

    Returns:
        The summary string, or the original text for short/empty input.
        Never raises: any summarization error falls back to the simple
        extractive summarizer.
    """
    # Guard: nothing useful to summarize (None/empty/very short input).
    if not text or len(text.strip()) < 50:
        return text

    try:
        if model_available:
            # Lazy %-style args avoid formatting when INFO is disabled.
            logger.info("Summarizing text of length %d with model", len(text))
            result = summarizer(
                text,
                max_length=max_length,
                min_length=min_length,
                truncation=True  # clip inputs beyond the model's context
            )
            summary = result[0]["summary_text"]
        else:
            logger.info("Using simple summarization for text of length %d", len(text))
            summary = simple_summarize(text, max_length, min_length)

        return summary
    except Exception:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Error during summarization")
        # Fall back to simple summarization on error
        return simple_summarize(text, max_length, min_length)

# Create Gradio interface
# Inputs map positionally onto summarize_text(text, max_length, min_length).
demo = gr.Interface(
    fn=summarize_text,
    inputs=[
        gr.Textbox(
            lines=10, 
            label="Text to Summarize",
            placeholder="Enter text to summarize (at least 50 characters)"
        ),
        # Slider defaults (150 / 30) match summarize_text's own defaults.
        gr.Slider(50, 500, value=150, label="Max Length"),
        gr.Slider(10, 200, value=30, label="Min Length")
    ],
    outputs=gr.Textbox(label="Summary"),
    title="StudAI Text Summarization",
    description="This service provides text summarization for the StudAI Android app.",
    examples=[
        ["The coronavirus pandemic has led to a surge in remote work. Companies around the world have had to adapt to new ways of working, with many employees setting up home offices. This shift has led to changes in productivity, work-life balance, and communication patterns. Some studies suggest that remote work can increase productivity, while others point to challenges in collaboration and team cohesion. Organizations are now considering hybrid models for the future of work.", 150, 30]
    ],
    # NOTE(review): allow_flagging is deprecated in Gradio 4.x in favor of
    # flagging_mode -- confirm the pinned Gradio version still accepts it.
    allow_flagging="never"
)

# Launch with parameters optimized for Spaces
# 0.0.0.0 binds all interfaces (required inside containers); 7860 is the
# port Hugging Face Spaces routes traffic to.
demo.launch(share=False, server_name="0.0.0.0", server_port=7860)