import gradio as gr
import requests
import time
import re
from duckduckgo_search import DDGS
from bs4 import BeautifulSoup

# === Search and extraction functions ===

def get_full_article(url):
    """Fetch full article content from URL"""
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1'
        }

        response = requests.get(url, headers=headers, timeout=20, verify=True)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

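        # Strip scripts, styles, and other page chrome so they don't pollute the extracted text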
        for element in soup(['script', 'style', 'nav', 'header', 'footer', 'aside', 'ads', 'noscript', 'form']):
            element.decompose()

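        # Candidate CSS selectors for the main article container, tried in order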
        article_selectors = [
            'article', '.article-content', '.post-content', '.story-body', '.story-content',
            '.entry-content', '.content-body', '.article-body', 'main article', 'main .content', 'main',
            '[role="main"]', '.main-content', '.page-content', '.text', '.article-text'
        ]

        for selector in article_selectors:
            content = soup.select_one(selector)
            if content:
                # Collect paragraph tags and keep only those with substantive text;
                # nested markup (links, emphasis) is flattened by get_text() below
                paragraphs = content.find_all('p')
                if paragraphs:
                    text_parts = []
                    for p in paragraphs:
                        text = p.get_text(strip=True)
                        if len(text) > 30:
                            text_parts.append(text)
                    full_text = '\n\n'.join(text_parts)
                    if len(full_text) > 300:
                        return full_text[:10000]

        body_text = soup.get_text(separator='\n\n', strip=True)
        body_text = re.sub(r'\n{3,}', '\n\n', body_text)

        return body_text[:10000] if len(body_text) > 300 else "[INFO] Could not extract substantial content"

    except requests.exceptions.Timeout:
        return "[WARNING] Article fetch timeout - using snippet instead"
    except requests.exceptions.RequestException:
        return "[ERROR] Could not fetch article: Network error"
    except Exception as e:
        return f"[ERROR] Could not fetch article: {str(e)}"


def search_articles(name: str, max_articles: int = 2) -> str:
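    """Search DuckDuckGo for articles mentioning `name` and return them as Markdown.

    Fetches the full text of each result via get_full_article(), falling back to
    the search snippet when the fetch fails.
    """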
    keywords = ['owners', 'partners', 'stockholders']
    search_query = f'"{name}" ({" AND ".join(keywords)}) site:news'

    max_retries = 3
    base_delay = 3

    for attempt in range(max_retries):
        try:
            print(f"Search attempt {attempt + 1}: {search_query}")
            time.sleep(base_delay * (attempt + 1))

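            # Vary search parameters across retries: longer timeouts and broader
            # region/safesearch settings on later attempts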
            configs = [
                {'timeout': 20, 'region': 'us-en', 'safesearch': 'moderate'},
                {'timeout': 25, 'region': 'wt-wt', 'safesearch': 'off'},
                {'timeout': 30, 'region': None, 'safesearch': 'moderate'}
            ]

            config = configs[min(attempt, len(configs)-1)]

            with DDGS(timeout=config['timeout']) as ddgs:
                search_params = {
                    'keywords': search_query,
                    'max_results': max_articles,
                    'safesearch': config['safesearch']
                }
                if config['region']:
                    search_params['region'] = config['region']

                results = list(ddgs.text(**search_params))
                print(f"Found {len(results)} results on attempt {attempt + 1}")

            if not results:
                continue

            articles = []
            for i, result in enumerate(results, 1):
                url = result.get('href', 'No URL')
                title = result.get('title', 'No Title')
                snippet = result.get('body', 'No snippet available')

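                # Brief pause between article fetches to avoid hammering the source sites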
                if i > 1:
                    time.sleep(2)

                full_text = get_full_article(url)
                # Fall back to the search snippet if the fetch failed or returned nothing useful
                if any(marker in full_text for marker in ["[ERROR]", "[WARNING]", "[INFO]"]):
                    print(f"Using snippet fallback for article {i}")
                    content = f"[SNIPPET ONLY]\n{snippet}"
                else:
                    content = full_text

                article = f"### {i}. {title}\n"
                article += f"[Source]({url})\n\n"
                article += f"{content}\n"
                articles.append(article)

            return "\n---\n".join(articles)

        except Exception as e:
            print(f"Attempt {attempt + 1} failed: {str(e)}")
            if attempt < max_retries - 1:
                time.sleep(base_delay * (attempt + 2))
            else:
                return f"[ERROR] Search failed after {max_retries} attempts. Last error: {str(e)}"

    return f"[INFO] No articles found for {name}"


def extract_entities(search_results: str) -> str:
    """Extract entities using Mistral 7B endpoint"""
    modal_endpoint = "https://msoaresdiego--mistral-llm-endpoint-fastapi-app.modal.run/generate"

    # Truncate input to avoid excessive model load
    MAX_CHARS = 8000
    if len(search_results) > MAX_CHARS:
        search_results = search_results[:MAX_CHARS]

    prompt = f"""Extract all person names and organization names from the following text. Do not extract products and service names. Only individuals and organizations. Bring the full details of the name in the newspaper article. For example, if only ACME is mentioned as company name, bring only ACME. IF ACME Inc is mentioned as company name, then you have to extract ACME Inc. In addition, define the relationship between the entity and the company that is being searched. For example, is ACME Inc an owner of the company being searched? Then write 'owner'. Is ACME Inc. a funder of the company being searched? Then write 'funder' 
Format as:
PERSON: [name] - [relationship]
ORG: [organization name] - [relationship]
Text: {search_results}"""

    try:
        response = requests.post(
            modal_endpoint,
            json={"prompt": prompt, "max_tokens": 1000, "temperature": 0.15},
            timeout=1000  # generous timeout; the model endpoint can be slow to respond
        )
        if response.status_code == 200:
            return response.json().get("response", "No entities extracted")
        else:
            return f"[ERROR] API Error: {response.status_code} - {response.text}"
    except requests.exceptions.Timeout:
        return "[ERROR] Entity extraction timeout - please try again"
    except Exception as e:
        return f"[ERROR] Extraction failed: {str(e)}"


# === Gradio interface functions ===

def search_only(name: str, article_count: int):
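    """Gradio handler: search for articles and return (display markdown, raw results for the state)."""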
    if not name.strip():
        return "No name provided", ""

    try:
        start = time.time()
        articles_output = search_articles(name.strip(), max_articles=article_count)
        elapsed = time.time() - start

        results = f"βœ… Search completed for **{name}** in {elapsed:.1f}s\n\n"
        results += articles_output

        return results, articles_output
    except Exception as e:
        return f"[ERROR] Search failed: {str(e)}", ""

def extract_only(stored_results: str):
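    """Gradio handler: extract entities from the search results held in state."""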
    if not stored_results.strip():
        return "No search results available. Please search first."

    try:
        start = time.time()
        entities = extract_entities(stored_results)
        elapsed = time.time() - start
        return f"βœ… Extraction completed in {elapsed:.1f}s\n\n{entities}"
    except Exception as e:
        return f"[ERROR] Extraction failed: {str(e)}"

# === Gradio UI ===

with gr.Blocks(title="Related Entities Finder") as demo:
    gr.Markdown("# πŸ”Ž Related Entities Finder")
    gr.Markdown("Enter a business or project name to search for related articles and extract key entities.")
    gr.Markdown("*Note: Full article extraction may take 30–60 seconds. Snippets will be used as fallback if needed.*")

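    # Holds the raw search output so the extract step can reuse it without re-searching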
    search_state = gr.State("")

    with gr.Row():
        name_input = gr.Textbox(label="Company/Project Name", placeholder="Enter business or project name")
        article_count_slider = gr.Slider(1, 10, value=2, step=1, label="Number of Articles")
        with gr.Column():
            search_btn = gr.Button("🔍 Search Articles", variant="primary")
            extract_btn = gr.Button("📋 Extract Entities", variant="secondary")

    output1 = gr.Markdown(label="Search Results")
    output2 = gr.Textbox(
        label="Extracted Entities and Relationships", 
        lines=10, 
        max_lines=20,
        show_copy_button=True
    )

    search_btn.click(
        fn=search_only,
        inputs=[name_input, article_count_slider],
        outputs=[output1, search_state]
    )

    extract_btn.click(
        fn=extract_only,
        inputs=[search_state],
        outputs=[output2]
    )

if __name__ == "__main__":
    demo.launch()