Update app.py
app.py
CHANGED
@@ -1,4 +1,232 @@
-import gradio as gr
+import gradio as gr
+import requests
+import time
+import re
+from duckduckgo_search import DDGS
+from bs4 import BeautifulSoup
+
+# === Model functions ===
+
+def get_full_article(url):
+    try:
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
+            'Accept-Language': 'en-US,en;q=0.5',
+            'Connection': 'keep-alive',
+            'Upgrade-Insecure-Requests': '1'
+        }
+
+        response = requests.get(url, headers=headers, timeout=20, verify=True)
+        response.raise_for_status()
+        soup = BeautifulSoup(response.content, 'html.parser')
+
+        for element in soup(['script', 'style', 'nav', 'header', 'footer', 'aside', 'ads', 'noscript', 'form']):
+            element.decompose()
+
+        article_selectors = [
+            'article', '.article-content', '.post-content', '.story-body', '.story-content',
+            '.entry-content', '.content-body', '.article-body', 'main article', 'main .content', 'main',
+            '[role="main"]', '.main-content', '.page-content', '.text', '.article-text'
+        ]
+
+        for selector in article_selectors:
+            content = soup.select_one(selector)
+            if content:
+                paragraphs = content.find_all(['p', 'div'], string=True)
+                if paragraphs:
+                    text_parts = [p.get_text(strip=True) for p in paragraphs if len(p.get_text(strip=True)) > 30]
+                    full_text = '\n\n'.join(text_parts)
+                    if len(full_text) > 300:
+                        return full_text[:10000]
+
+        body_text = soup.get_text(separator='\n\n', strip=True)
+        body_text = re.sub(r'\n{3,}', '\n\n', body_text)
+        return body_text[:10000] if len(body_text) > 300 else "[INFO] Could not extract substantial content"
+
+    except requests.exceptions.Timeout:
+        return "[WARNING] Article fetch timeout - using snippet instead"
+    except requests.exceptions.RequestException:
+        return "[ERROR] Could not fetch article: Network error"
+    except Exception as e:
+        return f"[ERROR] Could not fetch article: {str(e)}"
+
+def search_articles(name: str, max_articles: int = 2) -> str:
+    keywords = ['owners', 'partners', 'stockholders']
+    search_query = f'"{name}" ({" AND ".join(keywords)}) site:news'
+
+    max_retries = 3
+    base_delay = 3
+
+    for attempt in range(max_retries):
+        try:
+            print(f"Search attempt {attempt + 1}: {search_query}")
+            time.sleep(base_delay * (attempt + 1))
+
+            configs = [
+                {'timeout': 20, 'region': 'us-en', 'safesearch': 'moderate'},
+                {'timeout': 25, 'region': 'wt-wt', 'safesearch': 'off'},
+                {'timeout': 30, 'region': None, 'safesearch': 'moderate'}
+            ]
+
+            config = configs[min(attempt, len(configs)-1)]
+
+            with DDGS(timeout=config['timeout']) as ddgs:
+                search_params = {
+                    'keywords': search_query,
+                    'max_results': max_articles,
+                    'safesearch': config['safesearch']
+                }
+                if config['region']:
+                    search_params['region'] = config['region']
+
+                results = list(ddgs.text(**search_params))
+                print(f"Found {len(results)} results on attempt {attempt + 1}")
+
+                if not results:
+                    continue
+
+                articles = []
+                for i, result in enumerate(results, 1):
+                    url = result.get('href', 'No URL')
+                    title = result.get('title', 'No Title')
+                    snippet = result.get('body', 'No snippet available')
+
+                    if i > 1:
+                        time.sleep(2)
+
+                    full_text = get_full_article(url)
+                    if any(error in full_text for error in ["[ERROR]", "timeout", "Network error"]):
+                        print(f"Using snippet fallback for article {i}")
+                        content = f"[SNIPPET ONLY]\n{snippet}"
+                    else:
+                        content = full_text
+
+                    article = f"### {i}. {title}\n"
+                    article += f"[Source]({url})\n\n"
+                    article += f"{content}\n"
+                    articles.append(article)
+
+                return "\n---\n".join(articles)
+
+        except Exception as e:
+            print(f"Attempt {attempt + 1} failed: {str(e)}")
+            if attempt < max_retries - 1:
+                time.sleep(base_delay * (attempt + 2))
+            else:
+                return f"[ERROR] Search failed after {max_retries} attempts. Last error: {str(e)}"
+
+    return f"[INFO] No articles found for {name}"
+
+def extract_entities(search_results: str) -> str:
+    """Extract entities using Mistral 7B endpoint"""
+    modal_endpoint = "https://msoaresdiego--mistral-llm-endpoint-fastapi-app.modal.run/generate"
+
+    MAX_CHARS = 8000
+    if len(search_results) > MAX_CHARS:
+        trunc = search_results[:MAX_CHARS]
+        last_period = trunc.rfind('. ')
+        search_results = trunc[:last_period + 1] if last_period > 3000 else trunc
+
+    prompt = f"""Extract all person names and organization names from the following text. Do not extract products and service names. Only individuals and organizations. Bring the full details of the name in the newspaper article. For example, if only ACME is mentioned as company name, bring only ACME. IF ACME Inc is mentioned as company name, then you have to extract ACME Inc. In addition, define the relationship between the entity and the company that is being searched. For example, is ACME Inc an owner of the company being searched? Then write 'owner'. Is ACME Inc. a funder of the company being searched? Then write 'funder'.
+Format as:
+PERSON: [name] - [relationship]
+ORG: [organization name] - [relationship]
+
+Text:
+{search_results}"""
+
+    try:
+        response = requests.post(
+            modal_endpoint,
+            json={"prompt": prompt, "max_tokens": 1000, "temperature": 0.15},
+            timeout=180
+        )
+
+        if response.status_code != 200:
+            return f"[ERROR] API Error: {response.status_code} - {response.text}"
+
+        try:
+            return response.json().get("response", "No entities extracted")
+        except ValueError:
+            return f"[ERROR] Failed to parse model response as JSON:\n\n{response.text[:500]}"
+
+    except requests.exceptions.Timeout:
+        return "[ERROR] Entity extraction timeout - please try again"
+    except Exception as e:
+        return f"[ERROR] Extraction failed: {str(e)}"
+
+# === Gradio interface functions ===
+
+def search_only(name: str, article_count: int):
+    if not name.strip():
+        return "No name provided", ""
+
+    try:
+        start = time.time()
+        articles_output = search_articles(name.strip(), max_articles=article_count)
+        elapsed = time.time() - start
+
+        results = f"✅ Search completed for **{name}** in {elapsed:.1f}s\n\n"
+        results += articles_output
+
+        return results, articles_output
+    except Exception as e:
+        return f"[ERROR] Search failed: {str(e)}", ""
+
+def extract_only(stored_results: str):
+    if not stored_results.strip():
+        return "No search results available. Please search first."
+
+    try:
+        start = time.time()
+        entities = extract_entities(stored_results)
+        elapsed = time.time() - start
+        return f"✅ Extraction completed in {elapsed:.1f}s\n\n{entities}"
+    except Exception as e:
+        return f"[ERROR] Extraction failed: {str(e)}"
+
+# === Gradio UI ===
+
+with gr.Blocks(title="Related Entities Finder") as demo:
+    gr.Markdown("# 🔎 Related Entities Finder")
+    gr.Markdown("Enter a business or project name to search for related articles and extract key entities.")
+    gr.Markdown("*Note: Full article extraction may take 30–60 seconds. Snippets will be used as fallback if needed.*")
+
+    search_state = gr.State("")
+
+    with gr.Row():
+        name_input = gr.Textbox(label="Company/Project Name", placeholder="Enter business or project name")
+        article_count_slider = gr.Slider(1, 10, value=2, step=1, label="Number of Articles")
+        with gr.Column():
+            search_btn = gr.Button("🔍 Search Articles", variant="primary")
+            extract_btn = gr.Button("📋 Extract Entities", variant="secondary")
+
+    output1 = gr.Markdown(label="Search Results")
+    output2 = gr.Textbox(
+        label="Extracted Entities and Relationships",
+        lines=10,
+        max_lines=20,
+        show_copy_button=True
+    )
+
+    search_btn.click(
+        fn=search_only,
+        inputs=[name_input, article_count_slider],
+        outputs=[output1, search_state]
+    )
+
+    extract_btn.click(
+        fn=extract_only,
+        inputs=[search_state],
+        outputs=[output2]
+    )
+
+if __name__ == "__main__":
+    demo.launch()
+
+
+'''import gradio as gr
 import requests
 import time
 import re
@@ -225,3 +453,4 @@ with gr.Blocks(title="Related Entities Finder") as demo:
 
 if __name__ == "__main__":
     demo.launch()
+'''
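For reviewers, a minimal local smoke test of the two pipeline steps (search, then entity extraction) might look like the sketch below. It is a hypothetical script, not part of this commit: it assumes app.py is importable from the working directory, that gradio, requests, duckduckgo_search, and beautifulsoup4 are installed (presumably via the Space's requirements.txt, given the imports at the top of the new app.py), that DuckDuckGo and the hardcoded Modal endpoint are reachable, and "ACME Inc" is an arbitrary example name.

# Hypothetical smoke test (not part of app.py); run with: python smoke_test.py
# Importing app builds the Gradio Blocks UI but does not launch it,
# since demo.launch() is guarded by the __main__ check.
from app import search_articles, extract_entities

articles = search_articles("ACME Inc", max_articles=1)  # step 1: fetch one article (or a snippet fallback)
print(articles[:500])                                   # preview the raw search output
print(extract_entities(articles))                       # step 2: send the text to the Modal endpoint for extraction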