import gradio as gr
import urllib.request
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from duckduckgo_search import DDGS
import re
import markdown # Import markdown library to convert Markdown to HTML

# Function to correct custom source names
def correct_source_names(site_name):
    dict_source_names = {
        "dw": "DW",
        "dw.com": "DW",
        "ap news": "Associated Press",
        "ap": "Associated Press",
        "mongabay environmental news": "Mongabay",
        "mongabay": "Mongabay",
        "guardian": "The Guardian",
        "guardian.com": "The Guardian",
        "bbc": "BBC",
        "bbc.com/news": "BBC"
    }
    # Convert the site_name to lowercase for case-insensitive comparison
    site_name_lower = site_name.lower()
    # Check the site_name against the dictionary keys
    for key, value in dict_source_names.items():
        if key in site_name_lower:
            return value
    # If no match is found, return the site_name with its first letter capitalized
    return site_name.capitalize()
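
# Illustrative usage (example values, not part of the original script): the lookup is a
# substring match, so names that merely contain a key are normalized, and anything
# unmatched falls through to .capitalize().
#   correct_source_names("bbc.com/news")  -> "BBC"
#   correct_source_names("ap news")       -> "Associated Press"
#   correct_source_names("reuters")       -> "Reuters"   (no key matches)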

# Function to infer the source name from the URL
def infer_source_from_url(url):
    try:
        # Parse the URL to extract the domain
        domain = urlparse(url).netloc
        print(domain)
        # Remove known subdomains like 'www', 'm', 'mobile'
        domain_parts = domain.split('.')
        if len(domain_parts) > 2 and domain_parts[0] in ['www', 'm', 'mobile']:
            domain_parts.pop(0)
        # Focus on the second-to-last part for the main domain
        primary_domain = domain_parts[-2] if len(domain_parts) > 1 else domain_parts[0]
        # Correct source names
        primary_domain = correct_source_names(primary_domain)
        return primary_domain
    except Exception:
        return "Unknown Source"

# Function to fetch and parse HTML content using urllib
def fetch_metadata(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:106.0) Gecko/20100101 Firefox/106.0')
    req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8')
    req.add_header('Accept-Language', 'en-US,en;q=0.5')
    try:
        with urllib.request.urlopen(req) as response:
            html = response.read()
        soup = BeautifulSoup(html, 'html.parser')
        # Extracting title tag
        title_tag = soup.title.string if soup.title else 'No title available'
        # Extracting og:site_name meta tag
        site_name = soup.find('meta', property='og:site_name')
        site_name = site_name['content'] if site_name else 'Unknown Source'
        # Extracting og:title meta tag
        og_title = soup.find('meta', property='og:title')
        og_title = og_title['content'] if og_title else title_tag
        # Attempt to infer the source from the URL
        if site_name == "Unknown Source":
            if "reuters.com" in url:
                site_name = "Reuters"
            elif "guardian.com" in url:
                site_name = "The Guardian"
            elif "apnews.com" in url:
                site_name = "Associated Press"
            elif "bbc.com/news" in url:
                site_name = "BBC"
            else:
                site_name = infer_source_from_url(url)
        # Correct the site_name
        site_name = correct_source_names(site_name)
        # Formatting the result
        markdown_text = f"[{og_title}]({url}) ({site_name})"
        return markdown_text
    except Exception:
        return duckduckgo_search_fallback(url)
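
# Illustrative result (headline and URL are hypothetical): on success the function returns a
# one-line markdown string; on any request or parsing error it falls back to a DuckDuckGo search.
#   fetch_metadata("https://www.bbc.com/news/example-article")
#     -> "[Example headline](https://www.bbc.com/news/example-article) (BBC)"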

# Function to perform DuckDuckGo search as a fallback
def duckduckgo_search_fallback(url):
    try:
        # Use the library to perform the search
        results = DDGS().text(url, max_results=1)
        if results:
            top_result = results[0]
            result_title = top_result.get('title', 'No title found')
            result_url = top_result.get('href', 'No URL found')
            result_source = "Unknown Source"  # Default fallback
            # Attempt to infer the source from the URL
            if "reuters.com" in result_url:
                result_source = "Reuters"
            elif "guardian.com" in result_url:
                result_source = "The Guardian"
            elif "apnews.com" in result_url:
                result_source = "AP News"
            elif "bbc.com/news" in result_url:
                result_source = "BBC"
            else:
                result_source = infer_source_from_url(result_url)
            # Correct source name with custom dictionary
            result_source = correct_source_names(result_source)
            markdown_text = f"[{result_title}]({result_url}) ({result_source})"
            return markdown_text
        else:
            return f"No search results found for {url}"
    except Exception as e:
        return f"Failed to fetch data for {url}: {str(e)}"

# Function to trim and format the output
def trimming_chat_answer(text):
    pattern = r'\[.*?\]\(.*?\) \([^\)]+\)'
    match = re.search(pattern, text)
    return match.group(0) if match else "No match found"
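
# Illustrative example (input string is made up): the regex keeps only the
# "[title](url) (source)" portion of the chat model's reply, discarding any surrounding text.
#   trimming_chat_answer("Sure! Here it is: [A headline](https://example.com/a) (Example)")
#     -> "[A headline](https://example.com/a) (Example)"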

# Integrated function to process URLs and format the output
def process_urls(urls):
    results = []
    for url in urls:
        url = url.strip()  # Remove leading and trailing whitespace
        if url:  # Proceed only if the URL is not empty
            # Try to fetch metadata directly from the URL
            raw_markdown = fetch_metadata(url)
            # If raw_markdown is a fallback message, try DuckDuckGo search
            if "Failed to fetch data" in raw_markdown or "No search results" in raw_markdown:
                raw_markdown = duckduckgo_search_fallback(url)
            # Use the DuckDuckGo chat to format the output, but fall back to raw_markdown if unsuccessful
            try:
                ddgschat = DDGS().chat(f"""Please rewrite the following markdown string so the news headline is capitalized as a sentence; only proper nouns or names should have capital initials. Also correct the capitalization of the source name if necessary.
Then, please check whether the source name (surrounded by round parentheses) is repeated inside the headline (surrounded by square brackets); if it is repeated, please remove the source name mention from the headline, keeping the URL and the source name in parentheses outside the headline.
Please answer only with one line of the markdown output.
Example input = [Montana Is a Frontier for Deep Carbon Storage, Mr. António Guterres from the United Nations Claims for Urgent Action - Inside Climate News](https://insideclimatenews.org/news/18072024/montana-deep-carbon-storage-controversies/) (Inside Climate News)
Example output = [Montana is a frontier for deep carbon storage, Mr. António Guterres from the United Nations claims for urgent action](https://insideclimatenews.org/news/18072024/montana-deep-carbon-storage-controversies/) (Inside Climate News)
Input:
{raw_markdown}
""", model='claude-3-haiku')
            except Exception:
                ddgschat = raw_markdown  # If there's an error with the chat API, use raw_markdown
            # Trim the result using the chat output, or fall back to raw_markdown
            clean_markdown = trimming_chat_answer(ddgschat)
            if clean_markdown == "No match found":
                clean_markdown = raw_markdown
            results.append(clean_markdown)
    return results
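
# Illustrative call (URLs and output are hypothetical): each non-empty URL becomes one
# markdown line; empty lines are skipped, so a trailing blank line in the input is harmless.
#   process_urls(["https://www.bbc.com/news/example", ""])
#     -> ["[Example headline](https://www.bbc.com/news/example) (BBC)"]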

# Gradio app function
def gradio_interface(input_text):
    # Split the input text by line breaks to get URLs
    urls = input_text.strip().split('\n')
    # Process the URLs and get the results
    results = process_urls(urls)
    # Join the results with line breaks for display
    html_output = "<br>".join(results)
    return markdown.markdown(html_output)
# Create the Gradio interface
iface = gr.Interface(
fn=gradio_interface,
inputs="textarea",
outputs="html",
title="News headline and title scraper - Melty 2.0 🔭",
description="""Enter URLs separated by line breaks to fetch metadata and format it into markdown.\n\n
\t\t
👀 Example input:\n
\t\thttps://www.example1.com\n
\t\thttps://www.example2.org\n\n
🎯 Example output:\n
\t\t[Headline 1](https://www.example1.com) (Source)\n
\t\t[Headline 2](https://www.example2.org) (Source)"""
)
# Launch the Gradio app
iface.launch()
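
# Optional note (not part of the original behavior): launching with iface.launch(share=True)
# would expose a temporary public URL, which can be handy for testing from another machine.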