Commit bcb8c1d
Parent: 365be8e

Changed timeouts

test1.py CHANGED
@@ -11,44 +11,9 @@ from urllib.parse import urlparse, parse_qs
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-app = FastAPI(title="
-
-class LinkInfo(BaseModel):
-    text: str
-    href: str
-
-class ContactInfo(BaseModel):
-    emails: List[str] = []
-    phones: List[str] = []
-    social_media: List[str] = []
-    contact_forms: List[str] = []
-
-class ScriptInfo(BaseModel):
-    src: str
-    script_type: Optional[str] = None
-    is_external: bool = False
-
-class BusinessInfo(BaseModel):
-    company_name: Optional[str] = None
-    address: Optional[str] = None
-    description: Optional[str] = None
-    industry_keywords: List[str] = []
-
-class LeadData(BaseModel):
-    contact_info: ContactInfo
-    business_info: BusinessInfo
-    lead_score: int = 0
-    technologies: List[str] = []
-
-class ScrapeResponse(BaseModel):
-    body_content: Optional[str] = None
-    screenshot: Optional[str] = None
-    links: Optional[List[LinkInfo]] = None
-    scripts: Optional[List[ScriptInfo]] = None
-    page_title: Optional[str] = None
-    meta_description: Optional[str] = None
-    lead_data: Optional[LeadData] = None
-    source_url: Optional[str] = None
+app = FastAPI(title="Query-Based Web Scraper", description="Scrape websites based on search queries")
+
+# ... (Keep all your Pydantic models unchanged) ...
 
 @app.get("/")
 async def root():
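Note: the removed models nest LeadData out of ContactInfo and BusinessInfo. A minimal sketch of how they compose, assuming the models stay defined as the added comment says they should; every value below is an invented placeholder, not data from this commit:

    # Illustrative only — placeholder values, not data from the commit.
    contact = ContactInfo(emails=["info@example.com"], phones=["555-123-4567"])
    business = BusinessInfo(company_name="Example Co", industry_keywords=["software"])
    lead = LeadData(contact_info=contact, business_info=business, lead_score=55)
    print(lead.model_dump())  # pydantic v2; on v1 use lead.dict()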
@@ -64,14 +29,35 @@ async def root():
     }
 
 async def get_top_search_result(query: str):
-    """Perform Google search and return top result URL"""
+    """Perform Google search and return top result URL with CAPTCHA handling"""
     user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
     async with async_playwright() as p:
-        browser = await p.chromium.launch(headless=True)
+        # Use a proxy to avoid CAPTCHAs
+        proxy_server = "us.proxyrack.net:10000"
+        browser = await p.chromium.launch(
+            headless=True,
+            proxy={
+                "server": f"http://{proxy_server}",
+                "username": "your-proxy-username", # Replace with actual credentials
+                "password": "your-proxy-password"
+            },
+            args=[
+                '--no-sandbox',
+                '--disable-setuid-sandbox',
+                '--disable-dev-shm-usage',
+                '--disable-accelerated-2d-canvas',
+                '--no-first-run',
+                '--no-zygote',
+                '--disable-gpu'
+            ]
+        )
         context = await browser.new_context(
             user_agent=user_agent,
             locale='en-US',
-            viewport={'width': 1920, 'height': 1080}
+            viewport={'width': 1920, 'height': 1080},
+            # Bypass automation detection
+            java_script_enabled=True,
+            bypass_csp=True
        )
         page = await context.new_page()
 
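The launch block added above routes the search through an authenticated HTTP proxy. A standalone sketch of the same Playwright proxy option, runnable on its own; the proxy host and credentials are placeholders exactly as in the diff, and the httpbin URL is just an example endpoint for checking the egress IP:

    # Minimal sketch of Playwright's proxy-enabled launch (placeholder credentials).
    import asyncio
    from playwright.async_api import async_playwright

    async def main():
        async with async_playwright() as p:
            browser = await p.chromium.launch(
                headless=True,
                proxy={
                    "server": "http://us.proxyrack.net:10000",  # placeholder host from the diff
                    "username": "your-proxy-username",
                    "password": "your-proxy-password",
                },
            )
            page = await browser.new_page()
            await page.goto("https://httpbin.org/ip")  # shows the proxy's egress IP
            print(await page.inner_text("body"))
            await browser.close()

    asyncio.run(main())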
@@ -81,21 +67,42 @@ async def get_top_search_result(query: str):
 
         # Handle consent form if it appears
         try:
-            consent_button = await page.wait_for_selector('button:has-text("Accept all")', timeout=5000)
+            consent_button = await page.wait_for_selector('button:has-text("Accept all"), button:has-text("I agree")', timeout=5000)
             if consent_button:
                 await consent_button.click()
                 logger.info("Accepted Google consent form")
-                await
+                await asyncio.sleep(1) # Small delay for consent to apply
         except:
             pass # Consent form didn't appear
 
         # Perform search
-        await page.
+        search_box = await page.wait_for_selector('textarea[name="q"]', timeout=10000)
+        await search_box.fill(query)
         await page.keyboard.press("Enter")
-
+
+        # Wait for search results - use more reliable method
+        try:
+            # Check if CAPTCHA appeared
+            captcha = await page.query_selector('form#captcha-form, div#recaptcha')
+            if captcha:
+                logger.error("CAPTCHA encountered during search")
+                raise Exception("Google CAPTCHA encountered. Cannot proceed with search.")
+
+            # Wait for search results to appear
+            await page.wait_for_selector('.g, .tF2Cxc', timeout=30000)
+        except:
+            # Try alternative search result container
+            try:
+                await page.wait_for_selector('#search', timeout=10000)
+            except:
+                logger.error("Search results not found")
+                raise Exception("Search results not found")
 
         # Extract top results
-        results = await page.query_selector_all('.g')
+        results = await page.query_selector_all('.g, .tF2Cxc')
+        if not results:
+            results = await page.query_selector_all('div[data-snf]')
+
         if not results:
             raise Exception("No search results found")
 
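One caveat in the added wait logic: the CAPTCHA Exception is raised inside the same try whose bare except triggers the fallback, so the CAPTCHA error is swallowed and the code quietly proceeds to wait for '#search'. A condensed sketch that keeps the diff's selectors and timeouts but moves the CAPTCHA check out and narrows the except so the error actually propagates (the helper name is hypothetical):

    from playwright.async_api import Page, TimeoutError as PlaywrightTimeout

    async def wait_for_results(page: Page) -> None:
        # CAPTCHA check first, outside the try, so its exception propagates.
        if await page.query_selector('form#captcha-form, div#recaptcha'):
            raise Exception("Google CAPTCHA encountered. Cannot proceed with search.")
        try:
            await page.wait_for_selector('.g, .tF2Cxc', timeout=30000)  # primary containers
        except PlaywrightTimeout:
            await page.wait_for_selector('#search', timeout=10000)  # generic fallback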
@@ -133,6 +140,7 @@ async def get_top_search_result(query: str):
 
         except Exception as e:
             logger.error(f"Search failed: {str(e)}")
+            await page.screenshot(path="search_error.png")
             await browser.close()
             raise
 
@@ -154,193 +162,4 @@ async def scrape_page(
         logger.error(f"Search error: {str(e)}")
         raise HTTPException(status_code=500, detail=f"Search failed: {str(e)}")
 
-
-    async with async_playwright() as p:
-        logger.info("Launching browser...")
-        browser = await p.chromium.launch(
-            headless=True,
-            args=[
-                '--no-sandbox',
-                '--disable-setuid-sandbox',
-                '--disable-dev-shm-usage',
-                '--disable-accelerated-2d-canvas',
-                '--no-first-run',
-                '--no-zygote',
-                '--disable-gpu'
-            ]
-        )
-        page = await browser.new_page()
-
-        try:
-            logger.info(f"Navigating to {target_url}...")
-            await page.goto(target_url, wait_until="networkidle")
-            response = ScrapeResponse(source_url=target_url)
-
-            # Always get page title and meta description
-            logger.info("Getting page metadata...")
-            response.page_title = await page.title()
-
-            meta_desc = await page.evaluate("""
-                () => {
-                    const meta = document.querySelector('meta[name="description"]');
-                    return meta ? meta.getAttribute('content') : null;
-                }
-            """)
-            response.meta_description = meta_desc
-
-            # Get body content (clean text)
-            if get_body:
-                logger.info("Extracting body content...")
-                body_content = await page.evaluate("""
-                    () => {
-                        const body = document.querySelector('body');
-                        if (!body) return null;
-
-                        // Remove script and style elements
-                        const scripts = body.querySelectorAll('script, style, noscript');
-                        scripts.forEach(el => el.remove());
-
-                        // Get clean text content
-                        return body.innerText.trim();
-                    }
-                """)
-                response.body_content = body_content
-
-            # Get screenshot (full page)
-            if screenshot:
-                logger.info("Taking full page screenshot...")
-                screenshot_bytes = await page.screenshot(full_page=True)
-                response.screenshot = base64.b64encode(screenshot_bytes).decode('utf-8')
-
-            # Get links with better filtering
-            if get_links:
-                logger.info("Extracting links...")
-                links = await page.evaluate("""
-                    () => {
-                        return Array.from(document.querySelectorAll('a[href]')).map(a => {
-                            const text = a.innerText.trim();
-                            const href = a.href;
-
-                            // Only include links with meaningful text and valid URLs
-                            if (text && href && href.startsWith('http')) {
-                                return {
-                                    text: text.substring(0, 200), // Limit text length
-                                    href: href
-                                }
-                            }
-                            return null;
-                        }).filter(link => link !== null);
-                    }
-                """)
-                response.links = [LinkInfo(**link) for link in links]
-
-            # Lead Generation Extraction
-            if lead_generation:
-                logger.info("Extracting lead generation data...")
-                lead_data_raw = await page.evaluate("""
-                    () => {
-                        const result = {
-                            emails: [],
-                            phones: [],
-                            social_media: [],
-                            contact_forms: [],
-                            company_name: null,
-                            address: null,
-                            technologies: [],
-                            industry_keywords: []
-                        };
-
-                        // Extract emails
-                        const emailRegex = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g;
-                        const pageText = document.body.innerText;
-                        const emails = pageText.match(emailRegex) || [];
-                        result.emails = [...new Set(emails)].slice(0, 10); // Unique emails, max 10
-
-                        // Extract phone numbers
-                        const phoneRegex = /(\+?1?[-.\s]?)?\(?([0-9]{3})\)?[-.\s]?([0-9]{3})[-.\s]?([0-9]{4})/g;
-                        const phones = pageText.match(phoneRegex) || [];
-                        result.phones = [...new Set(phones)].slice(0, 5); // Unique phones, max 5
-
-                        // Extract social media links
-                        const socialLinks = Array.from(document.querySelectorAll('a[href]')).map(a => a.href)
-                            .filter(href => /facebook|twitter|linkedin|instagram|youtube|tiktok/i.test(href));
-                        result.social_media = [...new Set(socialLinks)].slice(0, 10);
-
-                        // Find contact forms
-                        const forms = Array.from(document.querySelectorAll('form')).map(form => {
-                            const action = form.action || window.location.href;
-                            return action;
-                        });
-                        result.contact_forms = [...new Set(forms)].slice(0, 5);
-
-                        // Extract company name (try multiple methods)
-                        result.company_name =
-                            document.querySelector('meta[property="og:site_name"]')?.content ||
-                            document.querySelector('meta[name="application-name"]')?.content ||
-                            document.querySelector('h1')?.innerText?.trim() ||
-                            document.title?.split('|')[0]?.split('-')[0]?.trim();
-
-                        // Extract address
-                        const addressRegex = /\d+\s+[A-Za-z\s]+(?:Street|St|Avenue|Ave|Road|Rd|Boulevard|Blvd|Lane|Ln|Drive|Dr|Court|Ct|Place|Pl)\s*,?\s*[A-Za-z\s]+,?\s*[A-Z]{2}\s*\d{5}/g;
-                        const addresses = pageText.match(addressRegex) || [];
-                        result.address = addresses[0] || null;
-
-                        // Detect technologies
-                        const techKeywords = ['wordpress', 'shopify', 'react', 'angular', 'vue', 'bootstrap', 'jquery', 'google analytics', 'facebook pixel'];
-                        const htmlContent = document.documentElement.outerHTML.toLowerCase();
-                        result.technologies = techKeywords.filter(tech => htmlContent.includes(tech));
-
-                        // Industry keywords
-                        const industryKeywords = ['consulting', 'marketing', 'software', 'healthcare', 'finance', 'real estate', 'education', 'retail', 'manufacturing', 'legal', 'restaurant', 'fitness', 'beauty', 'automotive'];
-                        const lowerPageText = pageText.toLowerCase();
-                        result.industry_keywords = industryKeywords.filter(keyword => lowerPageText.includes(keyword));
-
-                        return result;
-                    }
-                """)
-
-                # Calculate lead score
-                lead_score = 0
-                if lead_data_raw['emails']: lead_score += 30
-                if lead_data_raw['phones']: lead_score += 25
-                if lead_data_raw['contact_forms']: lead_score += 20
-                if lead_data_raw['social_media']: lead_score += 15
-                if lead_data_raw['company_name']: lead_score += 10
-                if lead_data_raw['address']: lead_score += 15
-                if lead_data_raw['technologies']: lead_score += 10
-                if lead_data_raw['industry_keywords']: lead_score += 5
-
-                # Create lead data object
-                contact_info = ContactInfo(
-                    emails=lead_data_raw['emails'],
-                    phones=lead_data_raw['phones'],
-                    social_media=lead_data_raw['social_media'],
-                    contact_forms=lead_data_raw['contact_forms']
-                )
-
-                business_info = BusinessInfo(
-                    company_name=lead_data_raw['company_name'],
-                    address=lead_data_raw['address'],
-                    description=response.meta_description,
-                    industry_keywords=lead_data_raw['industry_keywords']
-                )
-
-                response.lead_data = LeadData(
-                    contact_info=contact_info,
-                    business_info=business_info,
-                    lead_score=min(lead_score, 100), # Cap at 100
-                    technologies=lead_data_raw['technologies']
-                )
-
-            await browser.close()
-            logger.info("Scraping completed successfully")
-            return response
-
-        except Exception as e:
-            logger.error(f"Error during scraping: {str(e)}")
-            await browser.close()
-            raise HTTPException(status_code=500, detail=f"Scraping error: {str(e)}")
-
-    except Exception as e:
-        logger.error(f"Error launching browser: {str(e)}")
-        raise HTTPException(status_code=500, detail=f"Browser launch error: {str(e)}")
+    # ... (keep the rest of the scraping function unchanged) ...