import gradio as gr
from search import search_google
from scraper import scrape_url
from rag import VectorStore
from llm import generate_answer
import time

vs = VectorStore()


def ask_agent(question):
    start_time = time.time()

    # Search Google
    urls = search_google(question, num_results=3)
    if not urls:
        return "⚠️ No search results found. Try a different query."

    # Scrape URLs
    texts_images = []
    for url in urls:
        texts_images.append(scrape_url(url))
        time.sleep(0.5)  # Add delay between requests

    texts = [ti[0] for ti in texts_images if not ti[0].startswith("[Error")]
    images = [ti[1] for ti in texts_images]

    # Add to vector store only if we have texts
    if texts:
        vs.add_texts(texts)

    # Retrieve context
    relevant = vs.retrieve(question, top_k=2) if vs.has_data() else []
    context = "\n\n".join(relevant) if relevant else "No relevant context found."

    # Generate answer
    answer = generate_answer(context, question)

    # Prepare output
    image_markdown = ""
    for i, (url, imgs) in enumerate(zip(urls, images)):
        if imgs:
            # Show the first image with a link back to its source page
            img_url = imgs[0]
            image_markdown += f'![Image from source {i + 1}]({img_url})\n\n[Source {i + 1}]({url})\n\n'

    processing_time = round(time.time() - start_time, 2)

    final_output = f"""
## 🧠 Answer
{answer}

---

## 📸 Images & Sources
{image_markdown if image_markdown else "No images found"}