#!/usr/bin/env python
# coding: utf-8

# In[2]:

import os

import google.generativeai as genai
import gradio as gr
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings

# Never hard-code real credentials; supply the key via your environment or a
# secrets manager. The value below is a placeholder.
os.environ["MY_SECRET_KEY"] = "YOUR_GOOGLE_API_KEY"
# In[3]:

# One-time dependency installs (uncomment when running in a fresh environment):
#!pip install pypdf
#!pip install faiss-cpu

# In[4]:
google_api_key = os.environ["MY_SECRET_KEY"]

# Check that the API key was found before configuring any clients
if google_api_key:
    # Expose the key under the name the Google integrations expect
    os.environ["GOOGLE_API_KEY"] = google_api_key
    llm = ChatGoogleGenerativeAI(
        model="gemini-pro",  # specify the model name
        google_api_key=os.environ["GOOGLE_API_KEY"],
    )
    # Configure the raw google.generativeai client as well
    genai.configure(api_key=google_api_key)
    model = genai.GenerativeModel("gemini-pro")
else:
    print("Error: MY_SECRET_KEY not set. Please store your API key in the environment.")
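# Optional smoke test for the raw google.generativeai client, commented out
# because it makes a billable API call; generate_content() and .text are the
# library's documented interface.
# print(model.generate_content("Say hello").text)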
# In[5]:

work_dir = os.getcwd()
# In[6]:

# Verify the source PDF exists before trying to load it
assert "Team1.pdf" in os.listdir(work_dir), "Team1.pdf not found in the specified directory!"
print(f"Current Working Directory: {os.getcwd()}")
# In[7]:

# Load the PDF and split its text
pdf_path = "Team1.pdf"  # ensure this file is uploaded alongside the script
loader = PyPDFLoader(pdf_path)
documents = loader.load()

# Split the text into overlapping chunks for retrieval
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=10)
text_chunks = text_splitter.split_documents(documents)
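# Quick sanity check on the split (optional). The counts depend entirely on
# the contents of Team1.pdf, so the output here is illustrative.
print(f"Loaded {len(documents)} pages, split into {len(text_chunks)} chunks")
print(text_chunks[0].page_content[:200])  # preview of the first chunk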
# In[8]:

# Generate embeddings for each chunk
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

# Store the embeddings in a FAISS index and expose it as a retriever
vectorstore = FAISS.from_documents(text_chunks, embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
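# Optional: persist the index so it is not rebuilt on every run. save_local()
# and load_local() are standard LangChain FAISS methods; the "faiss_index"
# directory name is an arbitrary choice for this sketch.
vectorstore.save_local("faiss_index")
# To reload later (recent LangChain versions require opting in to pickle deserialization):
# vectorstore = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)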
# In[9]:

# Set up the Gemini model used for answering RAG queries
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-001", temperature=0)
# In[10]:

def rag_query(query):
    # Retrieve the most relevant chunks for the query
    docs = retriever.get_relevant_documents(query)

    # Build a grounded prompt from the retrieved context
    context = "\n".join(doc.page_content for doc in docs)
    prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer directly and concisely:"

    try:
        response = llm.invoke(prompt)
        return response.content
    except Exception as e:
        return f"Error in RAG processing: {str(e)}"
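# Example call (commented out so the script does not hit the API on startup;
# the question is a hypothetical one about whatever Team1.pdf covers):
# print(rag_query("What is this document about?"))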
# In[11]:

# Initialize the general-purpose LLM once (avoid repeated initialization).
# A distinct name keeps it from clobbering the RAG model defined above.
general_llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)

# Define the general query function
def general_query(query):
    try:
        # Define the prompt template
        prompt = PromptTemplate.from_template("Answer the following query: {query}")
        # Create an LLM chain
        chain = LLMChain(llm=general_llm, prompt=prompt)
        # Run the chain and return its response
        response = chain.run(query=query)
        return response  # chain.run already returns a string, not a message object
    except Exception as e:
        return f"Error: {str(e)}"
# In[12]:

# Route the query to the selected handler
def query_router(query, method):
    if method == "Team Query":  # must match the dropdown options exactly
        return rag_query(query)
    elif method == "General Query":
        return general_query(query)
    return "Invalid selection!"
# Define local image paths
logo_path = "equinix-sign.jpg"  # ensure this file exists

# Custom CSS for background styling
custom_css = """
.gradio-container {
    background-color: #f0f0f0;
    text-align: center;
}
#logo img {
    display: block;
    margin: 0 auto;
    max-width: 200px;  /* adjust size */
}
"""
# Create the Gradio UI
with gr.Blocks(css=custom_css) as ui:
    gr.Image(logo_path, elem_id="logo", show_label=False, height=100, width=200)  # display logo

    # Title & description
    gr.Markdown("<h1 style='text-align: center; color: black;'>Equinix Chatbot for Automation Team</h1>")
    gr.Markdown("<p style='text-align: center; color: black;'>Ask me anything!</p>")

    # Input & dropdown section
    with gr.Row():
        query_input = gr.Textbox(label="Enter your query")
        query_method = gr.Dropdown(["Team Query", "General Query"], label="Select Query Type")

    # Button for submitting the query
    submit_button = gr.Button("Submit")

    # Output textbox
    output_box = gr.Textbox(label="Response", interactive=False)

    # Wire the button click to the router
    submit_button.click(query_router, inputs=[query_input, query_method], outputs=output_box)

# Launch the UI
ui.launch(share=True)
# In[28]:

# Export this notebook to a plain Python script
get_ipython().system('jupyter nbconvert --to script GenAI_1.ipynb')