from langchain_groq import ChatGroq
import os
import gradio as gr
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_community.document_loaders import PyPDFLoader
from langchain_experimental.text_splitter import SemanticChunker
from langchain_community.vectorstores import FAISS
from gtts import gTTS
import tempfile

# Set your API key from Hugging Face Secrets
# DO NOT hardcode your API key here
GROQ_API_KEY = os.environ.get('GROQ_API_KEY')

# Initialize the Groq LLM
llm = ChatGroq(
    model_name="llama3-70b-8192",
    temperature=0.7,
    api_key=GROQ_API_KEY
)

# Initialize conversation memory so follow-up questions keep context
memory = ConversationBufferMemory()
conversation = ConversationChain(llm=llm, memory=memory)

# Load the PDF, embed it, and build the vector store
def initialize_rag():
    try:
        # Load the PDF document and split it into pages
        loader = PyPDFLoader("TourismChatbot.pdf")
        pages = loader.load_and_split()

        # Create multilingual embeddings
        embed_model = FastEmbedEmbeddings(
            model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
        )

        # Split the text into semantic chunks
        semantic_chunker = SemanticChunker(embed_model, breakpoint_threshold_type="percentile")
        semantic_chunks = semantic_chunker.create_documents([d.page_content for d in pages])

        # Create the vector store
        vectorstore = FAISS.from_documents(documents=semantic_chunks, embedding=embed_model)
        return vectorstore, embed_model
    except Exception as e:
        print(f"Error initializing RAG: {e}")
        # Return None so the app can fall back to default content
        return None, None

# Initialize RAG components
vectorstore, embed_model = initialize_rag()

# Retrieve the most relevant chunks from the vector store
def retrieve_relevant_chunks(query, top_k=3):
    try:
        if vectorstore is not None:
            documents = vectorstore.similarity_search(query, k=top_k)
            return [doc.page_content for doc in documents]
        else:
            # Fallback content if the vector store is not available
            return ["Rajasthan is a state in India known for its forts, palaces, and desert landscapes."]
    except Exception as e:
        print(f"Error retrieving chunks: {e}")
        return ["Rajasthan is a state in India known for its forts, palaces, and desert landscapes."]

def generate_rag_response(query, language="English"):
    retrieved_chunks = retrieve_relevant_chunks(query)
    context = "\n".join(retrieved_chunks)
    prompt = f"""
    Please provide the answer in **{language}**.
    You are a helpful AI assistant providing tourism information about Rajasthan.
    Answer based on the following context. If information is unavailable, say "I don't know."

    Context:
    {context}

    Question: {query}

    Answer:
    """
    response = conversation.run(prompt)
    return response.strip()

def generate_speech(text, language):
    # Map the UI language names to gTTS language codes
    lang_map = {"English": "en", "Hindi": "hi", "Spanish": "es", "French": "fr", "German": "de", "Tamil": "ta"}
    lang_code = lang_map.get(language, "en")
    tts = gTTS(text, lang=lang_code)
    temp_audio_path = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False).name
    tts.save(temp_audio_path)
    return temp_audio_path

def chatbot_interface(query, language, chat_history):
    response = generate_rag_response(query, language)
    speech_path = generate_speech(response, language)
    # Append a 🔊 icon to the text; the audio file path is returned separately for a gr.Audio output
    response_with_audio = f"{response} 🔊 (Click play below)"
    chat_history.append((query, response_with_audio))
    return chat_history, speech_path, ""

def handle_menu_click(topic, language, chat_history):
    query = f"Give me information about {topic} in Rajasthan."
    return chatbot_interface(query, language, chat_history)

# Define language and menu options
language_options = ['English', 'Hindi', 'Spanish', 'French', 'German', 'Tamil']
menu_options = ["Places to Visit", "Best Time to Visit", "Festivals", "Cuisine", "Travel Tips"]

# Create the Gradio interface
with gr.Blocks(css="""
    body {background-color: #FFF2E1; font-family: Arial, sans-serif;}
    .gradio-container {max-width: 800px; margin: auto; padding: 20px; background: #FFF2E1; border-radius: 15px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);}
    .gradio-title {color: #462f22; text-align: center; font-size: 24px; font-weight: bold; padding-bottom: 10px;}
    .gradio-chat {border: 1px solid #e1c7a6; border-radius: 10px; padding: 10px; background: #fff; min-height: 250px; color: #462f22;}
    .gr-button {background-color: #FFFCF5; color: #ec8d12; font-size: 14px; border-radius: 8px; padding: 8px; border: 2px solid #e6ac55; cursor: pointer;}
    .gr-button:hover {background-color: #ec8d12; color: #fff;}
    .clear-chat {float: right; background: #fff3e0; border: 1px solid #ed5722; color: #ed5722; font-weight: bold; border-radius: 6px; padding: 5px 10px; cursor: pointer;}
    .chat-input {width: 100%; padding: 10px; border-radius: 8px; border: 1px solid #e1c7a6;}
""") as demo:
    gr.Markdown("