# LexGuardian — app.py
# Hugging Face Space by sunbal7 (revision 4299e4d, verified; ~1.86 kB)
import streamlit as st
import sympy as sp
import chromadb
from transformers import pipeline
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.llms import OpenAI
import os
# Initialize ChromaDB for Retrieval-Augmented Generation (RAG)
# NOTE(review): `chroma_client` is created here but never referenced again in
# this file — retrieval below goes through FAISS. Confirm whether this client
# is still needed or is leftover from an earlier Chroma-based implementation.
chroma_client = chromadb.PersistentClient(path="./chroma_db")
# Sentence-transformer embeddings used to encode text for the FAISS index;
# must match the model the index was originally built with.
embedding_model = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Check that the FAISS index exists before wiring up the retrieval chain.
faiss_path = "faiss_index"
if not os.path.exists(faiss_path):
    st.error("⚠️ FAISS index not found. Please upload or generate the FAISS index first.")
    # Halt the script run here: everything below depends on `qa_chain`,
    # which would otherwise be undefined and raise NameError when the
    # "Generate Model" button is pressed.
    st.stop()

# Load the RAG-based retrieval system from disk.
# NOTE(review): allow_dangerous_deserialization unpickles the stored index —
# only acceptable because the file is locally generated, never user-uploaded.
vectorstore = FAISS.load_local(faiss_path, embedding_model, allow_dangerous_deserialization=True)
retriever = vectorstore.as_retriever()
# OpenAI() reads OPENAI_API_KEY from the environment.
qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(), retriever=retriever)
# Load the lightweight seq2seq model used to draft the mathematical model.
# flan-t5-small keeps memory/latency low enough for a free Space.
model = pipeline("text2text-generation", model="google/flan-t5-small")

# --- Streamlit UI ---
# Fixed mojibake in the title: the source had "πŸ€–", i.e. the UTF-8 bytes of
# the robot emoji mis-decoded as Latin-1.
st.title("🤖 AI-Driven Mathematical Model Generator")
st.write("Enter a problem statement in natural language to get a mathematical model.")
user_input = st.text_area("✍️ Enter your problem:")
# Button handler: retrieve context, generate a candidate model, and render it.
# (Mojibake emoji "πŸš€"/"πŸ“Œ" repaired to their intended UTF-8 characters.)
if st.button("🚀 Generate Model"):
    if not user_input.strip():
        # Guard against an empty prompt — avoids a pointless LLM round-trip.
        st.warning("Please enter a problem statement first.")
    else:
        retrieved_context = qa_chain.run(user_input)  # RAG retrieval
        response = model(
            f"Generate a mathematical model for: {user_input}\nContext: {retrieved_context}",
            max_length=200,
        )
        generated_text = response[0]['generated_text']
        try:
            # Attempt to parse the LLM output into a SymPy expression.
            equation = sp.sympify(generated_text)
        except (sp.SympifyError, TypeError, ValueError):
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt). If parsing fails, fall back
            # to showing the raw generated text.
            equation = generated_text
        st.subheader("📌 Mathematical Model:")
        # sp.latex() accepts either a SymPy expression or a plain string.
        st.latex(sp.latex(equation))
        st.code(str(equation), language='python')