import os

import streamlit as st
import sympy as sp
from transformers import pipeline
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.llms import OpenAI
# Embedding model used by the FAISS-based retrieval system (RAG)
embedding_model = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Check that the FAISS index exists before wiring up retrieval
faiss_path = "faiss_index"
if not os.path.exists(faiss_path):
    st.error("⚠️ FAISS index not found. Please upload or generate the FAISS index first.")
    st.stop()  # Halt here; the retrieval chain below cannot be built without the index

# Load the RAG-based retrieval system
vectorstore = FAISS.load_local(faiss_path, embedding_model, allow_dangerous_deserialization=True)
retriever = vectorstore.as_retriever()
qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(), retriever=retriever)  # OpenAI() reads OPENAI_API_KEY
# Load the NLP model
model = pipeline("text2text-generation", model="google/flan-t5-small")

st.title("🤖 AI-Driven Mathematical Model Generator")
st.write("Enter a problem statement in natural language to get a mathematical model.")

user_input = st.text_area("✍️ Enter your problem:")
if st.button("🚀 Generate Model"):
    retrieved_context = qa_chain.run(user_input)  # RAG retrieval
    response = model(
        f"Generate a mathematical model for: {user_input}\nContext: {retrieved_context}",
        max_length=200,
    )
    generated_text = response[0]["generated_text"]
    try:
        equation = sp.sympify(generated_text)  # Try to parse the output as a symbolic expression
    except (sp.SympifyError, TypeError):
        equation = generated_text  # If parsing fails, fall back to the raw text
    st.subheader("📐 Mathematical Model:")
    st.latex(sp.latex(equation))
    st.code(str(equation), language="python")
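
The try/except fallback above hinges on sp.sympify raising an error whenever the model's output is not a valid symbolic expression. A minimal sketch of that round-trip, with illustrative sample strings rather than real model outputs:

import sympy as sp

# A parseable expression survives the round-trip and renders as LaTeX
eq = sp.sympify("x**2 + 3*x + 2")
print(sp.latex(eq))  # x^{2} + 3 x + 2

# Free-form prose is not a valid expression: sympify raises SympifyError,
# which the app catches in order to fall back to the raw text
try:
    sp.sympify("profit equals revenue minus cost")
except sp.SympifyError:
    print("fell back to raw text")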
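
The app stops early when the faiss_index folder is missing, so a one-off builder script has to run first. A minimal sketch of such a script, assuming a hypothetical docs/ folder of plain-text files; the glob pattern and chunk sizes are illustrative, not part of the original app:

import glob

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.vectorstores import FAISS

# Same embedding model the app loads, so index and query vectors match
embedding_model = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Read the raw corpus (hypothetical location)
texts = []
for path in glob.glob("docs/*.txt"):
    with open(path, encoding="utf-8") as f:
        texts.append(f.read())

# Split into retrieval-sized chunks
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_text("\n\n".join(texts))

# Embed the chunks and persist the index where the app expects it
vectorstore = FAISS.from_texts(chunks, embedding_model)
vectorstore.save_local("faiss_index")

With the index in place, the app itself runs with streamlit run app.py (assuming the file is saved as app.py) and needs OPENAI_API_KEY set in the environment for the RetrievalQA chain.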