import streamlit as st
import sympy as sp
import chromadb
from transformers import pipeline
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.llms import OpenAI
# Initialize a persistent ChromaDB client (created here but unused below;
# the retrieval step actually goes through FAISS)
chroma_client = chromadb.PersistentClient(path="./chroma_db")
embedding_model = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Load the RAG-based Retrieval System
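# NOTE: load_local expects a prebuilt index directory named "faiss_index".
# A hypothetical one-off build step (corpus and texts are placeholders,
# run separately) could look like:
#   texts = ["Projectile height: h(t) = v0*t - g*t**2/2", ...]
#   FAISS.from_texts(texts, embedding_model).save_local("faiss_index")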
vectorstore = FAISS.load_local("faiss_index", embedding_model)
retriever = vectorstore.as_retriever()
qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(), retriever=retriever)
# Load NLP Model
model = pipeline("text2text-generation", model="google/flan-t5-small")
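# flan-t5-small keeps the Space lightweight; larger checkpoints such as
# "google/flan-t5-base" are drop-in replacements if output quality matters more.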
st.title("🤖 AI-Driven Mathematical Model Generator")
st.write("Enter a problem statement in natural language to get a mathematical model.")
user_input = st.text_area("✍️ Enter your problem:")
if st.button("🚀 Generate Model"):
    retrieved_context = qa_chain.run(user_input)  # RAG retrieval
    prompt = f"Generate a mathematical model for: {user_input}\nContext: {retrieved_context}"
    response = model(prompt, max_length=200)
    generated_text = response[0]["generated_text"]
    st.subheader("📐 Mathematical Model:")
    try:
        equation = sp.sympify(generated_text)
        st.latex(sp.latex(equation))  # render as LaTeX when SymPy can parse it
    except (sp.SympifyError, TypeError):
        equation = generated_text  # fall back to the raw text if parsing fails
    st.code(str(equation), language="python")
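# Assumed usage: the langchain OpenAI LLM reads OPENAI_API_KEY from the
# environment; launch with `streamlit run app.py` (filename is a placeholder).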