sunbal7 commited on
Commit
9406250
·
verified ·
1 Parent(s): 40bc79c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -3
app.py CHANGED
@@ -1,9 +1,23 @@
1
  import streamlit as st
2
  import sympy as sp
 
3
  from transformers import pipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  # Load NLP Model
6
- model = pipeline("text2text-generation", model="facebook/bart-large")
7
 
8
  st.title("πŸ€– AI-Driven Mathematical Model Generator")
9
  st.write("Enter a problem statement in natural language to get a mathematical model.")
@@ -11,8 +25,13 @@ st.write("Enter a problem statement in natural language to get a mathematical mo
11
  user_input = st.text_area("✍️ Enter your problem:")
12
 
13
  if st.button("πŸš€ Generate Model"):
14
- response = model(user_input, max_length=200)
15
- equation = sp.sympify(response[0]['generated_text'])
 
 
 
 
 
16
 
17
  st.subheader("πŸ“Œ Mathematical Model:")
18
  st.latex(sp.latex(equation))
 
import streamlit as st
import sympy as sp
import chromadb
from transformers import pipeline
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.llms import OpenAI

"""AI-driven mathematical model generator.

Streamlit app: takes a natural-language problem statement, retrieves related
context via a RAG pipeline (FAISS + OpenAI through LangChain), asks a seq2seq
model (flan-t5-small) to produce a mathematical model, and renders the result
as LaTeX when it parses as a SymPy expression, or as plain text otherwise.
"""


@st.cache_resource
def _load_rag_chain():
    """Build the retrieval-augmented QA chain once per server process.

    Cached with st.cache_resource so the Chroma client, embeddings, FAISS
    index, and OpenAI chain are NOT re-created on every Streamlit rerun
    (each widget interaction re-executes the whole script).
    """
    # Initialize ChromaDB for Retrieval-Augmented Generation (RAG).
    # NOTE(review): the client is created but not wired into the chain below —
    # retrieval actually goes through the FAISS index; confirm this is intended.
    chromadb.PersistentClient(path="./chroma_db")
    embedding_model = SentenceTransformerEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    # SECURITY: FAISS.load_local deserializes a pickle file — only load an
    # index you created yourself; never point this at untrusted data.
    vectorstore = FAISS.load_local("faiss_index", embedding_model)
    retriever = vectorstore.as_retriever()
    return RetrievalQA.from_chain_type(llm=OpenAI(), retriever=retriever)


@st.cache_resource
def _load_generator():
    """Load the text2text-generation pipeline once (model download is slow)."""
    return pipeline("text2text-generation", model="google/flan-t5-small")


qa_chain = _load_rag_chain()
model = _load_generator()

st.title("🤖 AI-Driven Mathematical Model Generator")
st.write("Enter a problem statement in natural language to get a mathematical model.")

user_input = st.text_area("✍️ Enter your problem:")

if st.button("🚀 Generate Model"):
    if not user_input.strip():
        # Guard: don't run the LLM pipeline on an empty prompt.
        st.warning("Please enter a problem statement first.")
    else:
        retrieved_context = qa_chain.run(user_input)  # RAG retrieval
        response = model(
            f"Generate a mathematical model for: {user_input}\nContext: {retrieved_context}",
            max_length=200,
        )
        generated_text = response[0]["generated_text"]

        st.subheader("📌 Mathematical Model:")
        try:
            # Only render LaTeX when the model output is a valid symbolic
            # expression; a bare `except:` here previously swallowed every
            # error (including KeyboardInterrupt) and then fed a plain string
            # into sp.latex, producing \mathtt{...} garbage in the UI.
            equation = sp.sympify(generated_text)
        except (sp.SympifyError, TypeError, ValueError):
            # Parsing failed — show the raw model output as text instead.
            st.write(generated_text)
        else:
            st.latex(sp.latex(equation))