sunbal7 committed on
Commit
4299e4d
·
verified ·
1 Parent(s): b6ef89b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -24
app.py CHANGED
"""Streamlit app: AI-driven mathematical model generator.

Retrieves context for a user-entered problem statement via a FAISS-backed
RetrievalQA chain, asks a small seq2seq model (flan-t5-small) to draft a
mathematical model, and renders the result as LaTeX and code.

NOTE(review): assumes ``streamlit`` is imported as ``st`` on the first lines
of the file (outside this diff view) — confirm.
"""
import sympy as sp
import chromadb
from transformers import pipeline
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.llms import OpenAI
import os

# Initialize ChromaDB for Retrieval-Augmented Generation (RAG).
# NOTE(review): chroma_client is never used below — presumably kept for a
# planned Chroma-backed store; verify before removing.
chroma_client = chromadb.PersistentClient(path="./chroma_db")
embedding_model = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Check if the FAISS index exists before building the retrieval chain.
faiss_path = "faiss_index"

# BUG FIX: qa_chain was only bound in the else-branch, so clicking the
# button while the index is missing raised NameError. Default it to None
# and guard in the button handler instead.
qa_chain = None

if not os.path.exists(faiss_path):
    st.error("⚠️ FAISS index not found. Please upload or generate the FAISS index first.")
else:
    # Load the RAG-based retrieval system.
    # allow_dangerous_deserialization=True: FAISS.load_local unpickles the
    # index, which is a local file this app created itself (trusted input).
    vectorstore = FAISS.load_local(faiss_path, embedding_model, allow_dangerous_deserialization=True)
    retriever = vectorstore.as_retriever()
    qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(), retriever=retriever)

# Load the NLP model that drafts the mathematical model.
model = pipeline("text2text-generation", model="google/flan-t5-small")

st.title("πŸ€– AI-Driven Mathematical Model Generator")
st.write("Enter a problem statement in natural language to get a mathematical model.")

user_input = st.text_area("✍️ Enter your problem:")

if st.button("πŸš€ Generate Model"):
    if qa_chain is None:
        # Index was missing at startup; nothing to retrieve against.
        st.error("⚠️ FAISS index not found. Please upload or generate the FAISS index first.")
    else:
        retrieved_context = qa_chain.run(user_input)  # RAG retrieval
        response = model(
            f"Generate a mathematical model for: {user_input}\nContext: {retrieved_context}",
            max_length=200,
        )

        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; catch only the parse failures
        # sympify actually raises.
        try:
            equation = sp.sympify(response[0]['generated_text'])
        except (sp.SympifyError, TypeError):
            equation = response[0]['generated_text']  # If parsing fails, return text

        st.subheader("πŸ“Œ Mathematical Model:")
        st.latex(sp.latex(equation))
        st.code(str(equation), language='python')