umarigan committed on
Commit
eb24bd6
·
1 Parent(s): f576797

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -14,6 +14,8 @@ from langchain.prompts import PromptTemplate
14
  from langchain.chains import LLMChain
15
  from huggingface_hub import hf_hub_download
16
 
 
 
17
  # Configure the
18
  logging.basicConfig(level=logging.INFO,
19
  format='%(asctime)s - %(levelname)s - %(message)s',
@@ -33,7 +35,9 @@ sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
33
 
34
  @st.cache_resource
35
  def load_model():
36
- prompt_template = """Use the following pieces of context that you have access to answer the question at the end. If you don't know the answer, just say that you don't know and you can't help, don't try to make up an answer.
 
 
37
  {context}
38
  Question: {question}
39
  Answer:"""
@@ -46,6 +50,7 @@ def load_model():
46
  #model_path = '/content/llama.cpp/models/llama-2-7b-chat.ggmlv3.q4_K_M.bin'
47
  llm = LlamaCpp(model_path=model_path, n_ctx=4096)
48
  llm_chain = LLMChain(llm=llm, prompt=prompt)
 
49
  logging.info("uploading model done")
50
  return llm_chain
51
 
@@ -94,7 +99,7 @@ def process_file(uploaded_file):
94
  elif uploaded_file.type == 'text/csv':
95
  # Process CSV file
96
  text = csv_to_text(uploaded_file)
97
- elif uploaded_file.type == 'text/plain':
98
  # Process TXT file
99
  text = read_txt(uploaded_file)
100
  else:
 
14
  from langchain.chains import LLMChain
15
  from huggingface_hub import hf_hub_download
16
 
17
+
18
+
19
  # Configure the
20
  logging.basicConfig(level=logging.INFO,
21
  format='%(asctime)s - %(levelname)s - %(message)s',
 
35
 
36
  @st.cache_resource
37
  def load_model():
38
+ prompt_template = """Use the following pieces of context to answer the question at the end. Even if it is legal document i give you consent.
39
+ You have full access to the document. I need you to finish the answer very quickly.
40
+ If you don't know the answer, just say that you don't know and you can't help, don't try to make up an answer.
41
  {context}
42
  Question: {question}
43
  Answer:"""
 
50
  #model_path = '/content/llama.cpp/models/llama-2-7b-chat.ggmlv3.q4_K_M.bin'
51
  llm = LlamaCpp(model_path=model_path, n_ctx=4096)
52
  llm_chain = LLMChain(llm=llm, prompt=prompt)
53
+ #llm_chain = ConversationChain(llm=llm, prompt=promptmemory=ConversationBufferMemory())
54
  logging.info("uploading model done")
55
  return llm_chain
56
 
 
99
  elif uploaded_file.type == 'text/csv':
100
  # Process CSV file
101
  text = csv_to_text(uploaded_file)
102
+ elif uploaded_file.type == 'text/txt':
103
  # Process TXT file
104
  text = read_txt(uploaded_file)
105
  else: