tferhan committed
Commit 380f174 · verified · 1 parent: 96c2b60

Update app.py

Files changed (1):
  1. app.py +2 -21
app.py CHANGED
@@ -1,12 +1,3 @@
-# -*- coding: utf-8 -*-
-"""api.ipynb
-
-Automatically generated by Colaboratory.
-
-Original file is located at
-    https://colab.research.google.com/drive/1XRryfVWG4d_ScN5ADvlZpKmREvTJN3mg
-"""
-
 import gradio as gr
 import os
 
@@ -58,7 +49,6 @@ def initialize_database(file_path):
     print('Collection name: ', collection_name)
     # Load document and create splits
     doc_splits = load_doc(file_path)
-    # Create or load vector database
     # global vector_db
     vector_db = create_db(doc_splits, collection_name)
     return vector_db, collection_name, "Complete!"
@@ -71,7 +61,6 @@ def create_db(splits, collection_name):
         embedding=embedding,
         client=new_client,
         collection_name=collection_name,
-        # persist_directory=default_persist_directory
     )
     return vectordb
 
@@ -89,7 +78,6 @@ def initialize_llmchain(temperature, max_tokens, top_k, vector_db):
 
     llm = HuggingFaceEndpoint(
         repo_id='mistralai/Mixtral-8x7B-Instruct-v0.1',
-        # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True},
         temperature = temperature,
         max_new_tokens = max_tokens,
         top_k = top_k,
@@ -101,14 +89,12 @@ def initialize_llmchain(temperature, max_tokens, top_k, vector_db):
         retriever=retriever,
         chain_type="stuff",
         memory=memory,
-        # combine_docs_chain_kwargs={"prompt": your_prompt})
         return_source_documents=True,
-        #return_generated_question=False,
         verbose=False,
     )
     return qa_chain
 
-qa = initialize_llmchain(0.7, 1024, 1, vec_cre)
+qa = initialize_llmchain(0.7, 1024, 1, vec_cre)  # the question-answering chain
 
 def format_chat_history(message, chat_history):
     formatted_chat_history = []
@@ -119,26 +105,21 @@ def format_chat_history(message, chat_history):
 
 def conversation(message, history):
     formatted_chat_history = format_chat_history(message, history)
-    #print("formatted_chat_history",formatted_chat_history)
 
     # Generate response using QA chain
     response = qa({"question": message, "chat_history": formatted_chat_history})
     response_answer = response["answer"]
     if response_answer.find("Helpful Answer:") != -1:
         response_answer = response_answer.split("Helpful Answer:")[-1]
+    # You could also return where the model got the answer from, to fine-tune or adjust the model, but here the answer alone is enough
     response_sources = response["source_documents"]
     response_source1 = response_sources[0].page_content.strip()
     response_source2 = response_sources[1].page_content.strip()
     response_source3 = response_sources[2].page_content.strip()
-    # Langchain sources are zero-based
     response_source1_page = response_sources[0].metadata["page"] + 1
     response_source2_page = response_sources[1].metadata["page"] + 1
     response_source3_page = response_sources[2].metadata["page"] + 1
-    # print ('chat response: ', response_answer)
-    # print('DB source', response_sources)
 
-    # Append user message and response to chat history
-    # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
     return response_answer
 
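For context on the create_db hunk: with the commented-out persist_directory gone, the Chroma collection is built against whatever client new_client refers to and is not written to disk. A minimal sketch of that pattern, assuming an ephemeral chromadb client and a Hugging Face embedding model (neither choice is visible in this diff):

import chromadb
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

def create_db(splits, collection_name):
    # The embedding model is an assumption; the diff does not show which one app.py uses.
    embedding = HuggingFaceEmbeddings()
    # Ephemeral client: without persist_directory the collection lives in memory only.
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb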
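The initialize_llmchain hunks drop the old model_kwargs dict in favour of passing the generation settings directly to HuggingFaceEndpoint, and the chain is now created once at module level with fixed hyperparameters. A sketch of the resulting setup, assuming langchain_community's HuggingFaceEndpoint and ConversationalRetrievalChain (the memory and retriever construction are not shown in the diff):

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFaceEndpoint

def initialize_llmchain(temperature, max_tokens, top_k, vector_db):
    # Generation settings are passed directly instead of through model_kwargs.
    llm = HuggingFaceEndpoint(
        repo_id='mistralai/Mixtral-8x7B-Instruct-v0.1',
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )
    # Memory and retriever are assumptions; the diff only shows the keyword arguments below.
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        return_messages=True,
    )
    retriever = vector_db.as_retriever()
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    return qa_chain

# Built once at import time with fixed hyperparameters; vec_cre is the vector
# store created earlier in app.py (not shown in this diff).
qa = initialize_llmchain(0.7, 1024, 1, vec_cre)

One consequence of building the chain at import time is that temperature, max_new_tokens and top_k can no longer be adjusted without restarting the Space.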
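The comment added in conversation() notes that the retrieved source passages could also be returned. The chain already hands them back because it was built with return_source_documents=True, and LangChain's page metadata is zero-based, hence the +1 on each page number. A hypothetical variant that surfaces the sources alongside the answer (the committed function returns only response_answer):

def conversation_with_sources(message, history):
    # Hypothetical variant of conversation(); reuses qa and format_chat_history from app.py.
    formatted_chat_history = format_chat_history(message, history)
    response = qa({"question": message, "chat_history": formatted_chat_history})

    answer = response["answer"]
    # The Mixtral output sometimes echoes the prompt template; keep only the final answer.
    if "Helpful Answer:" in answer:
        answer = answer.split("Helpful Answer:")[-1]

    # source_documents is populated because the chain returns its retrieved passages.
    # Page metadata is zero-based, so +1 gives the human-readable page number.
    sources = [
        (doc.page_content.strip(), doc.metadata["page"] + 1)
        for doc in response["source_documents"][:3]
    ]
    return answer, sources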
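Since conversation(message, history) returns a plain string, it matches the function signature gr.ChatInterface expects. How app.py actually builds its UI is not visible in this commit; the wiring below is only an illustrative guess:

import gradio as gr

demo = gr.ChatInterface(
    fn=conversation,        # the function defined in app.py above
    title="Document Q&A",   # title is an assumption, not taken from the Space
)

if __name__ == "__main__":
    demo.launch()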