Update main.py
main.py CHANGED
@@ -12,20 +12,16 @@ from operator import itemgetter
 from pinecone import Pinecone
 from langchain_pinecone import PineconeVectorStore
 from langchain_community.chat_message_histories import ChatMessageHistory
-#from langchain_google_community import GoogleSearchAPIWrapper
 from langchain.memory import ConversationBufferMemory
 from langchain.schema.runnable import Runnable, RunnablePassthrough, RunnableConfig, RunnableLambda
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.chains import (
     StuffDocumentsChain, ConversationalRetrievalChain
 )
-#from langchain_core.tracers.context import tracing_v2_enabled
-#from langchain_core.tools import Tool
 
 import chainlit as cl
 from chainlit.input_widget import TextInput, Select, Switch, Slider
-
-#from chainlit.playground.providers.langchain import LangchainGenericProvider
+from chainlit.types import ThreadDict
 
 from deep_translator import GoogleTranslator
 
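The commented-out imports dropped here were dead code; the added ThreadDict import is required by the new @cl.on_chat_resume handler introduced at the end of this commit.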
@@ -51,25 +47,13 @@ def auth_callback(username: str, password: str):
         identifier=ident + " : 🧑🎓 User Datapcc", metadata={"role": "user", "provider": "credentials"}
     )
 
-@cl.step(type="
+@cl.step(type="tool")
 async def LLModel():
     os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HUGGINGFACEHUB_API_TOKEN']
     repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
     llm = HuggingFaceEndpoint(
         repo_id=repo_id, max_new_tokens=5300, temperature=1.0, task="text2text-generation", streaming=True
     )
-    #add_llm_provider(
-    #    LangchainGenericProvider(
-    #        # It is important that the id of the provider matches the _llm_type
-    #        id=llm._llm_type,
-    #        # The name is not important. It will be displayed in the UI.
-    #        name="Mistral 8x7b Instruct",
-    #        # This should always be a Langchain llm instance (correctly configured)
-    #        llm=llm,
-    #        # If the LLM works with messages, set this to True
-    #        is_chat=True
-    #    )
-    #)
     return llm
 
 @cl.step(type="tool")
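A side note on LLModel(): the opening os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HUGGINGFACEHUB_API_TOKEN'] line is a no-op, since HuggingFaceEndpoint already reads the token from the environment. A minimal sketch of the same factory without it (the import path is an assumption; the file's own HuggingFaceEndpoint import sits outside the hunks shown):

import chainlit as cl
from langchain_huggingface import HuggingFaceEndpoint  # assumed import path

@cl.step(type="tool")
async def LLModel():
    # HuggingFaceEndpoint picks up HUGGINGFACEHUB_API_TOKEN from the
    # environment on its own; no explicit token handling is needed here.
    return HuggingFaceEndpoint(
        repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
        max_new_tokens=5300,
        temperature=1.0,
        task="text2text-generation",
        streaming=True,
    )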
@@ -161,21 +145,24 @@ async def Search(input, categorie):
     results = [sources_text, verbatim_text, sources_offres]
     return results
 
+@cl.step(type="llm")
+async def setup_conversationalChain():
+    model = await LLModel()
+    retriever = await Retriever(res.get("name"))
+    qa = ConversationalRetrievalChain.from_llm(
+        model,
+        memory=memory,
+        chain_type="stuff",
+        return_source_documents=True,
+        verbose=False,
+        retriever=retriever
+    )
+    cl.user_session.set("runnable", qa)
+    cl.user_session.set("memory", memory)
+
 @cl.on_chat_start
 async def on_chat_start():
     await cl.Message(f"> REVIEWSTREAM").send()
-    #sources_videos = [
-    #    cl.Text(name="Videos", content="""<div style="display:inline-block;text-align:center;font-size:0.7rem;max-height:260px;width:33%;min-width:33%;max-width:33%;overflow:hidden"><a target="_blank" title="Alizé2 - Dimensionnement routier 1/2" href="https://clap.univ-eiffel.fr/permalink/v1261c4664a50dwedh6w/iframe/"><img src="https://clap.univ-eiffel.fr/thumb/v1261c4664a50dwedh6w/play/" width="100%" alt="Alizé2 - Dimensionnement routier 1/2"/><p>Alizé2 - Dimensionnement routier 1/2 Alizé2 - Dimensionnement routier 1/2</p></a></div>
-    #    <div style="display:inline-block;text-align:center;font-size:0.7rem;max-height:260px;width:33%;min-width:33%;max-width:33%;overflow:hidden"><a target="_blank" title="Alizé2 - Dimensionnement routier 1/2" href="https://clap.univ-eiffel.fr/permalink/v1261c4664a50dwedh6w/iframe/"><img src="https://clap.univ-eiffel.fr/thumb/v1261c4664a50dwedh6w/play/" width="100%" alt="Alizé2 - Dimensionnement routier 1/2"/><p>Alizé2 - Dimensionnement routier 1/2 Alizé2 - Dimensionnement routier 1/2</p></a></div>
-    #    <div style="display:inline-block;text-align:center;font-size:0.7rem;max-height:260px;width:33%;min-width:33%;max-width:33%;overflow:hidden"><a target="_blank" title="Alizé2 - Dimensionnement routier 1/2" href="https://clap.univ-eiffel.fr/permalink/v1261c4664a50dwedh6w/iframe/"><img src="https://clap.univ-eiffel.fr/thumb/v1261c4664a50dwedh6w/play/" width="100%" alt="Alizé2 - Dimensionnement routier 1/2"/><p>Alizé2 - Dimensionnement routier 1/2 Alizé2 - Dimensionnement routier 1/2</p></a></div>
-    #    <div style="display:inline-block;text-align:center;font-size:0.7rem;max-height:260px;width:33%;min-width:33%;max-width:33%;overflow:hidden"><a target="_blank" title="Alizé2 - Dimensionnement routier 1/2" href="https://clap.univ-eiffel.fr/permalink/v1261c4664a50dwedh6w/iframe/"><img src="https://clap.univ-eiffel.fr/thumb/v1261c4664a50dwedh6w/play/" width="100%" alt="Alizé2 - Dimensionnement routier 1/2"/><p>Alizé2 - Dimensionnement routier 1/2 Alizé2 - Dimensionnement routier 1/2</p></a></div>""",
-    #    display="inline")
-    #]
-    #await cl.Message(
-    #    content="Vidéos : ",
-    #    elements=sources_videos,
-    #).send()
-
     res = await cl.AskActionMessage(
         content="<div style='width:100%;text-align:center'>Sélectionnez une source documentaire</div>",
         actions=[
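Note that setup_conversationalChain() as added reads res and memory, but both are locals of on_chat_start, so calling the helper as written would raise NameError. A hedged sketch that passes them in explicitly instead (the parameters are my suggestion, not part of the commit):

@cl.step(type="llm")
async def setup_conversationalChain(source_name, memory):
    # source_name and memory are passed in rather than read from
    # on_chat_start's local scope, which this helper cannot see.
    model = await LLModel()
    retriever = await Retriever(source_name)
    qa = ConversationalRetrievalChain.from_llm(
        model,
        memory=memory,
        chain_type="stuff",
        return_source_documents=True,
        verbose=False,
        retriever=retriever,
    )
    cl.user_session.set("runnable", qa)
    cl.user_session.set("memory", memory)

# Call sites would then become, e.g.:
#     await setup_conversationalChain(res.get("name"), memory)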
@@ -217,20 +204,21 @@ async def on_chat_start():
     ########## Chain with streaming ##########
     message_history = ChatMessageHistory()
     memory = ConversationBufferMemory(memory_key="chat_history",output_key="answer",chat_memory=message_history,return_messages=True)
+    await setup_conversationalChain()
 
-
-
-
-
-
-
-
-
-
-
-    cl.user_session.set("runnable", qa)
+@cl.on_chat_resume
+async def on_chat_resume(thread: ThreadDict):
+    memory = ConversationBufferMemory(return_messages=True)
+    root_messages = [m for m in thread["steps"] if m["parentId"] == None]
+    for message in root_messages:
+        if message["type"] == "user_message":
+            memory.chat_memory.add_user_message(message["output"])
+        else:
+            memory.chat_memory.add_ai_message(message["output"])
+
     cl.user_session.set("memory", memory)
 
+    await setup_conversationalChain()
 
 @cl.on_message
 async def on_message(message: cl.Message):
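One caveat on the resume path: ConversationBufferMemory(return_messages=True) defaults to memory_key="history" with no output_key, while the chain built above expects memory_key="chat_history" and output_key="answer". A sketch of on_chat_resume that rebuilds the memory with matching keys (an assumption on my part; the commit keeps the defaults):

from chainlit.types import ThreadDict

@cl.on_chat_resume
async def on_chat_resume(thread: ThreadDict):
    # Use the same keys as the ConversationBufferMemory created in
    # on_chat_start, so the restored history is visible to the chain.
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        return_messages=True,
    )
    root_messages = [m for m in thread["steps"] if m["parentId"] is None]
    for message in root_messages:
        if message["type"] == "user_message":
            memory.chat_memory.add_user_message(message["output"])
        else:
            memory.chat_memory.add_ai_message(message["output"])
    cl.user_session.set("memory", memory)
    await setup_conversationalChain()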