Spaces:
Running
Running
checkin
Browse files — app.py: +15 −10
- requirements.txt +3 -1
app.py
CHANGED
@@ -1,23 +1,28 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient, login
|
3 |
import random
|
|
|
|
|
4 |
import os
|
5 |
|
6 |
login(token=os.environ["HUGGINGFACEHUB_API_TOKEN"])
|
7 |
|
8 |
-
""
|
9 |
-
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
10 |
-
"""
|
11 |
-
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
12 |
|
13 |
-
|
14 |
-
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
|
17 |
demo = gr.ChatInterface(
|
18 |
-
|
19 |
type="messages"
|
20 |
)
|
21 |
|
22 |
-
|
23 |
-
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient, login
|
3 |
import random
|
4 |
+
from langchain_openai import ChatOpenAI
|
5 |
+
from langchain.schema import AIMessage, HumanMessage
|
6 |
import os
|
7 |
|
# Authenticate this process against the Hugging Face Hub.
# Reads the token from the environment; raises KeyError if the
# HUGGINGFACEHUB_API_TOKEN secret is not set for the Space.
login(token=os.environ["HUGGINGFACEHUB_API_TOKEN"])

# LangChain chat-model client used by predict() below.
# NOTE(review): ChatOpenAI reads OPENAI_API_KEY from the environment —
# presumably configured as a second Space secret; confirm it is set.
model = ChatOpenAI(model="gpt-4o-mini")
|
|
|
|
|
|
|
11 |
|
def predict(message, history):
    """Chat callback for gr.ChatInterface(type="messages").

    Converts the OpenAI-style ``history`` (list of dicts with 'role' and
    'content' keys) into LangChain message objects, appends the new user
    ``message``, invokes the module-level ``model``, and returns the
    reply text. Entries whose role is neither "user" nor "assistant"
    (e.g. "system") are skipped, matching the original behavior.
    """
    role_to_cls = {"user": HumanMessage, "assistant": AIMessage}
    lc_messages = [
        role_to_cls[entry['role']](content=entry['content'])
        for entry in history
        if entry['role'] in role_to_cls
    ]
    lc_messages.append(HumanMessage(content=message))
    reply = model.invoke(lc_messages)
    return reply.content
|
22 |
|
# Wire the chat handler into a Gradio chat UI. type="messages" makes
# Gradio pass history as a list of {'role': ..., 'content': ...} dicts,
# which is the shape predict() expects.
demo = gr.ChatInterface(predict, type="messages")

# Start the Gradio server (blocking call; entry point of the Space).
demo.launch()
|
|
requirements.txt
CHANGED
@@ -1 +1,3 @@
|
|
1 |
-
huggingface_hub==0.25.2
|
|
|
|
|
|
1 |
+
huggingface_hub==0.25.2
|
langchain_openai
langchain
langgraph
|