import os

import gradio as gr
from huggingface_hub import login
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain.schema import AIMessage, HumanMessage

# Authenticate with the Hugging Face Hub; the token is expected as a Space secret.
login(token=os.environ["HUGGINGFACEHUB_API_TOKEN"])

# Wrap the hosted zephyr-7b-beta model in a LangChain chat model.
llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
)
model = ChatHuggingFace(llm=llm)
def predict(message, history):
    # Convert Gradio's messages-format history into LangChain messages.
    history_langchain_format = []
    for msg in history:
        if msg["role"] == "user":
            history_langchain_format.append(HumanMessage(content=msg["content"]))
        elif msg["role"] == "assistant":
            history_langchain_format.append(AIMessage(content=msg["content"]))
    history_langchain_format.append(HumanMessage(content=message))
    # Generate the next assistant turn and return its text.
    response = model.invoke(history_langchain_format)
    return response.content

demo = gr.ChatInterface(
    predict,
    type="messages",
)

demo.launch()
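
For a Space that runs on its own GPU, the chat model could also be built from a local transformers pipeline rather than the hosted endpoint. The snippet below is only a sketch, assuming the hardware can hold the zephyr-7b-beta weights; it is not part of the app above.

# Sketch only (assumption: the Space hardware can load the model locally).
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline

local_llm = HuggingFacePipeline.from_model_id(
    model_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 512},
)
model = ChatHuggingFace(llm=local_llm)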