|
import gradio as gr |
|
import os |
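# Gradio chatbot that interviews the user about their ideal Hugging Face Space
# and, when the chat ends, extracts search keywords from the conversation.
#
# Model calls go to an OpenAI-compatible endpoint configured through two
# environment variables (deployment details assumed, adjust as needed):
#   modal_api - API key for the endpoint
#   base_url  - base URL of the endpoint (the names suggest a Modal-hosted server)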
|
|
|
|
|
|
|
CHATBOT_SYSTEM_PROMPT = (
    "Your goal is to understand what the user needs in their ideal Hugging Face repository, specifically a Hugging Face Space. "
    "Engage in a natural conversation and ask clarifying questions about their needs, such as their use case or specific features they are looking for. "
    "Keep your responses concise and focused on helping the user. "
    "When you feel you have gathered enough detailed information about their requirements, ask the user to end the chat."
)
|
|
|
|
|
conversation_history = []  # not currently used; per-session history is tracked via gr.State below
|
|
|
|
|
def chat_with_user(user_message, history):
    """Send the system prompt, prior turns, and the new user message to the model."""
    from openai import OpenAI
    client = OpenAI(
        api_key=os.getenv("modal_api"),
        base_url=os.getenv("base_url"),
    )

    # Rebuild the message list from the (user, assistant) tuples in `history`.
    messages = [
        {"role": "system", "content": CHATBOT_SYSTEM_PROMPT}
    ]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_message})

    response = client.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=messages,
        max_tokens=256,
        temperature=0.7
    )
    assistant_reply = response.choices[0].message.content
    return assistant_reply
|
|
|
|
|
def extract_keywords_from_conversation(history):
    """Distill the finished conversation into roughly 5 search keywords."""
    print("Extracting keywords from conversation...")
    from openai import OpenAI
    client = OpenAI(
        api_key=os.getenv("modal_api"),
        base_url=os.getenv("base_url"),
    )

    # Flatten the (user, assistant) tuples into a plain-text transcript.
    conversation = "\n".join(
        f"User: {user_turn}\nAssistant: {assistant_turn}"
        for user_turn, assistant_turn in history if assistant_turn
    )
    system_prompt = (
        "You are an expert at helping find Hugging Face Spaces. You must look at the conversation carefully. "
        "Given a conversation, extract about 5 keywords that would be most useful for searching Hugging Face Spaces. "
        "Return only the keywords as a comma-separated list. "
        "Use keywords that are specific to the user's use case and the features they are looking for. "
        "Don't use very generic search words like programming, language, hugging face, ML, AI, etc."
    )
    user_prompt = (
        "Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search."
    )
    response = client.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ],
        max_tokens=64,
        temperature=0.3
    )
    print("Response received from the model...")
    print(response.choices[0].message.content)
    keywords = response.choices[0].message.content.strip()
    return keywords
|
|
|
# Gradio UI: chat window, message box, send/end buttons, and keyword output.
with gr.Blocks() as chatbot_demo:
    gr.Markdown("## Repo Recommendation Chatbot")

    initial_message = "Hello! Please tell me about your ideal Hugging Face repo. What use case, preferred language, or features are you looking for?"
    # Show the greeting in the chat window and seed the per-session history with it.
    chatbot = gr.Chatbot(
        type="messages",
        label="Chatbot",
        value=[{"role": "assistant", "content": initial_message}],
    )
    state = gr.State([{"role": "assistant", "content": initial_message}])
    user_input = gr.Textbox(label="Your message", placeholder="Describe your ideal repo or answer the assistant's questions...")
    send_btn = gr.Button("Send")
    end_btn = gr.Button("End Chat and Extract Keywords")
    keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False)
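
    # Button callbacks: send a message, end the chat, and reset state on load.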
|
|
|
    def user_send(user_message, history_messages):
        history_messages.append({"role": "user", "content": user_message})

        # Convert the messages-format history into (user, assistant) tuples for
        # chat_with_user, skipping the assistant greeting at index 0 and the
        # user message that was just appended.
        tuple_history = []
        for i in range(1, len(history_messages) - 1, 2):
            if i + 1 < len(history_messages):
                tuple_history.append((history_messages[i]["content"], history_messages[i + 1]["content"]))

        assistant_reply = chat_with_user(user_message, tuple_history)
        history_messages.append({"role": "assistant", "content": assistant_reply})

        # Return the updated history for the chat window and clear the textbox.
        return history_messages, ""
|
|
|
    def end_chat(history_messages):
        # Pair each user turn with the assistant reply that follows it,
        # skipping the assistant greeting at index 0.
        tuple_history = []
        for i in range(1, len(history_messages), 2):
            if i + 1 < len(history_messages):
                tuple_history.append((history_messages[i]["content"], history_messages[i + 1]["content"]))

        keywords = extract_keywords_from_conversation(tuple_history)
        return keywords
|
|
|
|
|
    def reset_chat_state():
        # Re-seed the per-session history with the assistant greeting on page load.
        return [{"role": "assistant", "content": initial_message}]
|
|
|
    # Wire the UI events to the callbacks.
    send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot, user_input])
    end_btn.click(end_chat, inputs=state, outputs=keywords_output)
    chatbot_demo.load(reset_chat_state, inputs=None, outputs=state)
|
|
|
if __name__ == "__main__": |
|
chatbot_demo.launch() |
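
# Run locally (assuming the two environment variables above are set), e.g.:
#   python app.py  # filename assumed; use this script's actual name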