File size: 5,360 Bytes
5b2420c 9a88164 f789605 5b2420c db1867d 1351057 db1867d 5b2420c 9a88164 5b2420c 1d3eed5 5b2420c f789605 8074815 9a88164 5b2420c 8074815 db1867d 8074815 bd50063 db1867d 8074815 1d3eed5 8074815 5b2420c f789605 09220f3 8074815 5b2420c 34139eb 3fa421f 7687d63 34139eb 5b2420c 34139eb 5b2420c 34139eb 5b2420c 3fa421f 34139eb 3fa421f 34139eb 5b2420c 3fa421f 5b2420c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 |
import gradio as gr
import os
# from analyzer import analyze_code
# System prompt for the chatbot: steers the LLM toward gathering the user's
# requirements for a Hugging Face Space before keyword extraction happens.
# Fixes: typo "undertsand" -> "understand"; added the missing space between
# the concatenated sentences ("user.When" -> "user. When").
CHATBOT_SYSTEM_PROMPT = (
    "Your goal is to understand what the user needs in their ideal Hugging Face repository. Specifically a Hugging Face Space. "
    "Engage in a natural conversation, ask clarifying questions about their needs, such as their use case or specific features they are looking for. "
    "Keep your responses concise and focused on helping the user. "
    "When you feel you have gathered enough detailed information about their requirements, ask the user to end chat."
)
# Store the conversation
# NOTE(review): this module-level list appears unused — per-session history is
# kept in gr.State below. Kept for backward compatibility; confirm before removing.
conversation_history = []
# Function to handle chat
def chat_with_user(user_message, history):
    """Send the conversation so far plus the new user message to the LLM.

    Args:
        user_message: The latest message typed by the user.
        history: Prior turns as (user_text, assistant_text) tuples; turns
            with an empty assistant part contribute only the user message.

    Returns:
        The assistant's reply text.
    """
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv("modal_api"))
    client.base_url = os.getenv("base_url")

    # Replay the whole conversation so the model has full context.
    messages = [{"role": "system", "content": CHATBOT_SYSTEM_PROMPT}]
    for user_text, assistant_text in history:
        messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": user_message})

    completion = client.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=messages,
        max_tokens=256,
        temperature=0.7,
    )
    return completion.choices[0].message.content
# Function to end chat and extract keywords
def extract_keywords_from_conversation(history):
    """Ask the LLM to distill the conversation into ~5 search keywords.

    Args:
        history: List of (user_text, assistant_text) tuples; turns with an
            empty/None assistant part are skipped.

    Returns:
        The model's comma-separated keyword string, stripped of whitespace.
    """
    print("Extracting keywords from conversation...")
    from openai import OpenAI
    client = OpenAI(api_key=os.getenv("modal_api"))
    client.base_url = os.getenv("base_url")
    # Flatten the dialogue into a single transcript for the prompt.
    conversation = "\n".join(
        f"User: {user_text}\nAssistant: {assistant_text}"
        for user_text, assistant_text in history
        if assistant_text
    )
    # Prompt typos fixed ("Spaces..", "Dont") and missing spaces added between
    # the concatenated sentences so the model sees clean instructions.
    system_prompt = (
        "You are an expert at helping find Hugging Face Spaces. You must look at the conversation carefully. "
        "Given a conversation, extract about 5 keywords that would be most useful for searching Hugging Face Spaces. "
        "Return only the keywords as a comma-separated list. "
        "Use keywords that are specific to the user's use case and features they are looking for. "
        "Don't use very generic search words like programming, language, hugging face, ML, AI, etc."
    )
    user_prompt = (
        "Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search."
    )
    response = client.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=64,
        temperature=0.3,
    )
    print("Response received from OpenAI...")
    print(response.choices[0].message.content)
    return response.choices[0].message.content.strip()
with gr.Blocks() as chatbot_demo:
    # --- UI layout for the requirement-gathering chatbot page ---
    gr.Markdown("## Repo Recommendation Chatbot")
    # Chat display in Gradio "messages" format ({"role", "content"} dicts).
    chatbot = gr.Chatbot(type="messages", label="Chatbot")
    # Initial assistant message only
    initial_message = "Hello! Please tell me about your ideal Hugging Face repo. What use case, preferred language, or features are you looking for?"
    # Per-session history, seeded with the assistant greeting.
    # NOTE(review): the Chatbot component itself is not seeded with this greeting,
    # so the greeting only shows up after the first exchange — confirm intended.
    state = gr.State([{"role": "assistant", "content": initial_message}])
    user_input = gr.Textbox(label="Your message", placeholder="Describe your ideal repo or answer the assistant's questions...")
    send_btn = gr.Button("Send")
    end_btn = gr.Button("End Chat and Extract Keywords")
    # Read-only output box filled by the keyword-extraction handler.
    keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False)
def user_send(user_message, history_messages):
# Add user message to the UI
history_messages.append({"role": "user", "content": user_message})
# Convert to tuple format for the API call
tuple_history = []
for i in range(0, len(history_messages) -1, 2): # Exclude the last user message
if i + 1 < len(history_messages):
tuple_history.append((history_messages[i]['content'], history_messages[i+1]['content']))
# Get bot response and add to UI
assistant_reply = chat_with_user(user_message, tuple_history)
history_messages.append({"role": "assistant", "content": assistant_reply})
return history_messages, ""
def end_chat(history_messages):
# Convert to tuple format for the API call
tuple_history = []
for i in range(0, len(history_messages), 2):
if i + 1 < len(history_messages):
tuple_history.append((history_messages[i]['content'], history_messages[i+1]['content']))
keywords = extract_keywords_from_conversation(tuple_history)
return keywords
    # Reset state to initial message when chatbot page is loaded
    def reset_chat_state():
        # Fresh per-session history containing only the assistant greeting
        # (closure over `initial_message` defined above in the Blocks scope).
        return [{"role": "assistant", "content": initial_message}]
send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot, user_input])
end_btn.click(end_chat, inputs=state, outputs=keywords_output)
chatbot_demo.load(reset_chat_state, inputs=None, outputs=state)
# Launch the Gradio app only when this file is executed directly (not on import).
# FIX: removed a stray trailing "|" artifact that made the line a syntax error.
if __name__ == "__main__":
    chatbot_demo.launch()