# Repo Recommendation Chatbot — Gradio app that interviews the user about
# their ideal Hugging Face repo, then extracts search keywords via an LLM.
import os

import gradio as gr

# from analyzer import analyze_code
# System prompt for the chatbot | |
CHATBOT_SYSTEM_PROMPT = ( | |
"You are a helpful assistant. Your goal is to help the user describe their ideal Hugging face repo. " | |
"Ask questions to clarify what they want, their use case,features, etc. " | |
"When the user clicks 'End Chat', analyze the conversation and return 1 to 5 keywords for repo search. " | |
"Return only the keywords as a comma-separated list." | |
) | |
# Store the conversation | |
conversation_history = [] | |
# Function to handle chat | |
def chat_with_user(user_message, history): | |
from openai import OpenAI | |
client = OpenAI(api_key=os.getenv("modal_api")) | |
client.base_url = os.getenv("base_url") | |
# Build the message list for the LLM | |
messages = [ | |
{"role": "system", "content": CHATBOT_SYSTEM_PROMPT} | |
] | |
for msg in history: | |
messages.append({"role": "user", "content": msg[0]}) | |
if msg[1]: | |
messages.append({"role": "assistant", "content": msg[1]}) | |
messages.append({"role": "user", "content": user_message}) | |
response = client.chat.completions.create( | |
model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ", | |
messages=messages, | |
max_tokens=256, | |
temperature=0.7 | |
) | |
assistant_reply = response.choices[0].message.content | |
return assistant_reply | |
# Function to end chat and extract keywords | |
def extract_keywords_from_conversation(history): | |
print("Extracting keywords from conversation...") | |
from openai import OpenAI | |
client = OpenAI(api_key=os.getenv("modal_api")) | |
client.base_url = os.getenv("base_url") | |
# Combine all user and assistant messages into a single string | |
conversation = "\n".join([f"User: {msg[0]}\nAssistant: {msg[1]}" for msg in history if msg[1]]) | |
system_prompt = ( | |
"You are an expert at helping users find open-source repos on Hugging Face. " | |
"Given a conversation, extract about 5 keywords that would be most useful for searching Hugging Face repos to find the most relevant results for the user. " | |
"Return only the keywords as a comma-separated list." | |
"Use keywords that are specific to the user's use case and features they are looking for." | |
) | |
user_prompt = ( | |
"Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search." | |
) | |
response = client.chat.completions.create( | |
model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ", | |
messages=[ | |
{"role": "system", "content": system_prompt}, | |
{"role": "user", "content": user_prompt} | |
], | |
max_tokens=64, | |
temperature=0.3 | |
) | |
print("Response received from OpenAI...") | |
print(response.choices[0].message.content) | |
keywords = response.choices[0].message.content.strip() | |
return keywords | |
with gr.Blocks() as chatbot_demo: | |
gr.Markdown("## Repo Recommendation Chatbot") | |
chatbot = gr.Chatbot() | |
# Initial assistant message only | |
initial_message = "Hello! Please tell me about your ideal Hugging Face repo. What use case, preferred language, or features are you looking for?" | |
state = gr.State([["", initial_message]]) # Only the initial assistant message | |
user_input = gr.Textbox(label="Your message", placeholder="Describe your ideal repo or answer the assistant's questions...") | |
send_btn = gr.Button("Send") | |
end_btn = gr.Button("End Chat and Extract Keywords") | |
keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False) | |
def user_send(user_message, history): | |
assistant_reply = chat_with_user(user_message, history) | |
history = history + [[user_message, assistant_reply]] | |
return history, history, "" | |
def end_chat(history): | |
keywords = extract_keywords_from_conversation(history) | |
return keywords | |
# Reset state to initial message when chatbot page is loaded | |
def reset_chat_state(): | |
return [["", initial_message]] | |
send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot, state, user_input]) | |
end_btn.click(end_chat, inputs=state, outputs=keywords_output) | |
chatbot_demo.load(reset_chat_state, inputs=None, outputs=state) | |
if __name__ == "__main__": | |
chatbot_demo.launch() |