# (Hugging Face Spaces page residue — "Spaces: Sleeping" status text, not code.)
| import gradio as gr | |
| import os | |
| # from analyzer import analyze_code | |
# System prompt steering the chat model: keep the assistant focused on
# eliciting the user's repo requirements (use case, language, features).
CHATBOT_SYSTEM_PROMPT = (
    "You are a helpful and friendly assistant. Your goal is to help the user discover their ideal Hugging Face repository. "
    "Engage in a natural conversation, ask clarifying questions about their needs, such as their use case, preferred programming languages, or specific features they are looking for. "
    "Keep your responses concise and focused on helping the user."
)
# Module-level conversation store — NOTE(review): not referenced anywhere in the
# visible code (chat state lives in gr.State below); presumably vestigial.
conversation_history = []
def chat_with_user(user_message, history):
    """Send one chat turn to the LLM and return the assistant's reply.

    Args:
        user_message: The user's newest message (not yet in ``history``).
        history: Prior turns as ``(user_text, assistant_text)`` tuples.

    Returns:
        The assistant's reply text from the chat-completions endpoint.
    """
    from openai import OpenAI

    # Point the OpenAI client at the custom (Modal-hosted) endpoint.
    llm = OpenAI(api_key=os.getenv("modal_api"))
    llm.base_url = os.getenv("base_url")

    # Rebuild the full transcript: system prompt, prior turns, new message.
    transcript = [{"role": "system", "content": CHATBOT_SYSTEM_PROMPT}]
    for user_turn, assistant_turn in history:
        transcript.append({"role": "user", "content": user_turn})
        if assistant_turn:
            transcript.append({"role": "assistant", "content": assistant_turn})
    transcript.append({"role": "user", "content": user_message})

    completion = llm.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=transcript,
        max_tokens=256,
        temperature=0.7,
    )
    return completion.choices[0].message.content
def extract_keywords_from_conversation(history):
    """Distill a finished chat into ~5 Hugging Face search keywords.

    Args:
        history: The conversation as ``(user_text, assistant_text)`` tuples;
            turns where the assistant never replied are skipped.

    Returns:
        The model's comma-separated keyword list as a stripped string.
    """
    print("Extracting keywords from conversation...")
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv("modal_api"))
    client.base_url = os.getenv("base_url")

    # Flatten the paired turns into one readable transcript for the model.
    conversation = "\n".join(
        f"User: {msg[0]}\nAssistant: {msg[1]}" for msg in history if msg[1]
    )
    system_prompt = (
        "You are an expert at helping users find open-source repos on Hugging Face. "
        "Given a conversation, extract about 5 keywords that would be most useful for searching Hugging Face repos to find the most relevant results for the user. "
        # BUG FIX: the original literal ended without a trailing space, so the
        # concatenated prompt read "...comma-separated list.Use keywords...".
        "Return only the keywords as a comma-separated list. "
        "Use keywords that are specific to the user's use case and features they are looking for."
    )
    user_prompt = (
        "Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search."
    )
    response = client.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=64,
        temperature=0.3,
    )
    print("Response received from OpenAI...")
    print(response.choices[0].message.content)
    keywords = response.choices[0].message.content.strip()
    return keywords
with gr.Blocks() as chatbot_demo:
    gr.Markdown("## Repo Recommendation Chatbot")
    # Chat history is kept in Gradio's "messages" format:
    # a list of {"role": ..., "content": ...} dicts.
    chatbot = gr.Chatbot(type="messages", label="Chatbot")
    # Initial assistant message only
    initial_message = "Hello! Please tell me about your ideal Hugging Face repo. What use case, preferred language, or features are you looking for?"
    state = gr.State([{"role": "assistant", "content": initial_message}])
    user_input = gr.Textbox(label="Your message", placeholder="Describe your ideal repo or answer the assistant's questions...")
    send_btn = gr.Button("Send")
    end_btn = gr.Button("End Chat and Extract Keywords")
    keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False)

    def _messages_to_pairs(history_messages):
        """Convert messages-format history to (user, assistant) tuples.

        BUG FIX: the previous conversion paired messages at even offsets,
        but the history starts with an *assistant* greeting, so every pair
        had its roles swapped. Instead, pair each user turn with the
        assistant reply that follows it; the greeting and any trailing
        unanswered user message are dropped.
        """
        pairs = []
        pending_user = None
        for msg in history_messages:
            if msg["role"] == "user":
                pending_user = msg["content"]
            elif msg["role"] == "assistant" and pending_user is not None:
                pairs.append((pending_user, msg["content"]))
                pending_user = None
        return pairs

    def user_send(user_message, history_messages):
        """Append the user's message, get the LLM reply, and clear the box."""
        history_messages.append({"role": "user", "content": user_message})
        # Prior history excludes the message just appended; chat_with_user
        # adds the new message itself.
        prior_turns = _messages_to_pairs(history_messages[:-1])
        assistant_reply = chat_with_user(user_message, prior_turns)
        history_messages.append({"role": "assistant", "content": assistant_reply})
        return history_messages, ""

    def end_chat(history_messages):
        """Summarize the whole conversation into search keywords."""
        return extract_keywords_from_conversation(_messages_to_pairs(history_messages))

    # Reset state to initial message when chatbot page is loaded
    def reset_chat_state():
        return [{"role": "assistant", "content": initial_message}]

    send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot, user_input])
    end_btn.click(end_chat, inputs=state, outputs=keywords_output)
    chatbot_demo.load(reset_chat_state, inputs=None, outputs=state)

if __name__ == "__main__":
    chatbot_demo.launch()