File size: 5,179 Bytes
5b2420c 9a88164 f789605 5b2420c 1351057 5b2420c 9a88164 5b2420c 1d3eed5 5b2420c f789605 8074815 9a88164 5b2420c 8074815 bd50063 8074815 1d3eed5 8074815 5b2420c f789605 09220f3 8074815 5b2420c 34139eb 3fa421f 7687d63 34139eb 5b2420c 34139eb 5b2420c 34139eb 5b2420c 3fa421f 34139eb 3fa421f 34139eb 5b2420c 3fa421f 5b2420c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 |
import gradio as gr
import os
# from analyzer import analyze_code
# System prompt for the chatbot
# Steers the assistant toward short, clarifying questions about the user's
# desired Hugging Face repository (use case, language, features).
CHATBOT_SYSTEM_PROMPT = (
    "You are a helpful and friendly assistant. Your goal is to help the user discover their ideal Hugging Face repository. "
    "Engage in a natural conversation, ask clarifying questions about their needs, such as their use case, preferred programming languages, or specific features they are looking for. "
    "Keep your responses concise and focused on helping the user."
)
# Store the conversation
# NOTE(review): `conversation_history` is never referenced in this file —
# per-session history is kept in gr.State below. Candidate for removal;
# confirm no other module imports it before deleting.
conversation_history = []
# Function to handle chat
def chat_with_user(user_message, history):
    """Send the conversation so far plus the new user message to the LLM
    and return the assistant's reply text.

    Args:
        user_message: the latest user utterance (str).
        history: list of (user_text, assistant_text) tuples for prior
            turns; a falsy assistant entry (turn still pending) is skipped.

    Returns:
        The assistant reply content from the first completion choice.
    """
    from openai import OpenAI

    # Endpoint credentials/location come from the environment
    # (modal_api / base_url), configured at deployment time.
    client = OpenAI(api_key=os.getenv("modal_api"))
    client.base_url = os.getenv("base_url")

    # Flatten the tuple history into the OpenAI chat message format,
    # starting from the fixed system prompt.
    messages = [{"role": "system", "content": CHATBOT_SYSTEM_PROMPT}]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_message})

    completion = client.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=messages,
        max_tokens=256,
        temperature=0.7,
    )
    return completion.choices[0].message.content
# Function to end chat and extract keywords
def extract_keywords_from_conversation(history):
    """Ask the LLM to distill a finished conversation into ~5 search
    keywords for Hugging Face repo search.

    Args:
        history: list of (user_message, assistant_message) tuples; turns
            with no assistant reply are skipped.

    Returns:
        The model's comma-separated keyword string, stripped of
        surrounding whitespace.
    """
    print("Extracting keywords from conversation...")
    from openai import OpenAI
    client = OpenAI(api_key=os.getenv("modal_api"))
    client.base_url = os.getenv("base_url")
    # Flatten the dialogue into a single transcript string.
    conversation = "\n".join(
        f"User: {user}\nAssistant: {assistant}"
        for user, assistant in history
        if assistant
    )
    system_prompt = (
        "You are an expert at helping users find open-source repos on Hugging Face. "
        "Given a conversation, extract about 5 keywords that would be most useful for searching Hugging Face repos to find the most relevant results for the user. "
        # FIX: trailing space added — the original implicit concatenation
        # produced "...comma-separated list.Use keywords...", fusing two
        # sentences in the prompt sent to the model.
        "Return only the keywords as a comma-separated list. "
        "Use keywords that are specific to the user's use case and features they are looking for."
    )
    user_prompt = (
        "Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search."
    )
    response = client.chat.completions.create(
        model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=64,
        temperature=0.3,
    )
    print("Response received from OpenAI...")
    print(response.choices[0].message.content)
    keywords = response.choices[0].message.content.strip()
    return keywords
# --- Gradio UI: chat page with send / end-chat controls ---
with gr.Blocks() as chatbot_demo:
    gr.Markdown("## Repo Recommendation Chatbot")
    # type="messages" expects a list of {"role": ..., "content": ...} dicts.
    chatbot = gr.Chatbot(type="messages", label="Chatbot")
    # Initial assistant message only
    initial_message = "Hello! Please tell me about your ideal Hugging Face repo. What use case, preferred language, or features are you looking for?"
    # Per-session message history, seeded with the assistant greeting.
    state = gr.State([{"role": "assistant", "content": initial_message}])
    user_input = gr.Textbox(label="Your message", placeholder="Describe your ideal repo or answer the assistant's questions...")
    send_btn = gr.Button("Send")
    end_btn = gr.Button("End Chat and Extract Keywords")
    # Read-only box showing the keywords produced when the chat is ended.
    keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False)
def user_send(user_message, history_messages):
# Add user message to the UI
history_messages.append({"role": "user", "content": user_message})
# Convert to tuple format for the API call
tuple_history = []
for i in range(0, len(history_messages) -1, 2): # Exclude the last user message
if i + 1 < len(history_messages):
tuple_history.append((history_messages[i]['content'], history_messages[i+1]['content']))
# Get bot response and add to UI
assistant_reply = chat_with_user(user_message, tuple_history)
history_messages.append({"role": "assistant", "content": assistant_reply})
return history_messages, ""
def end_chat(history_messages):
# Convert to tuple format for the API call
tuple_history = []
for i in range(0, len(history_messages), 2):
if i + 1 < len(history_messages):
tuple_history.append((history_messages[i]['content'], history_messages[i+1]['content']))
keywords = extract_keywords_from_conversation(tuple_history)
return keywords
    # Reset state to initial message when chatbot page is loaded
    def reset_chat_state():
        """Return a fresh history containing only the assistant greeting."""
        return [{"role": "assistant", "content": initial_message}]
    # Wire events: Send updates the chat and clears the input box;
    # End Chat writes keywords to the output box; page load resets the
    # per-session history to the greeting.
    send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot, user_input])
    end_btn.click(end_chat, inputs=state, outputs=keywords_output)
    chatbot_demo.load(reset_chat_state, inputs=None, outputs=state)
if __name__ == "__main__":
    chatbot_demo.launch()