|
import gradio as gr |
|
import os |
|
|
|
|
|
|
|
# System prompt steering the chat model: interview the user about their ideal
# repo, and (per the 'End Chat' instruction) distill the conversation into
# ~5 comma-separated search keywords.
CHATBOT_SYSTEM_PROMPT = (
    "You are a helpful assistant. Your goal is to help the user describe their ideal open-source repo. "
    "Ask questions to clarify what they want, their use case, preferred language, features, etc. "
    "When the user clicks 'End Chat', analyze the conversation and return about 5 keywords for repo search. "
    "Return only the keywords as a comma-separated list."
)
|
|
|
|
|
conversation_history = [] |
|
|
|
|
|
def chat_with_user(user_message, history):
    """Send the conversation so far plus *user_message* to the LLM.

    Args:
        user_message: The user's latest message text.
        history: Gradio-style history — a list of [user_text, assistant_text]
            pairs from previous turns.

    Returns:
        The assistant's reply as a string.
    """
    # Imported lazily so the module can be loaded without the openai package.
    from openai import OpenAI

    # Pass base_url at construction time instead of mutating the client
    # afterwards — the constructor is the supported way to target a custom
    # endpoint. Env vars: "modal_api" holds the key, "base_url" the endpoint.
    client = OpenAI(
        api_key=os.getenv("modal_api"),
        base_url=os.getenv("base_url"),
    )

    messages = [{"role": "system", "content": CHATBOT_SYSTEM_PROMPT}]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        # A turn may not have an assistant reply yet; skip empty replies.
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_message})

    response = client.chat.completions.create(
        model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
        messages=messages,
        max_tokens=256,
        temperature=0.7,
    )
    return response.choices[0].message.content
|
|
|
|
|
def extract_keywords_from_conversation(history):
    """Distill a finished chat into ~5 comma-separated search keywords.

    Args:
        history: Gradio-style history — a list of [user_text, assistant_text]
            pairs. Turns without an assistant reply are omitted from the
            transcript sent to the model.

    Returns:
        The model's keyword list as a whitespace-stripped string.
    """
    print("Extracting keywords from conversation...")
    # Imported lazily so the module can be loaded without the openai package.
    from openai import OpenAI

    # Pass base_url at construction time instead of mutating the client
    # afterwards; same endpoint configuration as chat_with_user.
    client = OpenAI(
        api_key=os.getenv("modal_api"),
        base_url=os.getenv("base_url"),
    )

    conversation = "\n".join(
        f"User: {user_turn}\nAssistant: {assistant_turn}"
        for user_turn, assistant_turn in history
        if assistant_turn
    )
    system_prompt = (
        "You are an expert at helping users find open-source repos on Hugging Face. "
        "Given a conversation, extract about 5 keywords that would be most useful for searching Hugging Face repos to find the most relevant results for the user. "
        "Return only the keywords as a comma-separated list."
    )
    user_prompt = (
        "Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search."
    )

    response = client.chat.completions.create(
        model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=64,
        temperature=0.3,
    )
    print("Response received from OpenAI...")
    print(response.choices[0].message.content)
    return response.choices[0].message.content.strip()
|
|
|
# UI layout and event wiring for the repo-recommendation chat.
with gr.Blocks() as chatbot_demo:
    gr.Markdown("## Repo Recommendation Chatbot")
    chatbot = gr.Chatbot()
    state = gr.State([])
    user_input = gr.Textbox(
        label="Your message",
        placeholder="Describe your ideal repo or answer the assistant's questions...",
    )
    send_btn = gr.Button("Send")
    end_btn = gr.Button("End Chat and Extract Keywords")
    keywords_output = gr.Textbox(
        label="Extracted Keywords for Repo Search",
        interactive=False,
    )

    def user_send(user_message, history):
        # Ask the model for a reply, append the exchange, and clear the box.
        reply = chat_with_user(user_message, history)
        updated = history + [[user_message, reply]]
        return updated, updated, ""

    def end_chat(history):
        # Summarize the finished conversation into search keywords.
        return extract_keywords_from_conversation(history)

    send_btn.click(
        user_send,
        inputs=[user_input, state],
        outputs=[chatbot, state, user_input],
    )
    end_btn.click(end_chat, inputs=state, outputs=keywords_output)
|
|
|
# Script entry point: start the Gradio server when run directly.
if __name__ == "__main__":
    chatbot_demo.launch()