File size: 3,805 Bytes
5b2420c 9a88164 f789605 5b2420c 9a88164 5b2420c 9a88164 5b2420c f789605 8074815 9a88164 5b2420c 8074815 9a88164 8074815 5b2420c f789605 09220f3 8074815 5b2420c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 |
import gradio as gr
import os
# from analyzer import analyze_code
# System prompt for the chatbot: steers the LLM to interview the user about
# their ideal open-source repo, then (on "End Chat") emit ~5 search keywords.
CHATBOT_SYSTEM_PROMPT = (
    "You are a helpful assistant. Your goal is to help the user describe their ideal open-source repo. "
    "Ask questions to clarify what they want, their use case, preferred language, features, etc. "
    "When the user clicks 'End Chat', analyze the conversation and return about 5 keywords for repo search. "
    "Return only the keywords as a comma-separated list."
)
# Store the conversation.
# NOTE(review): this module-level list is never read or written in the visible
# code — per-session history lives in the gr.State below. Likely dead; confirm
# before removing.
conversation_history = []
# Function to handle chat
def chat_with_user(user_message, history):
from openai import OpenAI
client = OpenAI(api_key=os.getenv("modal_api"))
client.base_url = os.getenv("base_url")
# Build the message list for the LLM
messages = [
{"role": "system", "content": CHATBOT_SYSTEM_PROMPT}
]
for msg in history:
messages.append({"role": "user", "content": msg[0]})
if msg[1]:
messages.append({"role": "assistant", "content": msg[1]})
messages.append({"role": "user", "content": user_message})
response = client.chat.completions.create(
model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
messages=messages,
max_tokens=256,
temperature=0.7
)
assistant_reply = response.choices[0].message.content
return assistant_reply
# End-of-chat handler: condenses the whole conversation into ~5 search
# keywords via a second LLM call.
def extract_keywords_from_conversation(history):
    """Return a comma-separated keyword string distilled from *history*.

    Only completed turns (those with a non-empty assistant reply) are fed to
    the model. Endpoint/credentials come from the ``base_url`` and
    ``modal_api`` environment variables.
    """
    print("Extracting keywords from conversation...")
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv("modal_api"))
    client.base_url = os.getenv("base_url")

    # Flatten the finished turns into one plain-text transcript.
    transcript = "\n".join(
        f"User: {user_turn}\nAssistant: {assistant_turn}"
        for user_turn, assistant_turn in history
        if assistant_turn
    )
    system_prompt = (
        "You are an expert at helping users find open-source repos on Hugging Face. "
        "Given a conversation, extract about 5 keywords that would be most useful for searching Hugging Face repos to find the most relevant results for the user. "
        "Return only the keywords as a comma-separated list."
    )
    user_prompt = (
        "Conversation:\n" + transcript + "\n\nExtract about 5 keywords for Hugging Face repo search."
    )

    response = client.chat.completions.create(
        model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=64,
        temperature=0.3,
    )
    print("Response received from OpenAI...")
    print(response.choices[0].message.content)
    return response.choices[0].message.content.strip()
# UI layout and event wiring for the repo-recommendation chatbot.
with gr.Blocks() as chatbot_demo:
    gr.Markdown("## Repo Recommendation Chatbot")

    chatbot = gr.Chatbot()
    # Per-session conversation history, as [user, assistant] pairs.
    state = gr.State([])
    user_input = gr.Textbox(
        label="Your message",
        placeholder="Describe your ideal repo or answer the assistant's questions...",
    )
    send_btn = gr.Button("Send")
    end_btn = gr.Button("End Chat and Extract Keywords")
    keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False)

    def user_send(user_message, history):
        """Get the LLM reply, append the exchange, and clear the input box."""
        reply = chat_with_user(user_message, history)
        updated = history + [[user_message, reply]]
        # Outputs map to (chatbot display, state, user_input).
        return updated, updated, ""

    def end_chat(history):
        """Distil the finished conversation into search keywords."""
        return extract_keywords_from_conversation(history)

    send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot, state, user_input])
    end_btn.click(end_chat, inputs=state, outputs=keywords_output)

if __name__ == "__main__":
    chatbot_demo.launch()