import gradio as gr
import os
import logging
from typing import List, Dict, Tuple

from analyzer import combine_repo_files_for_llm, handle_load_repository
from hf_utils import download_filtered_space_files

logger = logging.getLogger(__name__)


def create_repo_explorer_tab() -> Tuple[Dict[str, gr.components.Component], Dict[str, gr.State]]:
    """
    Creates the Repo Explorer tab content and returns the component references and state variables.
    """
    states = {
        "repo_context_summary": gr.State(""),
        "current_repo_id": gr.State("")
    }

    gr.Markdown("### Deep Dive into a Specific Repository")

    with gr.Row():
        with gr.Column(scale=2):
            repo_explorer_input = gr.Textbox(
                label="Repository ID",
                placeholder="microsoft/DialoGPT-medium",
                info="Enter a Hugging Face repository ID to explore"
            )
        with gr.Column(scale=1):
            load_repo_btn = gr.Button("Load Repository", variant="primary", size="lg")

    with gr.Row():
        repo_status_display = gr.Textbox(
            label="Repository Status",
            interactive=False,
            lines=3,
            info="Current repository loading status and basic info"
        )

    with gr.Row():
        with gr.Column(scale=2):
            repo_chatbot = gr.Chatbot(
                label="Repository Assistant",
                height=400,
                type="messages",
                avatar_images=(
                    "https://cdn-icons-png.flaticon.com/512/149/149071.png",
                    "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png"
                ),
                show_copy_button=True,
                value=[]
            )

    with gr.Row():
        repo_msg_input = gr.Textbox(
            label="Ask about this repository",
            placeholder="What does this repository do? How do I use it?",
            lines=1,
            scale=4,
            info="Ask anything about the loaded repository"
        )
        repo_send_btn = gr.Button("Send", variant="primary", scale=1)

    components = {
        "repo_explorer_input": repo_explorer_input,
        "load_repo_btn": load_repo_btn,
        "repo_status_display": repo_status_display,
        "repo_chatbot": repo_chatbot,
        "repo_msg_input": repo_msg_input,
        "repo_send_btn": repo_send_btn,
    }

    return components, states


def handle_repo_user_message(user_message: str, history: List[Dict[str, str]], repo_context_summary: str, repo_id: str) -> Tuple[List[Dict[str, str]], str]:
    """Handle user messages in the repo-specific chatbot."""
    if not repo_context_summary.strip():
        return history, ""

    if not history:
        welcome_msg = f"Hello! I'm your assistant for the '{repo_id}' repository. I have analyzed all the files and created a comprehensive understanding of this repository. I'm ready to answer any questions about its functionality, usage, architecture, and more. What would you like to know?"
        history = [{"role": "assistant", "content": welcome_msg}]

    if user_message:
        history.append({"role": "user", "content": user_message})
    return history, ""


def handle_repo_bot_response(history: List[Dict[str, str]], repo_context_summary: str, repo_id: str) -> List[Dict[str, str]]:
    """Generate bot response for repo-specific questions using comprehensive context."""
    if not history or history[-1]["role"] != "user" or not repo_context_summary.strip():
        return history

    user_message = history[-1]["content"]

    repo_system_prompt = f"""You are an expert assistant for the Hugging Face repository '{repo_id}'.
You have comprehensive knowledge about this repository based on detailed analysis of all its files and components.

Use the following comprehensive analysis to answer user questions accurately and helpfully:

{repo_context_summary}

Instructions:
- Answer questions clearly and conversationally about this specific repository
- Reference specific components, functions, or features when relevant
- Provide practical guidance on installation, usage, and implementation
- If asked about code details, refer to the analysis above
- Be helpful and informative while staying focused on this repository
- If something isn't covered in the analysis, acknowledge the limitation

Answer the user's question based on your comprehensive knowledge of this repository."""

    try:
        from openai import OpenAI

        # The API key and inference endpoint are read from the "modal_api" and
        # "base_url" environment variables configured for this deployment.
        client = OpenAI(api_key=os.getenv("modal_api"))
        client.base_url = os.getenv("base_url")

        response = client.chat.completions.create(
            model="Orion-zhen/Qwen2.5-Coder-7B-Instruct-AWQ",
            messages=[
                {"role": "system", "content": repo_system_prompt},
                {"role": "user", "content": user_message}
            ],
            max_tokens=1024,
            temperature=0.7
        )

        bot_response = response.choices[0].message.content
        history.append({"role": "assistant", "content": bot_response})

    except Exception as e:
        logger.error(f"Error generating repo bot response: {e}")
        error_response = f"I apologize, but I encountered an error while processing your question: {e}"
        history.append({"role": "assistant", "content": error_response})

    return history


def initialize_repo_chatbot(repo_status: str, repo_id: str, repo_context_summary: str) -> List[Dict[str, str]]:
    """Initialize the repository chatbot with a welcome message after successful repo loading."""
    if repo_context_summary.strip() and "successfully" in repo_status.lower():
        welcome_msg = (
            f"Welcome! I've successfully analyzed the **{repo_id}** repository.\n\n"
            "**I now have comprehensive knowledge of:**\n"
            "• All files and code structure\n"
            "• Key features and capabilities\n"
            "• Installation and usage instructions\n"
            "• Architecture and implementation details\n"
            "• Dependencies and requirements\n\n"
            "**Ask me anything about this repository!**\n"
            "For example:\n"
            "• \"What does this repository do?\"\n"
            "• \"How do I install and use it?\"\n"
            "• \"What are the main components?\"\n"
            "• \"Show me usage examples\"\n\n"
            "What would you like to know?"
        )
        return [{"role": "assistant", "content": welcome_msg}]
    else:
        return []


def setup_repo_explorer_events(components: Dict[str, gr.components.Component], states: Dict[str, gr.State]):
    """Set up event handlers for the repo explorer components."""
    # Load the repository, remember its ID, then greet the user in the chatbot.
    components["load_repo_btn"].click(
        fn=handle_load_repository,
        inputs=[components["repo_explorer_input"]],
        outputs=[components["repo_status_display"], states["repo_context_summary"]]
    ).then(
        fn=lambda repo_id: repo_id,
        inputs=[components["repo_explorer_input"]],
        outputs=[states["current_repo_id"]]
    ).then(
        fn=initialize_repo_chatbot,
        inputs=[components["repo_status_display"], states["current_repo_id"], states["repo_context_summary"]],
        outputs=[components["repo_chatbot"]]
    )

    # Submitting the textbox or clicking Send appends the user message, then generates the bot reply.
    components["repo_msg_input"].submit(
        fn=handle_repo_user_message,
        inputs=[components["repo_msg_input"], components["repo_chatbot"], states["repo_context_summary"], states["current_repo_id"]],
        outputs=[components["repo_chatbot"], components["repo_msg_input"]]
    ).then(
        fn=handle_repo_bot_response,
        inputs=[components["repo_chatbot"], states["repo_context_summary"], states["current_repo_id"]],
        outputs=[components["repo_chatbot"]]
    )

    components["repo_send_btn"].click(
        fn=handle_repo_user_message,
        inputs=[components["repo_msg_input"], components["repo_chatbot"], states["repo_context_summary"], states["current_repo_id"]],
        outputs=[components["repo_chatbot"], components["repo_msg_input"]]
    ).then(
        fn=handle_repo_bot_response,
        inputs=[components["repo_chatbot"], states["repo_context_summary"], states["current_repo_id"]],
        outputs=[components["repo_chatbot"]]
    )
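

# A minimal usage sketch (an assumption, not part of the original application):
# it mounts the Repo Explorer tab inside a standalone Gradio Blocks app so the
# module can be exercised on its own. Only the helpers defined above and the
# standard Gradio Blocks/Tab API are used here.
if __name__ == "__main__":
    with gr.Blocks(title="Repo Explorer") as demo:
        with gr.Tab("Repo Explorer"):
            # Build the UI, then wire up its event handlers inside the Blocks context.
            repo_components, repo_states = create_repo_explorer_tab()
            setup_repo_explorer_events(repo_components, repo_states)
    demo.launch()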