# hfcontext7 / app.py
import json
import os
import shutil
import subprocess
import uuid
from pathlib import Path

import dotenv
import gradio as gr
from openai import OpenAI
from pymilvus import MilvusClient, model

from repo2txt import make_tree
from schemas import Response
from utils import choice_prompt, copy_search_results, create_documentation_string
_ = dotenv.load_dotenv()

# Build the documentation corpus and the Milvus vector index before serving;
# check=True makes startup fail loudly if either build script errors out.
subprocess.run(["python3", "scripts/make_docs.py"], check=True)
subprocess.run(["python3", "scripts/make_rag_db.py"], check=True)

# Milvus Lite stores the embedded documentation chunks in a local file.
client = MilvusClient("milvus.db")

embedding_fn = model.dense.OpenAIEmbeddingFunction(
    model_name="text-embedding-3-large",
    api_key=os.environ.get("OPENAI_API_KEY"),
    dimensions=3072,
)

oai_client = OpenAI()
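
# Illustrative sketch (not executed here): encode_queries returns one dense
# vector per query string, matching the 3072 dimensions configured above.
#
#   vecs = embedding_fn.encode_queries(["LoRA fine-tuning"])
#   assert len(vecs) == 1 and len(vecs[0]) == 3072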


def list_huggingface_resources_names() -> list[str]:
    """List the names of the libraries, services, and other resources available within the HuggingFace ecosystem.

    Returns:
        A list of libraries, services, and other resources available within the HuggingFace ecosystem
    """
    with open("repos_config.json", "r") as f:
        repos = json.load(f)
    return [repo["title"] for repo in repos]
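
# Example output (illustrative; the actual titles come from repos_config.json,
# so the values below are hypothetical):
#   list_huggingface_resources_names()  # -> ["Transformers", "PEFT", ...]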


def search_documents(query: str, resource_names: list[str] | None = None, topk: int = 50):
    """Search for relevant documents in the Milvus database.

    Args:
        query: Natural-language query to embed and match against the docs.
        resource_names: Optional list of resource names to restrict the search to.
        topk: Maximum number of hits to return.

    Returns:
        Milvus search results with "text", "file_path", and "resource" fields.
    """
    query_vectors = embedding_fn.encode_queries([query])
    search_params = {
        "collection_name": "hf_docs",
        "data": query_vectors,
        "limit": topk,
        "output_fields": ["text", "file_path", "resource"],
    }
    # Restrict the search to the requested resources, if any.
    if resource_names:
        if len(resource_names) == 1:
            search_params["filter"] = f"resource == '{resource_names[0]}'"
        else:
            resource_list = "', '".join(resource_names)
            search_params["filter"] = f"resource in ['{resource_list}']"
    return client.search(**search_params)
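
# Example call (illustrative; assumes the "hf_docs" collection has been built,
# and that resource names never contain single quotes, since the filter
# expression is assembled by plain string interpolation):
#
#   hits = search_documents("LoRA methods", resource_names=["peft", "transformers"])
#   # -> filter: "resource in ['peft', 'transformers']"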


def get_huggingface_documentation(topic: str, resource_names: list[str] | None = None) -> str:
    """Get the documentation for the given topic and resource names.

    Args:
        topic: Focus the docs on a specific topic (e.g. "Anthropic Provider Chat UI", "LoRA methods PEFT" or "TGI on Intel GPUs")
        resource_names: A list of resource names relevant to the topic. Must be as specific as possible. None or an empty list means all resources.

    Returns:
        A string of documentation for the given topic and resource names
    """
    temp_folder = str(uuid.uuid4())
    try:
        # Search for relevant documents, optionally filtered by resource.
        search_results = search_documents(topic, resource_names)

        # Copy the matching files into a temporary folder and render its tree.
        copy_search_results(search_results, temp_folder)
        tree_structure = make_tree(Path(temp_folder) / "docs")

        # Ask GPT-4o to pick the single most relevant file from the tree.
        response = oai_client.responses.parse(
            model="gpt-4o",
            input=[
                {
                    "role": "user",
                    "content": choice_prompt.substitute(
                        question=topic, tree_structure=tree_structure
                    ),
                }
            ],
            text_format=Response,
        )
        file_id = response.output_parsed.file_id
        print(f"{topic} -> {file_id}")

        # Assemble the documentation string from the chosen file.
        return create_documentation_string([file_id], temp_folder)
    except Exception as e:
        return f"Error generating documentation: {str(e)}"
    finally:
        # Clean up the temporary folder even when an error occurred.
        shutil.rmtree(temp_folder, ignore_errors=True)


def load_readme() -> str:
    """Load and return the README content, skipping YAML frontmatter."""
    try:
        with open("README.md", "r", encoding="utf-8") as f:
            content = f.read()

        lines = content.split("\n")
        start_index = 0

        # Skip YAML frontmatter if it exists: find the second '---' line.
        if content.startswith("---"):
            dash_count = 0
            for i, line in enumerate(lines):
                if line.strip() == "---":
                    dash_count += 1
                    if dash_count == 2:
                        start_index = i + 1
                        break

        # Start rendering at the first real section heading, if present.
        for i in range(start_index, len(lines)):
            if lines[i].startswith("### The Problem: Your LLM is stuck in the past"):
                start_index = i
                break

        return "\n".join(lines[start_index:])
    except FileNotFoundError:
        return "README.md not found"


list_resources_demo = gr.Interface(
    fn=list_huggingface_resources_names,
    inputs=[],
    outputs="json",
)

get_docs_demo = gr.Interface(
    fn=get_huggingface_documentation,
    inputs=["text", "json"],
    outputs="text",
)

# Create README tab with Markdown component
with gr.Blocks() as readme_tab:
    gr.Markdown(load_readme())

# Create tabbed interface
demo = gr.TabbedInterface(
    [readme_tab, list_resources_demo, get_docs_demo],
    ["Quickstart", "List Resources", "Get Documentation"],
    title="Open HFContext7 MCP - Up-to-date 🤗 Docs For Any Prompt",
)
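
# Launching with mcp_server=True also exposes the interface functions above as
# MCP tools (in recent Gradio releases the MCP endpoint is served under
# /gradio_api/mcp/; the exact path may vary between versions).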
demo.launch(mcp_server=True)