# ChatDocxAI backend entry point for Hugging Face Spaces:
# runs the FastAPI API on an internal port and serves a Gradio status page.
# Standard library
import os
import threading
import time

# Third-party
import gradio as gr
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse

# Local
from api import app
# Port the internal FastAPI server listens on (reached via /proxy/<port>/...).
# Overridable through the FASTAPI_PORT environment variable; defaults to 8000.
FASTAPI_PORT = int(os.environ.get("FASTAPI_PORT", "8000"))
def create_interface():
    """Build the Gradio landing/status page for the backend server.

    Returns:
        gr.Blocks: a minimal dashboard showing server status and a link to
        the FastAPI docs. The real API endpoints live on the FastAPI app,
        reachable through the Spaces proxy at ``/proxy/<FASTAPI_PORT>/...``.
    """
    with gr.Blocks(title="ChatDocxAI Backend") as interface:
        gr.Markdown("# ChatDocxAI Backend")
        # Interpolate FASTAPI_PORT so the advertised endpoints stay correct
        # if the internal port is changed (the original f-string hard-coded
        # 8000 and used no placeholders).
        gr.Markdown(f"""
        This is the backend server for ChatDocxAI. It provides the following endpoints:

        - `/proxy/{FASTAPI_PORT}/upload` - Upload documents
        - `/proxy/{FASTAPI_PORT}/ask` - Ask questions about uploaded documents
        - `/proxy/{FASTAPI_PORT}/status` - Check API status

        The frontend should be configured to communicate with this backend.
        """)
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Server Status")
                # Display-only textbox; no callbacks are wired to it.
                gr.Textbox(value="Server is running", label="Status")
        with gr.Row():
            with gr.Column():
                gr.Markdown("## API Documentation")
                gr.HTML(
                    f"<a href='/proxy/{FASTAPI_PORT}/docs' target='_blank'>View FastAPI Docs</a>"
                )
    return interface
def start_fastapi():
    """Run the FastAPI app with uvicorn on FASTAPI_PORT (blocking call).

    Binds to 0.0.0.0 so the Spaces reverse proxy can reach the server.
    Intended to be run in a background thread (see module top level).
    """
    uvicorn.run(app, host="0.0.0.0", port=FASTAPI_PORT)
# Run FastAPI in a background daemon thread so the Gradio UI can own the
# main thread (and the public Spaces port). daemon=True ensures the thread
# does not block process shutdown.
fastapi_thread = threading.Thread(target=start_fastapi, daemon=True)
fastapi_thread.start()

# Give uvicorn a moment to bind before the UI starts advertising endpoints.
# NOTE(review): a fixed sleep is best-effort; a readiness poll on
# /proxy/<port>/status would be more robust — confirm before changing.
time.sleep(2)

# Build the Gradio dashboard; it is launched under the __main__ guard below.
interface = create_interface()
# Launch the Gradio interface on the default Hugging Face Spaces port (7860).
if __name__ == "__main__":
    # Bind to all interfaces; no explicit server_port is passed, so Gradio
    # uses its default (7860), which is the port Hugging Face Spaces expects.
    interface.launch(server_name="0.0.0.0")