import gradio as gr
from api import app
import uvicorn
import threading
import time
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse
# Port the internal FastAPI app listens on. The Gradio UI below links to it
# via the Spaces reverse-proxy path prefix (/proxy/8000/...).
FASTAPI_PORT = 8000
def create_interface():
    """Build the status/landing Gradio UI for the backend server.

    Returns:
        gr.Blocks: a page showing the available API endpoints, a server
        status box, and a link to the FastAPI docs (served behind the
        Spaces ``/proxy/8000/`` path prefix).
    """
    with gr.Blocks(title="ChatDocxAI Backend") as interface:
        gr.Markdown("# ChatDocxAI Backend")
        # Plain strings: neither markdown block has placeholders, so the
        # original f-prefixes were extraneous and have been dropped.
        gr.Markdown("""
        This is the backend server for ChatDocxAI. It provides the following endpoints:
        - `/proxy/8000/upload` - Upload documents
        - `/proxy/8000/ask` - Ask questions about uploaded documents
        - `/proxy/8000/status` - Check API status
        The frontend should be configured to communicate with this backend.
        """)
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Server Status")
                # Components register with the enclosing Blocks on creation;
                # no event handlers are wired, so no local bindings are kept.
                gr.Textbox(value="Server is running", label="Status")
        with gr.Row():
            with gr.Column():
                gr.Markdown("## API Documentation")
                gr.HTML("<a href='/proxy/8000/docs' target='_blank'>View FastAPI Docs</a>")
    return interface
# The FastAPI app gets its own port; Gradio occupies the Spaces default.
def start_fastapi():
    """Run the blocking uvicorn server loop for the FastAPI app.

    Intended to be the target of a background thread — ``uvicorn.run``
    never returns while the server is up.
    """
    uvicorn.run(app, port=FASTAPI_PORT, host="0.0.0.0")
# Run the FastAPI server in a background daemon thread so it exits with
# the main process and does not block the Gradio launch below.
fastapi_thread = threading.Thread(target=start_fastapi, daemon=True)
fastapi_thread.start()

# Give uvicorn a moment to bind its port before the UI comes up.
time.sleep(2)

# Build the Gradio front-end (launched under the __main__ guard below).
interface = create_interface()
if __name__ == "__main__":
    # Bind on all interfaces. No explicit port is passed: Gradio falls back
    # to its default (7860), which is what Hugging Face Spaces expects.
    interface.launch(server_name="0.0.0.0")