File size: 3,132 Bytes
34a1cb8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import gradio as gr
from collections.abc import Generator
from openai import OpenAI
from gradio.chat_interface import ChatInterface
from pathlib import Path
import shutil
import os
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

# --- Deployment / app configuration -----------------------------------------
USERNAME = "ahmedheakl"          # HF account that owns the Space (used to build public URLs)
SPACE_NAME = "arabic-vlm-app"    # HF Space name (used to build public URLs)
TITLE = "AIN Arabic VLM"
DESCRIPTION = "Welcome to the AIN Arabic VLM chatbot. The best Arabic-English VLM developed by MBZUAI."
PUBLIC_DIR = Path("static")      # local dir served below at /static
TOP_N_HISTORY = 2                # max legacy-format history entries forwarded to the model
LOGO_PATH = "./logo.jpeg"
# pathlib idiom (file already uses Path throughout) instead of os.makedirs.
PUBLIC_DIR.mkdir(parents=True, exist_ok=True)

app = FastAPI()
# Expose PUBLIC_DIR over HTTP so uploaded images / the logo are URL-addressable.
app.mount("/static", StaticFiles(directory=PUBLIC_DIR), name="static")


# Copy the logo into the public directory so it can be served at /static/.
shutil.copy(LOGO_PATH, PUBLIC_DIR / Path(LOGO_PATH).name)
logo_path = f"/static/{Path(LOGO_PATH).name}"  # URL path of the served logo

def load_chat(
    base_url: str,
    model: str,
    token: str | None = None,
    *,
    system_message: str | None = None,
    **kwargs,
) -> gr.ChatInterface:
    """Build a streaming, multimodal ChatInterface backed by an OpenAI-compatible API.

    Args:
        base_url: Base URL of the OpenAI-compatible server.
        model: Model name passed to the chat-completions endpoint.
        token: API key; may be a dummy value for local servers (e.g. "ollama").
        system_message: Optional system prompt prepended to every request.
        **kwargs: Forwarded to ``gr.ChatInterface`` (e.g. ``multimodal=True``).

    Returns:
        A configured ``gr.ChatInterface`` that streams partial responses.
    """
    client = OpenAI(api_key=token, base_url=base_url)
    system_prefix = (
        [{"role": "system", "content": system_message}] if system_message else []
    )

    def open_api_stream(
        message: dict, history: list | None
    ) -> Generator[str, None, None]:
        """Stream one completion turn, yielding the accumulated reply text."""
        history = history or []
        if history and isinstance(history[0], (list, tuple)):
            # Legacy tuple-format history: truncate, then convert to
            # messages format via a private gradio helper.
            # NOTE(review): this keeps the *oldest* TOP_N_HISTORY entries;
            # confirm history[-TOP_N_HISTORY:] (most recent) was not intended.
            history = history[:TOP_N_HISTORY]
            history = ChatInterface._tuples_to_messages(history)
        # Bug fix: previously the system message was only included when
        # history was empty (first turn only); prepend it on every request.
        history = system_prefix + history
        files = message.get('files', [])
        content = [
            {"type": "text", "text": message.get('text', '')}
        ]
        if files:
            # Publish the uploaded image under /static so the remote VLM can
            # fetch it by URL (the API receives a URL, not raw bytes).
            src_path = Path(files[0])
            dest_path = PUBLIC_DIR / src_path.name
            shutil.move(src_path, dest_path)
            image_url = f"https://{USERNAME}-{SPACE_NAME}.hf.space/static/{src_path.name}"
            content.append({"type": "image_url", "image_url": {"url": image_url}})
        stream = client.chat.completions.create(
            model=model,
            messages=history + [{"role": "user", "content": content}],
            stream=True,
        )
        response = ""
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                response += delta
                yield response

    return ChatInterface(
        open_api_stream, type="messages", **kwargs
    )
with gr.Blocks(theme=gr.themes.Soft()) as gradio_interface:
    # Page-level styling and header.
    gr.Markdown(
        """
        <style>
        .container { margin: 0 auto; max-width: 1200px; padding: 20px; }
        .header { text-align: center; margin-bottom: 40px; }
        </style>
        """
    )
    # Bug fix: the ChatInterface was previously created OUTSIDE this Blocks
    # context and started via .launch(), which blocks the script on a second,
    # standalone Gradio server — so the FastAPI mount below never served the
    # chat UI. Creating it inside the context renders it into this page.
    load_chat(
        "https://0f21-5-195-0-150.ngrok-free.app/v1",
        model="test",
        token="ollama",
        multimodal=True,
    )

# Serve the assembled Gradio UI from the FastAPI app at "/".
app = gr.mount_gradio_app(app, gradio_interface, path="/")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)