File size: 4,665 Bytes
7a01de3
 
 
ddd25b9
7a01de3
 
bdb09a9
 
7a01de3
 
81449e9
7a01de3
f7e23b0
 
bdb09a9
 
 
 
 
81449e9
 
 
bdb09a9
7a01de3
 
 
bdb09a9
7a01de3
 
 
 
 
 
 
 
 
 
 
bdb09a9
7a01de3
 
 
 
 
 
 
 
bdb09a9
7a01de3
 
 
 
 
 
 
 
bdb09a9
7a01de3
 
 
ddd25b9
 
bdb09a9
 
 
7a01de3
 
 
ddd25b9
7a01de3
 
ddd25b9
 
7a01de3
 
 
 
 
bdb09a9
7a01de3
 
 
 
 
 
 
 
 
 
ddd25b9
7a01de3
 
 
 
 
ddd25b9
7a01de3
 
 
bdb09a9
7a01de3
 
bdb09a9
7a01de3
 
 
bdb09a9
7a01de3
 
bdb09a9
7a01de3
bdb09a9
7a01de3
bdb09a9
 
 
7a01de3
 
 
 
bdb09a9
 
 
7a01de3
 
bdb09a9
7a01de3
 
bdb09a9
 
7a01de3
bdb09a9
7a01de3
bdb09a9
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import os
import asyncio
import logging
from typing import Optional, List, Union, Literal
from pathlib import Path
from pydantic import BaseModel, Field
from gradio import Interface, Blocks
from gradio.components import Textbox, Image
from gradio.data_classes import FileData, GradioModel, GradioRootModel
from transformers import pipeline
from diffusers import DiffusionPipeline
import torch
import gradio as gr

# --- Gated image-generation model -------------------------------------------
# FLUX.1-dev is a gated repository on the Hugging Face Hub: downloading its
# weights requires an access token, supplied via the HUGGINGFACE_TOKEN env var.
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if not hf_token:
    raise RuntimeError("Missing HUGGINGFACE_TOKEN env var for gated model access.")

image_model = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    # bf16 halves memory when a GPU is present; fall back to fp32 on CPU-only hosts.
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    token=hf_token,  # `use_auth_token` is deprecated in diffusers; `token` replaces it
)
# Offload submodules to CPU between forward passes so the pipeline fits in
# limited VRAM (no-op benefit on CPU-only machines, but harmless).
image_model.enable_model_cpu_offload()

# Data models
class FileDataDict(BaseModel):
    """Pydantic schema describing a file payload exchanged with the UI.

    Mirrors the shape of Gradio's ``FileData`` so plain dicts can be
    validated without depending on Gradio's own data classes.
    """

    path: str  # local filesystem path to the file (only required field)
    url: Optional[str] = None  # publicly reachable URL, if one exists
    size: Optional[int] = None  # size in bytes, if known
    orig_name: Optional[str] = None  # original filename as uploaded
    mime_type: Optional[str] = None  # e.g. "image/png"; None when undetected
    is_stream: Optional[bool] = False  # True when the file is a live stream
    class Config:
        # Permit non-pydantic types in annotations (pydantic v1-style config).
        arbitrary_types_allowed = True

class MessageDict(BaseModel):
    """Pydantic schema for a single chat message.

    ``content`` may be plain text, a file payload, or a tuple (Gradio uses
    tuples for (file, caption)-style content).
    """

    # Fix: the original Union listed `str` twice (str, FileDataDict, tuple, str).
    content: Union[str, FileDataDict, tuple]
    role: Literal["user", "assistant", "system"]  # chat role of the sender
    metadata: Optional[dict] = None  # free-form extras (e.g. tool-call info)
    options: Optional[List[dict]] = None  # optional suggested replies / choices
    class Config:
        # Permit non-pydantic types (e.g. tuple contents) in annotations.
        arbitrary_types_allowed = True

class ChatMessage(GradioModel):
    """Gradio-native model for one chat message (used by the messages API)."""

    role: Literal["user", "assistant", "system"]  # chat role of the sender
    # Fix: the original Union listed `str` twice (str, FileData, str).
    content: Union[str, FileData]
    metadata: dict = Field(default_factory=dict)  # per-message extras; fresh dict per instance
    options: Optional[List[dict]] = None  # optional suggested replies / choices
    class Config:
        # Permit non-pydantic types (FileData) in annotations.
        arbitrary_types_allowed = True

class ChatbotDataMessages(GradioRootModel):
    """Root model wrapping the full conversation: an ordered list of messages."""

    root: List[ChatMessage]

# Reasoning Engine
class UniversalReasoning:
    def __init__(self, config):
        self.config = config
        self.context_history = []
        self.sentiment_analyzer = pipeline("sentiment-analysis")
        self.deepseek_model = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
        self.davinci_model = pipeline("text2text-generation", model="t5-small")
        self.additional_model = pipeline("text-generation", model="EleutherAI/gpt-neo-125M")
        self.image_model = image_model

    async def generate_response(self, question: str) -> str:
        self.context_history.append(question)
        sentiment_score = self.analyze_sentiment(question)
        deepseek_response = self.deepseek_model(question)
        davinci_response = self.davinci_model(question, max_length=50)
        additional_response = self.additional_model(question, max_length=100)

        responses = [
            f"Sentiment score: {sentiment_score}",
            f"DeepSeek Response: {deepseek_response}",
            f"T5 Response: {davinci_response}",
            f"GPT-Neo Response: {additional_response}"
        ]
        return "\n\n".join(responses)

    def generate_image(self, prompt: str):
        image = self.image_model(
            prompt,
            height=1024,
            width=1024,
            guidance_scale=3.5,
            num_inference_steps=50,
            generator=torch.Generator('cpu').manual_seed(0)
        ).images[0]
        image.save("flux-dev.png")
        return image

    def analyze_sentiment(self, text: str) -> list:
        sentiment_score = self.sentiment_analyzer(text)
        logging.info(f"Sentiment analysis result: {sentiment_score}")
        return sentiment_score

# Main Gradio App
class HuggingFaceChatbot:
    """Wires the reasoning engine into a two-tab Gradio application."""

    def __init__(self):
        self.universal_reasoning = UniversalReasoning(config={})

    def setup_interface(self):
        """Build and return the Gradio app (text tab + image tab).

        Fix: the original returned ``Blocks([text_interface, image_interface])``
        — ``gr.Blocks`` does not accept a list of interfaces, so neither tab
        would render. ``gr.TabbedInterface`` (a Blocks subclass, so the return
        type is backward-compatible) is the correct container.
        """
        async def chatbot_logic(input_text: str) -> str:
            return await self.universal_reasoning.generate_response(input_text)

        def image_logic(prompt: str):
            return self.universal_reasoning.generate_image(prompt)

        text_interface = Interface(
            fn=chatbot_logic,
            inputs=Textbox(label="Ask anything"),
            outputs=Textbox(label="Reasoned Answer"),
            title="🧠 Codettes-BlackForest Chatbot"
        )

        image_interface = Interface(
            fn=image_logic,
            inputs=Textbox(label="Describe an image"),
            outputs=Image(label="Generated Image"),
            title="🎨 Image Generator (FLUX.1-dev)"
        )

        return gr.TabbedInterface(
            [text_interface, image_interface],
            tab_names=["Chatbot", "Image Generator"],
        )

    def launch(self):
        """Build the app and start the Gradio server (blocks until shutdown)."""
        app = self.setup_interface()
        app.launch()

# Script entry point: configure root logging, then build and serve the app.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    HuggingFaceChatbot().launch()