Commit c331dad ("edit image") by Chandima Prabhath
Parent: 0d9d5af

Files changed:
- app.py: +471 -492
- flux_kontext_lib/README.md: +42 (new)
- flux_kontext_lib/__init__.py: +3 (new)
- flux_kontext_lib/example_usage.py: +19 (new)
- flux_kontext_lib/image_generator.py: +119 (new)
app.py
CHANGED
@@ -1,555 +1,534 @@
import os
import threading
import requests
import logging
import queue
import json
-
from collections import defaultdict, deque
from concurrent.futures import ThreadPoolExecutor

from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import JSONResponse, PlainTextResponse
from pydantic import BaseModel, Field, ValidationError

from FLUX import generate_image
from VoiceReply import generate_voice_reply
-from polLLM import generate_llm, LLMBadRequestError
-
-# --- Logging Setup ---------------------------------------------------------

-logger = logging.getLogger("eve_bot")
-logger.setLevel(LOG_LEVEL)
-
-handler = logging.StreamHandler()
-formatter = logging.Formatter(
-    "%(asctime)s [%(levelname)s] [%(message_id)s/%(sender)s] %(message)s"
-)
-handler.setFormatter(formatter)
-
-class ContextFilter(logging.Filter):
-    def filter(self, record):
-        record.message_id = getattr(record, "message_id", "-")
-        record.sender = getattr(record, "sender", "-")
-        return True
-
-handler.addFilter(ContextFilter())
-logger.handlers = [handler]
-
-# Thread-local to carry context through helpers
-_thread_ctx = threading.local()
-def set_thread_context(chat_id, sender, message_id):
-    _thread_ctx.chat_id = chat_id
-    _thread_ctx.sender = sender
-    _thread_ctx.message_id = message_id
-
-def get_thread_context():
-    return (
-        getattr(_thread_ctx, "chat_id", None),
-        getattr(_thread_ctx, "sender", None),
-        getattr(_thread_ctx, "message_id", None),
-    )

-    history[(chat_id, sender)].clear()

-GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
-WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
-BOT_GROUP_CHAT = "[email protected]"
-BOT_JID = os.getenv("BOT_JID")
-IMAGE_DIR = "/tmp/images"
-AUDIO_DIR = "/tmp/audio"
-DEFAULT_IMAGE_COUNT = 4

@classmethod
-        if missing:
-            raise ValueError(f"Missing env vars: {', '.join(missing)}")

-    def __init__(self, cfg: BotConfig):
-        self.cfg = cfg
-        self.session = requests.Session()

-            f"{self.cfg.GREEN_API_TOKEN}"
-        )
-        for i in range(1, retries+1):
-            try:
-                resp = self.session.post(
-                    url,
-                    json=payload if files is None else None,
-                    data=None if files is None else payload,
-                    files=files
-                )
-                resp.raise_for_status()
-                return resp.json()
-            except requests.RequestException as e:
-                logger.warning(f"{endpoint} attempt {i}/{retries} failed: {e}")
-        return {"error":"failed"}
-
-    def send_message(self, message_id, chat_id, text):
-        return self.send("sendMessage", {
-            "chatId": chat_id,
-            "message": text,
-            "quotedMessageId": message_id
-        })

-            "chatId": chat_id,
-            "message": text
-        })

-        payload = {
-            "chatId": chat_id,
-            "caption": caption,
-            "quotedMessageId": message_id
-        }
-        with open(file_path,"rb") as f:
-            mime = "image/jpeg" if media_type=="image" else "audio/mpeg"
-            files = [("file",(os.path.basename(file_path),f,mime))]
-            return self.send(endpoint, payload, files=files)

-executor = ThreadPoolExecutor(max_workers=4)

-        finally:
-            task_queue.task_done()
-
-for _ in range(4):
-    threading.Thread(target=worker, daemon=True).start()
-
-# --- Basic Tool Functions -------------------------------------------------
-
-def _fn_send_text(mid, cid, message):
-    client.send_message(mid, cid, message)
-    chat_id, sender, _ = get_thread_context()
-    if chat_id and sender:
-        record_bot_message(chat_id, sender, message)
-    task_queue.put({
-        "type": "audio",
-        "message_id": mid,
-        "chat_id": cid,
-        "prompt": message
-    })
-
-def _fn_send_accept(mid, cid, message):
-    client.send_message(mid, cid, message)
-    chat_id, sender, _ = get_thread_context()
-    if chat_id and sender:
-        record_bot_message(chat_id, sender, message)
-
-def _fn_summarize(mid, cid, text):
-    summary = generate_llm(f"Summarize:\n\n{text}")
-    _fn_send_text(mid, cid, summary)
-
-def _fn_translate(mid, cid, lang, text):
-    resp = generate_llm(f"Translate to {lang}:\n\n{text}")
-    _fn_send_text(mid, cid, resp)
-
-def _fn_joke(mid, cid):
-    try:
-        j = requests.get(
-            "https://official-joke-api.appspot.com/random_joke",
-            timeout=5
-        ).json()
-        joke = f"{j['setup']}\n\n{j['punchline']}"
-    except:
-        joke = generate_llm("Tell me a short joke.")
-    _fn_send_text(mid, cid, joke)
-
-def _fn_weather(mid, cid, loc):
-    raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
-    report = generate_llm(f"Give a weather report in °C:\n\n{raw}")
-    _fn_send_text(mid, cid, report)
-
-def _fn_inspire(mid, cid):
-    quote = generate_llm("Give me a unique, random short inspirational quote.")
-    _fn_send_text(mid, cid, f"✨ {quote}")
-
-def _fn_meme(mid, cid, txt):
-    _fn_send_accept(mid, cid, "🎨 Generating meme…")
-    task_queue.put({
-        "type": "image",
-        "message_id": mid,
-        "chat_id": cid,
-        "prompt": f"meme: {txt}"
-    })
-
-def _fn_generate_images(
-    message_id: str,
-    chat_id: str,
-    prompt: str,
-    count: int = 1,
-    width: Optional[int] = None,
-    height: Optional[int] = None,
-    **_
-):
-    _fn_send_accept(message_id, chat_id, f"✨ Generating {count if count != 1 else 'a'} image{'s' if count != 1 else ''}...")
-    for i in range(1, count+1):
-        try:
-            img, path, ret_p, url = generate_image(
-                prompt, message_id, message_id, BotConfig.IMAGE_DIR,
-                width=width, height=height
-            )
-            formatted = "\n\n".join(f"_{p.strip()}_" for p in ret_p.split("\n\n") if p.strip())
-            cap = f"✨ Image {i}/{count}: {url}\n>{chr(8203)} {formatted}"
-            client.send_media(message_id, chat_id, path, cap, media_type="image")
-            os.remove(path)
-        except Exception as e:
-            if "Timed out" in str(e):
-                logger.warning("Image generation timed out.")
-            else:
-                logger.warning(f"Img {i}/{count} failed: {e}")
-            _fn_send_text(message_id, chat_id, f"😢 Failed to generate image {i}/{count}.")
-
-def _fn_voice_reply(
-    message_id: str,
-    chat_id: str,
-    prompt: str,
-    **_
-):
-    """
-    Try to generate an audio reply once. If it fails (e.g. a 400),
-    send the text fallback directly (no further retry).
-    """
-    proc = (
-        f"Just say this exactly as written in a friendly, playful, "
-        f"happy and helpful but a little bit clumsy-cute way: {prompt}"
-    )
-    try:
-        res = generate_voice_reply(proc, model="openai-audio", voice="coral", audio_dir=BotConfig.AUDIO_DIR)
-    except Exception as e:
-        logger.warning(f"Audio generation failed ({e}); sending text only.")
-        return

-# --- Pydantic Models for

class BaseIntent(BaseModel):
    action: str

-class SummarizeIntent(BaseIntent):
-    text: str
-
-class JokeIntent(BaseIntent):
-    action: Literal["joke"]
-
-class WeatherIntent(BaseIntent):
-    action: Literal["weather"]
-    location: str
-
-class InspireIntent(BaseIntent):
-    action: Literal["inspire"]
-
-class MemeIntent(BaseIntent):
-    action: Literal["meme"]
-    text: str

class GenerateImageIntent(BaseModel):
    action: Literal["generate_image"]
    prompt: str
-    count: int = Field(default=1, ge=1)
-    width: Optional[int]
-    height: Optional[int]

-class SendTextIntent(
    action: Literal["send_text"]
    message: str

-INTENT_MODELS = [
-    SummarizeIntent, TranslateIntent, JokeIntent, WeatherIntent,
-    InspireIntent, MemeIntent, GenerateImageIntent, SendTextIntent
-]
-
-ACTION_HANDLERS = {
-    "summarize": lambda mid,cid,**i: _fn_summarize(mid,cid,i["text"]),
-    "translate": lambda mid,cid,**i: _fn_translate(mid,cid,i["lang"],i["text"]),
-    "joke": lambda mid,cid,**i: _fn_joke(mid,cid),
-    "weather": lambda mid,cid,**i: _fn_weather(mid,cid,i["location"]),
-    "inspire": lambda mid,cid,**i: _fn_inspire(mid,cid),
-    "meme": lambda mid,cid,**i: _fn_meme(mid,cid,i["text"]),
-    "generate_image": _fn_generate_images,
-    "send_text": lambda mid,cid,**i: _fn_send_text(mid,cid,i["message"]),
-}
-
-# --- Intent Routing with Fallback & History-Reset on 400 -------------------
-
-def route_intent(user_input: str, chat_id: str, sender: str):
-    history_text = get_history_text(chat_id, sender)
-    sys_prompt = (
-        "You never perform work yourself—you only invoke one of the available functions."
-        "When the user asks for something that matches a function signature, you must return exactly one JSON object matching that function’s parameters—and nothing else. "
-        "Do not wrap it in markdown, do not add extra text, and do not show the JSON to the user. "
-        "If the user’s request does not match any function, reply in plain text, and never mention JSON or internal logic.\n\n"
-        "- summarize(text)\n"
-        "- translate(lang, text)\n"
-        "- joke()\n"
-        "- weather(location)\n"
-        "- inspire()\n"
-        "- meme(text)\n"
-        "- generate_image(prompt, count, width, height)\n"
-        "- send_text(message)\n\n"
-        "Return only raw JSON matching one of these shapes. For example:\n"
-        " {\"action\":\"generate_image\",\"prompt\":\"a red fox\",\"count\":4,\"width\":1920,\"height\":1080}\n"
-        "Another Example:\n"
-        " {\"action\":\"send_text\",\"message\":\"Hello!\"}\n\n"
-        "Otherwise, use send_text to reply with plain chat and you should only return one json for the current message not for previous conversations.\n"
-        f"Conversation so far:\n{history_text}\n\n current message: User: {user_input}"
-    )

-    except json.JSONDecodeError:
-        return SendTextIntent(action="send_text", message=raw)

-    for M in INTENT_MODELS:
-        try:
-            intent = M.model_validate(parsed)
-            logger.debug(f"Matched intent model: {M.__name__} with data {parsed}")
-            return intent
-        except ValidationError:
-            continue
-
-    logger.warning("Strict parse failed for all models, falling back to lenient")
-
-    action = parsed.get("action")
-    if action in ACTION_HANDLERS:
-        data = parsed
-        kwargs = {}
-        if action == "generate_image":
-            kwargs["prompt"] = data.get("prompt","")
-            kwargs["count"] = int(data.get("count", BotConfig.DEFAULT_IMAGE_COUNT))
-            kwargs["width"] = data.get("width")
-            kwargs["height"] = data.get("height")
-        elif action == "send_text":
-            kwargs["message"] = data.get("message","")
-        elif action == "translate":
-            kwargs["lang"] = data.get("lang","")
-            kwargs["text"] = data.get("text","")
-        elif action == "summarize":
-            kwargs["text"] = data.get("text","")
-        elif action == "weather":
-            kwargs["location"] = data.get("location","")
-        elif action == "meme":
-            kwargs["text"] = data.get("text","")
        try:

-@app.post("/whatsapp")
-async def whatsapp_webhook(request: Request):
-    data = await request.json()
-    logger.debug(f"Incoming webhook payload: {json.dumps(data)}")
-
-    if request.headers.get("Authorization") != f"Bearer {BotConfig.WEBHOOK_AUTH_TOKEN}":
-        raise HTTPException(403, "Unauthorized")

-        chat_id = data["senderData"]["chatId"]
-        sender = data["senderData"]["sender"]
-        mid = data["idMessage"]
-    except KeyError:
    try:
    )
    else:

-def health():
-    # HEAD requests ignore the body by HTTP spec, so FastAPI handles that automatically
-    return JSONResponse(content={"status": "ok"})


if __name__ == "__main__":
-
-    BotConfig
+"""
+Professional WhatsApp Bot using Green-API
+Author: Assistant
+Description: A comprehensive WhatsApp bot with a professional, class-based structure.
+             Features include image generation, image editing, voice replies,
+             and various utility functions, all handled by an asynchronous task queue.
+"""
+
import os
import threading
import requests
import logging
import queue
import json
+import base64
+from typing import List, Optional, Union, Literal, Dict, Any, Tuple
from collections import defaultdict, deque
from concurrent.futures import ThreadPoolExecutor

from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import JSONResponse, PlainTextResponse
from pydantic import BaseModel, Field, ValidationError
+import uvicorn

+# Assume these are your custom libraries for AI functionalities
from FLUX import generate_image
from VoiceReply import generate_voice_reply
+from polLLM import generate_llm, LLMBadRequestError
+import flux_kontext_lib

+# --- Configuration ---------------------------------------------------------

+class BotConfig:
+    """Manages all bot configuration from environment variables."""
+    GREEN_API_URL: str
+    GREEN_API_TOKEN: str
+    GREEN_API_ID_INSTANCE: str
+    WEBHOOK_AUTH_TOKEN: str
+
+    IMAGE_DIR: str = "/tmp/whatsapp_images"
+    AUDIO_DIR: str = "/tmp/whatsapp_audio"
+    TEMP_DIR: str = "/tmp/whatsapp_edit"
+    DEFAULT_IMAGE_COUNT: int = 4
+    MAX_HISTORY_SIZE: int = 20
+    WORKER_THREADS: int = 4
+    LOG_LEVEL: str = "INFO"
+
+    def __init__(self):
+        self.GREEN_API_URL = os.getenv("GREEN_API_URL")
+        self.GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
+        self.GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
+        self.WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")
+        self.LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
+        self._validate()
+
+    def _validate(self):
+        """Ensures all required environment variables are set."""
+        missing = [
+            var for var in ("GREEN_API_URL", "GREEN_API_TOKEN",
+                            "GREEN_API_ID_INSTANCE", "WEBHOOK_AUTH_TOKEN")
+            if not getattr(self, var)
+        ]
+        if missing:
+            raise ValueError(f"Missing required environment variables: {', '.join(missing)}")

+# --- Logging Setup ---------------------------------------------------------

+class LoggerSetup:
+    """Sets up and manages structured logging for the application."""
+    @staticmethod
+    def setup(level: str) -> logging.Logger:
+        logger = logging.getLogger("whatsapp_bot")
+        logger.setLevel(level)
+        logger.handlers.clear()
+
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter(
+            "%(asctime)s [%(levelname)s] [%(chat_id)s] %(funcName)s:%(lineno)d - %(message)s"
+        )
+        handler.setFormatter(formatter)

+        class ContextFilter(logging.Filter):
+            def filter(self, record):
+                record.chat_id = ThreadContext.get_context().get("chat_id", "-")
+                return True

+        handler.addFilter(ContextFilter())
+        logger.addHandler(handler)
+        return logger

+# --- Thread Context Management ---------------------------------------------

+class ThreadContext:
+    """Manages thread-local context for chat and message IDs."""
+    _context = threading.local()

+    @classmethod
+    def set_context(cls, chat_id: str, message_id: str):
+        cls._context.chat_id = chat_id
+        cls._context.message_id = message_id

    @classmethod
+    def get_context(cls) -> Dict[str, Optional[str]]:
+        return {
+            "chat_id": getattr(cls._context, "chat_id", None),
+            "message_id": getattr(cls._context, "message_id", None),
+        }

+# --- Conversation History -------------------------------------------------

+class ConversationManager:
+    """Manages conversation history for each chat."""
+    def __init__(self, max_size: int):
+        self.history = defaultdict(lambda: deque(maxlen=max_size))

+    def add_user_message(self, chat_id: str, message: str):
+        self.history[chat_id].append(f"User: {message}")

+    def add_bot_message(self, chat_id: str, message: str):
+        self.history[chat_id].append(f"Assistant: {message}")

+    def get_history_text(self, chat_id: str) -> str:
+        return "\n".join(self.history[chat_id])

+    def clear_history(self, chat_id: str):
+        self.history[chat_id].clear()

+# --- Green-API Client -----------------------------------------------------

+class GreenApiClient:
+    """A client for interacting with the Green-API for WhatsApp."""
+    def __init__(self, config: BotConfig, logger: logging.Logger):
+        self.config = config
+        self.logger = logger
+        self.session = requests.Session()
+        self.base_url = (
+            f"{self.config.GREEN_API_URL}/waInstance"
+            f"{self.config.GREEN_API_ID_INSTANCE}"
+        )

+    def _request(self, method: str, endpoint: str, **kwargs) -> Optional[Dict[str, Any]]:
+        """Makes a request to the Green-API with retries."""
+        url = f"{self.base_url}/{endpoint}/{self.config.GREEN_API_TOKEN}"
+        for attempt in range(3):
+            try:
+                response = self.session.request(method, url, timeout=20, **kwargs)
+                response.raise_for_status()
+                return response.json()
+            except requests.RequestException as e:
+                self.logger.warning(
+                    f"API request to {endpoint} failed (attempt {attempt + 1}): {e}"
+                )
+        self.logger.error(f"API request to {endpoint} failed after all retries.")
+        return None
+
+    def send_message(self, chat_id: str, text: str, quoted_message_id: str = None):
+        payload = {"chatId": chat_id, "message": text}
+        if quoted_message_id:
+            payload["quotedMessageId"] = quoted_message_id
+        return self._request("POST", "sendMessage", json=payload)
+
+    def send_file(self, chat_id: str, file_path: str, caption: str = "", quoted_message_id: str = None):
+        """Uploads and sends a file (image or audio)."""
+        filename = os.path.basename(file_path)
+        payload = {"chatId": chat_id, "caption": caption}
+        if quoted_message_id:
+            payload["quotedMessageId"] = quoted_message_id
+
+        with open(file_path, "rb") as f:
+            files = {"file": (filename, f)}
+            return self._request("POST", "sendFileByUpload", data=payload, files=files)
+
+    def download_file(self, url: str) -> Optional[bytes]:
+        """Downloads a file from a given URL."""
+        try:
+            response = self.session.get(url, timeout=30)
+            response.raise_for_status()
+            return response.content
+        except requests.RequestException as e:
+            self.logger.error(f"Failed to download file from {url}: {e}")
+            return None

+# --- Pydantic Models for Intent Recognition --------------------------------

class BaseIntent(BaseModel):
    action: str

+class SummarizeIntent(BaseIntent): action: Literal["summarize"]; text: str
+class TranslateIntent(BaseIntent): action: Literal["translate"]; lang: str; text: str
+class JokeIntent(BaseIntent): action: Literal["joke"]
+class WeatherIntent(BaseIntent): action: Literal["weather"]; location: str
+class InspireIntent(BaseIntent): action: Literal["inspire"]
+class MemeIntent(BaseIntent): action: Literal["meme"]; text: str
+class EditImageIntent(BaseIntent): action: Literal["edit_image"]; prompt: str

class GenerateImageIntent(BaseModel):
    action: Literal["generate_image"]
    prompt: str
+    count: int = Field(default=1, ge=1, le=10)
+    width: Optional[int] = Field(default=None, ge=512, le=2048)
+    height: Optional[int] = Field(default=None, ge=512, le=2048)

+class SendTextIntent(BaseIntent):
    action: Literal["send_text"]
    message: str

+# --- Intent Router --------------------------------------------------------

+class IntentRouter:
+    """Recognizes user intent using an LLM and routes to appropriate actions."""
+    INTENT_MODELS = [
+        SummarizeIntent, TranslateIntent, JokeIntent, WeatherIntent,
+        InspireIntent, MemeIntent, GenerateImageIntent, EditImageIntent, SendTextIntent
+    ]

+    def __init__(self, conv_manager: ConversationManager, logger: logging.Logger):
+        self.conv_manager = conv_manager
+        self.logger = logger

+    def get_intent(self, user_input: str, chat_id: str) -> BaseIntent:
+        history_text = self.conv_manager.get_history_text(chat_id)
+        system_prompt = self._build_system_prompt(history_text, user_input)

        try:
+            raw_response = generate_llm(system_prompt)
+        except LLMBadRequestError:
+            self.conv_manager.clear_history(chat_id)
+            return SendTextIntent(action="send_text", message="Oops! Let's start fresh! 🌟")
+
+        return self._parse_response(raw_response)
+
+    def _build_system_prompt(self, history: str, user_input: str) -> str:
+        return (
+            "You are a function dispatcher. You only invoke functions by returning a single JSON object.\n"
+            "Available functions:\n"
+            "- summarize(text): Summarize given text\n"
+            "- translate(lang, text): Translate text to a language\n"
+            "- joke(): Tell a random joke\n"
+            "- weather(location): Get weather for a location\n"
+            "- inspire(): Get an inspirational quote\n"
+            "- meme(text): Generate a meme from text\n"
+            "- generate_image(prompt, count, width, height): Generate images\n"
+            "- edit_image(prompt): Edit an image (requires replying to an image)\n"
+            "- send_text(message): Send a plain text response\n\n"
+            "Return only raw JSON. Examples:\n"
+            '{"action":"generate_image","prompt":"a red fox","count":2}\n'
+            '{"action":"edit_image","prompt":"make the sky purple"}\n'
+            '{"action":"send_text","message":"Hello there!"}\n\n'
+            f"Conversation history:\n{history}\n\n"
+            f"Current message: User: {user_input}"
+        )

+    def _parse_response(self, raw_response: str) -> BaseIntent:
        try:
+            parsed = json.loads(raw_response)
+            for model in self.INTENT_MODELS:
+                try:
+                    return model.model_validate(parsed)
+                except ValidationError:
+                    continue
+        except json.JSONDecodeError:
+            pass
+
+        # Fallback for non-JSON or unparsable responses
+        return SendTextIntent(action="send_text", message=raw_response)
+
+# --- Main Application Class ------------------------------------------------
+
+class WhatsAppBot:
+    def __init__(self, config: BotConfig):
+        self.config = config
+        self.logger = LoggerSetup.setup(config.LOG_LEVEL)
+        self.api_client = GreenApiClient(config, self.logger)
+        self.conv_manager = ConversationManager(config.MAX_HISTORY_SIZE)
+        self.intent_router = IntentRouter(self.conv_manager, self.logger)
+        self.task_queue = queue.Queue()
+        self.fastapi_app = FastAPI(title="WhatsApp Eve Bot", version="2.0.0")
+        self._setup_routes()
+        self._start_workers()
+
+    def _setup_routes(self):
+        @self.fastapi_app.post("/whatsapp")
+        async def webhook(request: Request):
+            if request.headers.get("Authorization") != f"Bearer {self.config.WEBHOOK_AUTH_TOKEN}":
+                raise HTTPException(403, "Unauthorized")
+
+            payload = await request.json()
+            self.logger.debug(f"Incoming webhook: {json.dumps(payload)}")
+
+            # Process valid incoming messages in the background
+            if payload.get("typeWebhook") == "incomingMessageReceived":
+                executor.submit(self._process_incoming_message, payload)
+
+            return JSONResponse(content={"status": "received"})
+
+        @self.fastapi_app.api_route("/health", methods=["GET", "HEAD"])
+        def health_check():
+            return JSONResponse(content={"status": "healthy"})
+
+    def _start_workers(self):
+        for i in range(self.config.WORKER_THREADS):
+            threading.Thread(target=self._worker, name=f"Worker-{i}", daemon=True).start()
+        self.logger.info(f"Started {self.config.WORKER_THREADS} worker threads.")
+
+    def _worker(self):
+        """Worker thread to process tasks from the queue."""
+        while True:
+            task = self.task_queue.get()
+            try:
+                handler = getattr(self, f"_task_{task['type']}", None)
+                if handler:
+                    handler(task)
+                else:
+                    self.logger.warning(f"Unknown task type: {task['type']}")
+            except Exception as e:
+                self.logger.error(f"Error processing task {task['type']}: {e}", exc_info=True)
+            finally:
+                self.task_queue.task_done()
+
+    def _process_incoming_message(self, payload: Dict[str, Any]):
+        """Main logic for handling an incoming message payload."""
+        try:
+            chat_id = payload["senderData"]["chatId"]
+            message_id = payload["idMessage"]
+            ThreadContext.set_context(chat_id, message_id)
+
+            message_data = payload.get("messageData", {})
+            type_message = message_data.get("typeMessage")
+
+            text = ""
+            if type_message == "textMessage":
+                text = message_data["textMessageData"]["textMessage"]
+            elif type_message == "extendedTextMessage":
+                text = message_data["extendedTextMessageData"]["text"]
+
+            text = text.strip()
+            if not text:
+                return
+
+            self.conv_manager.add_user_message(chat_id, text)
+
+            # Handle direct commands
+            if text.startswith('/'):
+                self._handle_command(chat_id, message_id, text, payload)
+            else:
+                # Handle natural language and replies
+                self._handle_natural_language(chat_id, message_id, text, payload)
+
+        except Exception as e:
+            self.logger.error(f"Failed to process message payload: {e}", exc_info=True)
+
+    def _handle_command(self, chat_id, message_id, text, payload):
+        """Processes direct slash commands."""
+        parts = text.lower().split()
+        command = parts[0]
+        args = text.split(maxsplit=1)[1] if len(parts) > 1 else ""
+
+        if command == "/help":
+            help_text = (
+                "*🤖 Eve's Command Center:*\n\n"
+                "🔹 `/help` - Show this help message\n"
+                "🔹 `/gen <prompt>` - Generate an image\n"
+                "🔹 `/edit <prompt>` - Reply to an image to edit it\n"
+                "🔹 `/joke` - Get a random joke\n"
+                "🔹 `/inspire` - Receive an inspirational quote\n"
+                "🔹 `/weather <location>` - Check the weather\n\n"
+                "You can also just chat with me naturally!"
            )
+            self.api_client.send_message(chat_id, help_text, message_id)
+        elif command == "/gen":
+            self.task_queue.put({"type": "generate_image", "chat_id": chat_id, "message_id": message_id, "prompt": args})
+        elif command == "/edit":
+            self._dispatch_edit_image(chat_id, message_id, args, payload)
+        elif command == "/joke":
+            self._task_joke({"chat_id": chat_id, "message_id": message_id})
+        elif command == "/inspire":
+            self._task_inspire({"chat_id": chat_id, "message_id": message_id})
+        elif command == "/weather":
+            self._task_weather({"chat_id": chat_id, "message_id": message_id, "location": args})
        else:
+            self.api_client.send_message(chat_id, "Unknown command. Type /help for options.", message_id)
+
+    def _handle_natural_language(self, chat_id, message_id, text, payload):
+        """Processes natural language using the intent router."""
+        intent = self.intent_router.get_intent(text, chat_id)
+
+        task_data = {
+            "chat_id": chat_id,
+            "message_id": message_id,
+            **intent.model_dump()
+        }
+
+        if intent.action == "edit_image":
+            # This action needs the original payload to find the replied-to image
+            self._dispatch_edit_image(chat_id, message_id, intent.prompt, payload)
+        elif hasattr(self, f"_task_{intent.action}"):
+            self.task_queue.put({"type": intent.action, **task_data})
+        else:
+            self.logger.warning(f"No handler found for intent action: {intent.action}")
+            self.api_client.send_message(chat_id, "Sorry, I'm not sure how to handle that.", message_id)
+
+    def _dispatch_edit_image(self, chat_id, message_id, prompt, payload):
+        """Checks for a replied-to image and dispatches the edit task."""
+        quoted_message = payload.get("messageData", {}).get("quotedMessage")
+        if not quoted_message or quoted_message.get("typeMessage") != "imageMessage":
+            self.api_client.send_message(chat_id, "To edit an image, please reply to it with your instructions.", message_id)
+            return
+
+        download_url = quoted_message["imageMessage"]["downloadUrl"]
+        self.task_queue.put({
+            "type": "edit_image",
+            "chat_id": chat_id,
+            "message_id": message_id,
+            "prompt": prompt,
+            "download_url": download_url
+        })
+
+    # --- Task Handler Methods ---
+
+    def _task_send_text(self, task: Dict[str, Any]):
+        chat_id, message_id, message = task["chat_id"], task["message_id"], task["message"]
+        self.api_client.send_message(chat_id, message, message_id)
+        self.conv_manager.add_bot_message(chat_id, message)
+        self.task_queue.put({"type": "voice_reply", "chat_id": chat_id, "message_id": message_id, "text": message})
+
+    def _task_generate_image(self, task: Dict[str, Any]):
+        chat_id, mid, prompt, count = task["chat_id"], task["message_id"], task["prompt"], task.get("count", 1)
+        self.api_client.send_message(chat_id, f"🎨 Generating {count} image(s) for: \"{prompt}\"...", mid)
+
+        for i in range(count):
+            try:
+                _, path, _, url = generate_image(prompt, mid, str(i), self.config.IMAGE_DIR, width=task.get("width"), height=task.get("height"))
+                caption = f"✨ Image {i+1}/{count}: {prompt}"
+                self.api_client.send_file(chat_id, path, caption, mid)
+                os.remove(path)
+            except Exception as e:
+                self.logger.error(f"Image generation {i+1} failed: {e}")
+                self.api_client.send_message(chat_id, f"😢 Failed to generate image {i+1}.", mid)
+
+    def _task_edit_image(self, task: Dict[str, Any]):
+        chat_id, mid, prompt, url = task["chat_id"], task["message_id"], task["prompt"], task["download_url"]
+        self.api_client.send_message(chat_id, f"🎨 Editing image with prompt: \"{prompt}\"...", mid)
+
+        input_path, output_path = None, None
+        try:
+            image_data = self.api_client.download_file(url)
+            if not image_data:
+                raise ValueError("Failed to download image.")
+
+            input_path = os.path.join(self.config.TEMP_DIR, f"input_{mid}.jpg")
+            output_path = os.path.join(self.config.TEMP_DIR, f"output_{mid}.jpg")
+
+            with open(input_path, 'wb') as f:
+                f.write(image_data)
+
+            flux_kontext_lib.generate_image(prompt, input_path, download_path=output_path)
+
+            if os.path.exists(output_path):
+                caption = f"✨ Edited: {prompt}"
+                self.api_client.send_file(chat_id, output_path, caption, mid)
+            else:
+                raise ValueError("Edited image file not found.")

+        except Exception as e:
+            self.logger.error(f"Image editing task failed: {e}")
+            self.api_client.send_message(chat_id, "😢 Sorry, I failed to edit the image.", mid)
+        finally:
+            for path in [input_path, output_path]:
+                if path and os.path.exists(path):
+                    os.remove(path)
+
+    def _task_voice_reply(self, task: Dict[str, Any]):
+        text = task["text"]
+        prompt = f"Say this in a friendly, playful, and slightly clumsy-cute way: {text}"
+        try:
+            result = generate_voice_reply(prompt, model="openai-audio", voice="coral", audio_dir=self.config.AUDIO_DIR)
+            if result and result[0]:
+                path, _ = result
+                self.api_client.send_file(task["chat_id"], path, quoted_message_id=task["message_id"])
+                os.remove(path)
+        except Exception as e:
+            self.logger.warning(f"Voice reply generation failed: {e}")

+    def _task_joke(self, task: Dict[str, Any]):
+        try:
+            j = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
+            joke = f"{j['setup']}\n\n{j['punchline']}"
+        except Exception:
+            joke = generate_llm("Tell me a short, clean joke.")
+        self._task_send_text({"type": "send_text", **task, "message": f"😄 {joke}"})
+
+    def _task_inspire(self, task: Dict[str, Any]):
+        quote = generate_llm("Give me a unique, short, uplifting inspirational quote with attribution.")
+        self._task_send_text({"type": "send_text", **task, "message": f"✨ {quote}"})
+
+    def _task_weather(self, task: Dict[str, Any]):
+        location = task.get("location", "New York")
+        try:
+            raw = requests.get(f"http://wttr.in/{location.replace(' ', '+')}?format=4", timeout=10).text
+            report = generate_llm(f"Create a friendly weather report in Celsius from this data:\n\n{raw}")
+            self._task_send_text({"type": "send_text", **task, "message": f"🌤️ Weather for {location}:\n{report}"})
+        except Exception as e:
+            self.logger.error(f"Weather task failed: {e}")
+            self.api_client.send_message(task["chat_id"], "Sorry, I couldn't get the weather.", task["message_id"])
+
+    def run(self):
+        """Starts the bot and FastAPI server."""
+        self.logger.info("Starting Eve WhatsApp Bot...")
+        for d in [self.config.IMAGE_DIR, self.config.AUDIO_DIR, self.config.TEMP_DIR]:
+            os.makedirs(d, exist_ok=True)
+            self.logger.info(f"Ensured directory exists: {d}")
+
+        self.api_client.send_message(
+            "[email protected]",
+            "🌟 Eve is online and ready to help! Type /help to see commands."
+        )

+        uvicorn.run(self.fastapi_app, host="0.0.0.0", port=7860)


if __name__ == "__main__":
+    try:
+        config = BotConfig()
+        executor = ThreadPoolExecutor(max_workers=config.WORKER_THREADS * 2)
+        bot = WhatsAppBot(config)
+        bot.run()
+    except ValueError as e:
+        # Catch config validation errors
+        print(f"❌ CONFIGURATION ERROR: {e}")
+    except KeyboardInterrupt:
+        print("\n🛑 Bot stopped by user.")
+    except Exception as e:
+        print(f"❌ A fatal error occurred: {e}")
+
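For reference, the webhook handler above only reads a handful of fields from the Green-API payload. The sketch below shows that shape for a reply-to-image edit request; the field names mirror the lookups in `_process_incoming_message` and `_dispatch_edit_image`, while every concrete value (message ID, chat ID, URL) is illustrative only.

```python
# Illustrative "incomingMessageReceived" payload, assembled from the fields the
# handlers above actually read. All concrete values are placeholders.
example_payload = {
    "typeWebhook": "incomingMessageReceived",
    "idMessage": "EXAMPLE_MESSAGE_ID",
    "senderData": {"chatId": "[email protected]"},
    "messageData": {
        "typeMessage": "extendedTextMessage",
        "extendedTextMessageData": {"text": "/edit make the sky purple"},
        # Present only when the user replies to an earlier image message.
        "quotedMessage": {
            "typeMessage": "imageMessage",
            "imageMessage": {"downloadUrl": "https://example.invalid/media/photo.jpg"},
        },
    },
}

# _dispatch_edit_image would pull out:
chat_id = example_payload["senderData"]["chatId"]
message_id = example_payload["idMessage"]
download_url = example_payload["messageData"]["quotedMessage"]["imageMessage"]["downloadUrl"]
```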
flux_kontext_lib/README.md
ADDED
@@ -0,0 +1,42 @@
+# Flux Kontext Image Generator Library
+
+A Python library for interacting with the Kontext Chat image generation API.
+
+## Installation
+```bash
+pip install requests Pillow
+```
+
+## Usage
+```python
+from flux_kontext_lib import generate_image
+
+# Edit an image from a file path
+result = generate_image("close her eyes", "path/to/image.jpg")
+
+# Optionally download the generated image to a local file
+result = generate_image("add sunglasses", "path/to/image.jpg", download_path="path/to/output.jpg")
+```
+
+## Parameters
+- `prompt_text` (str): Text prompt describing the image modification
+- `image_path` (str): Path to the input image (any common format; it is normalized to JPEG)
+- `download_path` (str, optional): If provided, the generated image is downloaded to this path
+
+## Returns
+- dict: API response on success
+- None: On request failure or if the input image cannot be read
+
+## Error Handling
+Errors are reported on stdout; the function returns `None` if the image file is missing,
+cannot be processed, or the API request fails.
+
+## Example
+See [example_usage.py](example_usage.py) for a complete usage example.
flux_kontext_lib/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .image_generator import generate_image
+
+__all__ = ['generate_image']
flux_kontext_lib/example_usage.py
ADDED
@@ -0,0 +1,19 @@
+from flux_kontext_lib import generate_image
+
+# Example usage
+if __name__ == '__main__':
+    try:
+        # Replace with your actual image path
+        image_path = "./image.jpg"
+        prompt = "close his eyes"
+
+        # Call the library function
+        result = generate_image(prompt, image_path)
+
+        if result:
+            print("API Response:")
+            print(result)
+        else:
+            print("Request failed. Check error messages for details.")
+    except Exception as e:
+        print(f"Error: {e}")
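The example above only exercises the basic call. Since `generate_image` in `image_generator.py` (next section) also accepts an optional `download_path` and returns the raw API response, a minimal sketch of that variant follows; the file paths are placeholders.

```python
from flux_kontext_lib import generate_image

# Edit ./image.jpg and, if the API response contains an "imageUrl",
# also save the generated result to ./edited.jpg (paths are placeholders).
result = generate_image("close his eyes", "./image.jpg", download_path="./edited.jpg")

if result:
    print("API response:", result)
    print("Generated image URL:", result.get("imageUrl"))
else:
    print("Request failed; see the printed error messages for details.")
```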
flux_kontext_lib/image_generator.py
ADDED
@@ -0,0 +1,119 @@
+import requests
+import base64
+import json
+from io import BytesIO
+from typing import Optional, Dict
+
+# Pillow is required for image format conversion and normalization.
+# Please install it using: pip install Pillow
+try:
+    from PIL import Image
+except ImportError:
+    print("Pillow library not found. Please install it using: pip install Pillow")
+    exit()
+
+def _download_image_from_url(image_url: str, save_path: str) -> bool:
+    """
+    Downloads an image from a URL and saves it to a local path.
+
+    Args:
+        image_url: The URL of the image to download.
+        save_path: The local path to save the downloaded image.
+
+    Returns:
+        True if the download was successful, False otherwise.
+    """
+    print(f"Downloading generated image from: {image_url}")
+    try:
+        image_response = requests.get(image_url, stream=True)
+        # Check if the download request was successful.
+        if image_response.status_code == 200:
+            content_type = image_response.headers.get('Content-Type', '')
+            if 'image' in content_type:
+                with open(save_path, 'wb') as f:
+                    for chunk in image_response.iter_content(1024):
+                        f.write(chunk)
+                print(f"Image successfully saved to {save_path}")
+                return True
+            else:
+                print(f"Error: Content at URL is not an image. Content-Type: {content_type}")
+                return False
+        else:
+            print(f"Error: Failed to download image. Status code: {image_response.status_code}")
+            return False
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred during image download: {e}")
+        return False
+
+def generate_image(
+    prompt_text: str,
+    image_path: str,
+    download_path: Optional[str] = None
+) -> Optional[Dict]:
+    """
+    Sends a request to the image generation API and optionally downloads the result.
+
+    Args:
+        prompt_text: The instructional text for image modification.
+        image_path: The file path to the input image (any common format).
+        download_path: If provided, the path to save the generated image.
+
+    Returns:
+        A dictionary of the JSON response from the API, or None on error.
+    """
+    url = "https://kontext-chat.replicate.dev/generate-image"
+
+    try:
+        # --- Image Normalization Step ---
+        with Image.open(image_path) as img:
+            if img.mode != 'RGB':
+                img = img.convert('RGB')
+            with BytesIO() as output_buffer:
+                img.save(
+                    output_buffer,
+                    format="JPEG",
+                    quality=95,
+                    subsampling=0,
+                    progressive=False
+                )
+                image_bytes = output_buffer.getvalue()
+    except FileNotFoundError:
+        print(f"Error: Image file not found at {image_path}")
+        return None
+    except Exception as e:
+        print(f"Error processing image file. Ensure it's a valid image. Details: {e}")
+        return None
+
+    encoded_string = base64.b64encode(image_bytes).decode('utf-8')
+    input_image_data_uri = f"data:image/jpeg;base64,{encoded_string}"
+
+    payload = {
+        "prompt": prompt_text,
+        "input_image": input_image_data_uri
+    }
+
+    headers = {
+        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:139.0) Gecko/20100101 Firefox/139.0",
+        "Accept": "*/*",
+        "Content-Type": "application/json",
+        "Referer": "https://kontext-chat.replicate.dev/",
+        "Origin": "https://kontext-chat.replicate.dev",
+    }
+
+    try:
+        response = requests.post(url, headers=headers, data=json.dumps(payload))
+        response.raise_for_status()
+        api_response_data = response.json()
+
+        # --- Optional Download Logic ---
+        if download_path and isinstance(api_response_data, dict):
+            image_url = api_response_data.get("imageUrl")
+            if image_url:
+                _download_image_from_url(image_url, download_path)
+            else:
+                print("Warning: 'imageUrl' not found in response, could not download image.")
+
+        return api_response_data
+    except requests.exceptions.RequestException as e:
+        print(f"API request failed: {e}")
+        return None