import asyncio
import glob
import os
import random
import threading

import discord
import gradio as gr
from discord.ext import commands
from gradio_client import Client


HF_TOKEN = os.getenv("HF_TOKEN")
wuerstchen_client = Client("huggingface-projects/Wuerstchen-duplicate", HF_TOKEN)
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
# The target channel ID is never defined in the original file; assume it is supplied
# via an environment variable so run_wuerstchen() below can resolve a channel.
WUERSTCHEN_CHANNEL_ID = int(os.getenv("WUERSTCHEN_CHANNEL_ID", "0"))

#---------------------------------------------------------------------------------------------------------------------
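# Discord bot setup: all gateway intents enabled, "/" used as the command prefix.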
intents = discord.Intents.all()
bot = commands.Bot(command_prefix="/", intents=intents)
#---------------------------------------------------------------------------------------------------------------------
@bot.event
async def on_ready():
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    print("------")
#---------------------------------------------------------------------------------------------------------------------
@bot.hybrid_command(
    name="wuerstchen",
    description="Enter a prompt to generate art!",
)
async def wuerstchen_command(ctx, prompt: str):
    """Wuerstchen generation"""
    try:
        await run_wuerstchen(ctx, prompt)
    except Exception as e:
        print(f"Error wuerstchen: (app.py){e}")


def wuerstchen_inference(prompt):
    """Inference for Wuerstchen"""
    negative_prompt = ""
    seed = random.randint(0, 1000)
    width = 1024
    height = 1024
    prior_num_inference_steps = 60
    prior_guidance_scale = 4
    decoder_num_inference_steps = 12
    decoder_guidance_scale = 0
    num_images_per_prompt = 1

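    # Call the hosted Wuerstchen Space; the positional arguments must match the
    # order expected by its /run endpoint.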
    result_path = wuerstchen_client.predict(
        prompt,
        negative_prompt,
        seed,
        width,
        height,
        prior_num_inference_steps,
        prior_guidance_scale,
        decoder_num_inference_steps,
        decoder_guidance_scale,
        num_images_per_prompt,
        api_name="/run",
    )
    # The Space writes its results to a directory; return the first generated PNG.
    png_files = glob.glob(f"{result_path}/**/*.png", recursive=True)
    return png_files[0]


async def run_wuerstchen(ctx, prompt):
    """Responds to /Wuerstchen command"""
    try:
        channel = bot.get_channel(WUERSTCHEN_CHANNEL_ID)
        message = await ctx.send(f"**{prompt}** - {ctx.author.mention} <a:loading:1114111677990981692>")

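        # Run the blocking Gradio prediction in a worker thread so the bot's event loop stays responsive.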
        loop = asyncio.get_running_loop()
        result_path = await loop.run_in_executor(None, wuerstchen_inference, prompt)

        await message.delete()
        with open(result_path, "rb") as f:
            await channel.send(f"**{prompt}** - {ctx.author.mention}", file=discord.File(f, "wuerstchen.png"))
    except Exception as e:
        print(f"Error: {e}")


def run_bot():
    bot.run(DISCORD_TOKEN)


# Run the Discord bot in a background Python thread so Gradio can run in the main thread.
threading.Thread(target=run_bot).start()
with gr.Blocks() as demo:
    gr.Markdown("""
    # Huggingbots Server
    This space hosts the huggingbots discord bot.
    Currently supported models are Falcon and DeepfloydIF
    https://discord.com/api/oauth2/authorize?client_id=1155489509518098565&permissions=51200&scope=bot
    """)
demo.queue(concurrency_count=100, max_size=100)
demo.launch()