import gradio as gr
import gradio_client as grc
import dotenv
import discord
from discord.ext import commands
import os
import threading
from threading import Event
from typing import Literal, Optional
import asyncio
event = Event()
dotenv.load_dotenv()
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="/", intents=intents)
# limit command to channel_name
channel_name = "🤸🏻♀anime"
# Limited to only 25 models
models = Literal[
    '3Guofeng3_v34.safetensors [50f420de]',
    'absolutereality_V16.safetensors [37db0fc3]',
    'absolutereality_v181.safetensors [3d9d4d2b]',
    'anythingV5_PrtRE.safetensors [893e49b9]',
    'blazing_drive_v10g.safetensors [ca1c1eab]',
    'cetusMix_Version35.safetensors [de2f2560]',
    'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
    'Counterfeit_v30.safetensors [9e2a8f19]',
    'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
    'cyberrealistic_v33.safetensors [82b0d085]',
    'dreamlike-anime-1.0.safetensors [4520e090]',
    'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
    'dreamshaper_8.safetensors [9d40847d]',
    'edgeOfRealism_eorV20.safetensors [3ed5de15]',
    'elldreths-vivid-mix.safetensors [342d9d26]',
    'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
    'juggernaut_aftermath.safetensors [5e20c455]',
    'lofi_v4.safetensors [ccc204d6]',
    'lyriel_v16.safetensors [68fceea2]',
    'neverendingDream_v122.safetensors [f964ceeb]',
    'openjourney_V4.ckpt [ca2f377f]',
    'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
    'Realistic_Vision_V5.0.safetensors [614d1063]',
    'revAnimated_v122.safetensors [3f4fefd9]',
    'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
]
@bot.event
async def on_ready():
    print(f"logged in as {bot.user}")
    event.set()


@bot.hybrid_command(
    name="ping",
    description="ping")
async def ping(ctx):
    await ctx.reply(f"{bot.latency*1000:.0f}ms")
def get_client(session: Optional[str] = None) -> grc.Client:
    client = grc.Client("prodia/fast-stable-diffusion")
    if session:
        client.session_hash = session
    return client
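# Note: get_client() builds a fresh gradio_client.Client for every request.
# Creating the Client performs network requests against the hosted
# "prodia/fast-stable-diffusion" Space (it fetches the Space config), which is
# why the coroutines below call it through loop.run_in_executor() instead of
# calling it directly on the event loop.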
async def text2img(pos_prompt: str, neg_prompt: str = "", model: str = "absolutereality_v181.safetensors [3d9d4d2b]"):
    """
    Generates an image based on the given positive prompt and optional negative prompt.

    Args:
        pos_prompt (str): The positive prompt to generate the image.
        neg_prompt (str, optional): The negative prompt to generate the image. Defaults to "".
        model (str, optional): The Stable Diffusion checkpoint to use.

    Returns:
        The generated image.
    """
    txt2img_conf = {
        "parameter_11": pos_prompt,
        "parameter_12": neg_prompt,
        "stable_diffusion_checkpoint": model,
        "sampling_steps": 25,
        "sampling_method": "DPM++ 2M Karras",
        "cfg_scale": 7,
        "width": 512,
        "height": 512,
        "seed": -1,
    }
    loop = asyncio.get_running_loop()
    # Both creating the client and calling predict() block on network I/O,
    # so run them in the default thread executor to keep the event loop free.
    client = await loop.run_in_executor(None, get_client, None)
    txt2img_args = txt2img_conf.values()
    job = await loop.run_in_executor(None, lambda: client.predict(*txt2img_args, fn_index=0))
    return job
async def img2img(pos_prompt: str, neg_prompt: str = "", img=None, model: str = "absolutereality_v181.safetensors [3d9d4d2b]"):
    """
    Generates an image based on the given positive prompt, optional negative prompt and input image.

    Args:
        pos_prompt (str): The positive prompt for the image generation.
        neg_prompt (str, optional): The negative prompt for the image generation. Defaults to "".
        img (filepath or URL to image): The input image for the image generation. Defaults to None.
        model (str, optional): The Stable Diffusion checkpoint to use.

    Returns:
        The generated image.
    """
    img2img_conf = {
        "parameter_52": img,
        "denoising_strength": 0.7,
        "parameter_44": pos_prompt,
        "parameter_45": neg_prompt,
        "stable_diffusion_checkpoint": model,
        "sampling_steps": 25,
        "sampling_method": "DPM++ 2M Karras",
        "cfg_scale": 7,
        "width": 512,
        "height": 512,
        "seed": -1,
    }
    loop = asyncio.get_running_loop()
    # Same as text2img: keep the blocking gradio_client calls off the event loop.
    client = await loop.run_in_executor(None, get_client, None)
    img2img_args = img2img_conf.values()
    job = await loop.run_in_executor(None, lambda: client.predict(*img2img_args, fn_index=1))
    return job
async def run_diffusion(pos_prompt: str, neg_prompt: str = "", img_url: Optional[str] = None, model: str = "absolutereality_v181.safetensors [3d9d4d2b]"):
    """Runs the diffusion model, dispatching to text2img or img2img."""
    if not img_url:
        # Text prompt only
        generated_image = await text2img(pos_prompt, neg_prompt, model)
    else:
        # An input image was provided, so run img2img
        generated_image = await img2img(pos_prompt, neg_prompt, img_url, model)
    return generated_image
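# Illustrative usage (hypothetical values), from inside any coroutine:
#   path = await run_diffusion("a watercolor fox", "blurry, low quality")
#   # `path` is expected to be a local filepath to the generated image,
#   # which is what discord.File() needs in the /diffusion command below.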
# AI prediction command
@bot.hybrid_command(
    name="diffusion",
    description="creates an AI generated image"
)
async def diffusion(ctx, pos_prompt: str = "", neg_prompt: str = "", img_url: Optional[str] = None, improve: Literal["True", "False"] = "True", model: models = "absolutereality_v181.safetensors [3d9d4d2b]"):
    """
    AI command to generate images
    """
    # only answer in the dedicated channel (channel_name)
    try:
        if ctx.channel.name == channel_name:
            await ctx.reply(f"Creating image for {ctx.author.mention} ...")
            try:
                # prepare the prompts before running the prediction
                _improved_pos_prompt = "ultrarealistic,8k"
                _improved_neg_prompt = "3d, cartoon, (deformed eyes, nose, ears, nose), bad anatomy, ugly,blur"
                pos_prompt = pos_prompt.strip()
                neg_prompt = neg_prompt.strip()
                # check if the user wants to improve the results
                if improve == "True":
                    if len(pos_prompt) == 0:
                        pos_prompt = _improved_pos_prompt
                    elif pos_prompt[-1] != ",":
                        pos_prompt += ", " + _improved_pos_prompt
                    else:
                        pos_prompt += _improved_pos_prompt
                    if len(neg_prompt) == 0:
                        neg_prompt = _improved_neg_prompt
                    elif neg_prompt[-1] != ",":
                        neg_prompt += ", " + _improved_neg_prompt
                    else:
                        neg_prompt += _improved_neg_prompt
                # run the (slow) prediction and post the resulting image
                result = await run_diffusion(pos_prompt, neg_prompt, img_url, model=model)
                await ctx.channel.send(file=discord.File(result))
            except Exception as e:
                await ctx.reply(str(e))
        else:
            # TODO:
            # tag the channel #channel_name
            # create the channel if we can't find it, tag it and let the user know that we created it
            await ctx.reply(
                f"use this command in the #{channel_name} channel\n"
                "use `/setup` to create the channel if it doesn't exist"
            )
    except Exception as e:
        await ctx.reply(str(e))
# setup: create the channel for the bot
@bot.hybrid_command(
    name="setup",
    description="setup the bot")
async def setup(ctx):
    """
    create the #channel_name channel
    """
    # if the dedicated channel doesn't exist, create it
    if not discord.utils.get(ctx.guild.channels, name=channel_name):
        await ctx.guild.create_text_channel(channel_name, category=ctx.channel.category)
        await ctx.reply(channel_name)
    else:
        # TODO: tag the channel
        await ctx.reply(f"{channel_name} channel already exists")
# run the Discord bot in a background thread
def run_bot():
    if not DISCORD_TOKEN:
        print("DISCORD_TOKEN NOT SET")
        event.set()
    else:
        bot.run(DISCORD_TOKEN)


threading.Thread(target=run_bot).start()
event.wait()

with gr.Blocks() as iface:
    gr.Markdown("# Welcome to Tonic-bot")

iface.queue().launch()
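# Note: the Gradio Blocks page above is only a small landing page; launching it
# keeps the hosting process (e.g. a Space) alive while the Discord bot runs in
# the background thread started by run_bot().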