Update app.py
app.py CHANGED
@@ -1,11 +1,7 @@
+import requests
 import discord
 import logging
 import os
-import uuid
-import torch
-import subprocess
-from huggingface_hub import snapshot_download
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
 from transformers import pipeline
 
 # Logging setup
@@ -15,47 +11,45 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(nam
 intents = discord.Intents.default()
 intents.message_content = True
 
-# Download the Hugging Face model
-huggingface_token = os.getenv("HF_TOKEN")
-model_path = snapshot_download(
-    # repo_id="SG161222/RealVisXL_V4.0",
-    repo_id="cagliostrolab/animagine-xl-3.1",
-
-    repo_type="model",
-    # local_dir="RealVisXL_V4.0",
-    local_dir="animagine-xl-3.1",
-    token=huggingface_token,
-)
-
-# Model loading function
-def load_pipeline(pipeline_type):
-    logging.debug(f'Loading pipeline: {pipeline_type}')
-    if pipeline_type == "text2img":
-        return StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16, use_fast=True)
-    elif pipeline_type == "img2img":
-        return StableDiffusionImg2ImgPipeline.from_pretrained(model_path, torch_dtype=torch.float16, use_fast=True)
-
-# Device setup
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
 # Translation pipeline setup
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
 # Fixed negative prompt
 negative_prompt = "blur, low quality, bad composition, ugly, disfigured, weird colors, low quality, jpeg artifacts, lowres, grainy, deformed structures, blurry, opaque, low contrast, distorted details, details are low"
 
+# Function for calling the Inference API
+def generate_image(prompt, negative_prompt):
+    headers = {
+        "Authorization": f"Bearer {os.getenv('HF_TOKEN')}"
+    }
+    data = {
+        "inputs": {
+            "prompt": prompt,
+            "negative_prompt": negative_prompt,
+            "num_inference_steps": 50
+        }
+    }
+    api_url = "https://api-inference.huggingface.co/models/fluently/Fluently-XL-Final"
+    response = requests.post(api_url, headers=headers, json=data)
+    if response.status_code == 200:
+        image_url = response.json()[0]['url']  # extract the image URL from the response
+        return image_url
+    else:
+        logging.error("API request failed: " + response.text)
+        return None
+
+# Prompt translation function
+def translate_prompt(prompt):
+    logging.debug(f'Translating prompt: {prompt}')
+    translation = translator(prompt, max_length=512)
+    translated_text = translation[0]['translation_text']
+    logging.debug(f'Translated text: {translated_text}')
+    return translated_text
+
 # Discord bot class
 class MyClient(discord.Client):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.is_processing = False
-        self.text2img_pipeline = load_pipeline("text2img").to(device)
-        self.text2img_pipeline.enable_attention_slicing()  # memory optimization
-
     async def on_ready(self):
         logging.info(f'Logged in as {self.user}!')
-        subprocess.Popen(["python", "web.py"])
-        logging.info("The web.py server has started.")
 
     async def on_message(self, message):
        if message.author == self.user:
@@ -68,29 +62,19 @@ class MyClient(discord.Client):
                 logging.debug(f'Translated prompt: {prompt_en}')
                 logging.debug(f'Fixed negative prompt: {negative_prompt}')
 
-
-                user_id = message.author.id
-
-
-
-
+                image_url = generate_image(prompt_en, negative_prompt)
+                user_id = message.author.id
+                if image_url:
+                    await message.channel.send(
+                        f"<@{user_id}> Here is the image you requested: {image_url}"
+                    )
+                else:
+                    await message.channel.send(f"<@{user_id}> Image generation failed.")
             finally:
                 self.is_processing = False
-
-
-
-        images = self.text2img_pipeline(prompt, negative_prompt=negative_prompt, num_inference_steps=50, generator=generator)["images"]
-        image_path = f'/tmp/{uuid.uuid4()}.png'
-        images[0].save(image_path)
-        return image_path
-
-# Prompt translation function
-def translate_prompt(prompt):
-    logging.debug(f'Translating prompt: {prompt}')
-    translation = translator(prompt, max_length=512)
-    translated_text = translation[0]['translation_text']
-    logging.debug(f'Translated text: {translated_text}')
-    return translated_text
+        else:
+            # Send a guidance message when the command is not "!image"
+            await message.channel.send('Please enter a valid command, e.g. "!image A cute cat is sleeping." and an image will be generated.')
 
 # Discord token and bot startup
 if __name__ == "__main__":
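The hunk ends at the if __name__ == "__main__": guard, so the startup lines themselves are not part of this commit's diff. For a discord.py client using the intents configured above, the run block typically looks roughly like the sketch below; the DISCORD_TOKEN environment-variable name and the exact argument list are assumptions for illustration, not something visible in the diff.

    if __name__ == "__main__":
        # Hypothetical startup sketch; names are assumptions, not part of this commit.
        client = MyClient(intents=intents)
        client.run(os.getenv("DISCORD_TOKEN"))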
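One caveat about the new generate_image helper: it expects a JSON response containing an image URL (response.json()[0]['url']). The Hugging Face serverless Inference API for text-to-image models usually takes the prompt as a plain string under "inputs", with generation options under "parameters", and on success returns the generated image as raw bytes rather than a URL. A minimal sketch of that variant follows; the helper name, the /tmp path, and the discord.File upload are illustrative assumptions, not part of this commit.

    import os
    import uuid
    import logging
    import requests

    API_URL = "https://api-inference.huggingface.co/models/fluently/Fluently-XL-Final"

    def generate_image_file(prompt, negative_prompt):
        # Assumed request shape: prompt string under "inputs", options under "parameters".
        headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}
        payload = {
            "inputs": prompt,
            "parameters": {
                "negative_prompt": negative_prompt,
                "num_inference_steps": 50,
            },
        }
        response = requests.post(API_URL, headers=headers, json=payload)
        if response.status_code != 200:
            logging.error("API request failed: " + response.text)
            return None
        # The successful response body is the image itself, so write the bytes to disk.
        image_path = f"/tmp/{uuid.uuid4()}.png"
        with open(image_path, "wb") as f:
            f.write(response.content)
        return image_path

Inside on_message, the saved file could then be posted with await message.channel.send(file=discord.File(image_path)) instead of sending a URL.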