Spaces: Runtime error
Update app.py
Browse files
app.py CHANGED
@@ -1,78 +1,566 @@
 import os
 import requests

-def build_input_prompt(message, chatbot, system_prompt):
-    """
-    Constructs the input prompt string from the chatbot interactions and the current message.
-    """
-    input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
-    for interaction in chatbot:
-        input_prompt = input_prompt + str(interaction[0]) + "</s>\n<|assistant|>\n" + str(interaction[1]) + "\n</s>\n<|user|>\n"

-def post_request_beta(payload):
-    response = requests.post(zephyr_7b_beta, headers=HEADERS, json=payload)
-    response.raise_for_status()  # Will raise an HTTPError if the HTTP request returned an unsuccessful status code
-    return response.json()

-def predict_beta(message, chatbot=[], system_prompt=""):
-    input_prompt = build_input_prompt(message, chatbot, system_prompt)
-    data = {
-        "inputs": input_prompt
-    }

-    try:
+import time
+
+# Record the start time
+start_time = time.time()
+from gradio_client import Client as GRCLIENT
+import sys
+import discord
+from discord.ext import commands
+import json
+import difflib
 import os
+import asyncio
+import io
 import requests
+import re
+from PIL import Image
+
+## Load pre-trained model and tokenizer
+#model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
+#tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
+#
+## Load the content from the file
+#with open('textfile.txt', 'r') as file:
+#    context = file.read()
+
+def setup():
+    os.environ["DISCORD_BOT_TOKEN"] = "MTA0NTMwODMyMzc2MDcxMzc0OA.GsS5dR.yBjbLrDeCFVX96yG4zzVT_rqw7tOhJm5qPQAzc"
+    os.environ["TRAINING_PASSWORD"] = "train4545"
+    os.environ["HUGGINGFACE_TOKEN"] = "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"
+    print("setup complete")
+setup()
+
+global bot_has_replied
+bot_has_replied = False
+
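
setup() above stores the credentials as string literals in the source, and it writes the Hugging Face token under HUGGINGFACE_TOKEN even though the inference code further down reads os.getenv("HF_TOKEN"). A minimal sketch of pulling both values from the host environment (for example, Space secrets) instead, assuming the secrets are named DISCORD_BOT_TOKEN and HF_TOKEN:

import os

# Assumed secret names; adjust to match however the host environment is configured.
DISCORD_BOT_TOKEN = os.environ.get("DISCORD_BOT_TOKEN")
HF_TOKEN = os.environ.get("HF_TOKEN")

if DISCORD_BOT_TOKEN is None or HF_TOKEN is None:
    raise RuntimeError("Set DISCORD_BOT_TOKEN and HF_TOKEN in the environment before starting the bot.")
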
+# Define the intents
+intents = discord.Intents.default()
+intents.message_content = True
+
+# Initialize a bot instance with intents and a prefix
+bot = commands.Bot(command_prefix="!", intents=intents)
+
+# Load settings from settings.json
+with open('settings.json', 'r') as settings_file:
+    settings = json.load(settings_file)
+
+# Retrieve the values from the loaded settings
+developer_ids = settings.get("developer_ids", [])
+
+
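
The bot opens settings.json at import time and only reads a developer_ids list from it, so a missing or malformed file stops startup with an exception. A minimal sketch of a file that satisfies this (the IDs are placeholders, not real Discord user IDs):

import json

example_settings = {
    "developer_ids": [111111111111111111, 222222222222222222]  # placeholder IDs
}
with open("settings.json", "w") as settings_file:
    json.dump(example_settings, settings_file, indent=2)
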
+# Helper used by on_ready: read the text file and derive a number from its length
+async def read_and_calculate(filename):
     try:
+        with open(filename, 'r') as file:
+            content = file.read()
+            # Use the length of the file content as the number
+            number_from_file = len(content)
+            # Perform your calculations here
+            result = number_from_file / 5  # Just an example calculation
+            return result
+    except FileNotFoundError:
+        print(f"File '{filename}' not found.")
+    except ValueError:
+        print(f"Error reading content from '{filename}'. Make sure it contains a valid number.")
+
+@bot.event
+async def on_ready():
+    print(f'Logged in as {bot.user.name}')
+
+    # Read and calculate from the text file
+    result = await read_and_calculate('textfile.txt')
+    if result is not None:
+        new_bot_name = f"ChattyBot [{result:.2f} tkn]"  # Format the result to 2 decimal places
+        await bot.user.edit(username=new_bot_name)
+        #print(f"Calculated result: {result}. Bot name set to {new_bot_name}")
+
+    await bot.change_presence(activity=discord.Game(name="!info for help."))
+
+    # Start the tasks
+    bot.loop.create_task(print_time_left())
+    #bot.loop.create_task(send_sleep_message())
+
+async def send_wake_message():
+    for guild in bot.guilds:
+        for channel in guild.text_channels:
+            if "ai-chat" in channel.name.lower():
+                await channel.send("I am awake and ready to chat!")
+
+# Calculate the time remaining for the countdown
+def get_time_remaining():
+    elapsed_time = time.time() - start_time
+    desired_countdown_time = 29 * 60  # 29 minutes in seconds
+    time_remaining = max(0, desired_countdown_time - elapsed_time)
+    return time_remaining
+
+# Add a task to send the "going to sleep" message after the remaining time
+async def send_sleep_message():
+    time_remaining = get_time_remaining()
+    await asyncio.sleep(time_remaining)
+    await send_sleep_announcement()
+
+async def send_sleep_announcement():
+    for guild in bot.guilds:
+        for channel in guild.text_channels:
+            if "ai-chat" in channel.name.lower():
+                await channel.send("I'm going to sleep in 1 minute. Bye everyone!")
+
+# Task to print the time left every 5 minutes
+async def print_time_left():
+    while True:
+        time_remaining = get_time_remaining()
+        print(f"Time left: {time_remaining / 60:.1f} minutes")
+        await asyncio.sleep(5 * 60)  # Sleep for 5 minutes
+
+
+@bot.event
+async def on_disconnect():
+    await send_sleep_message()
+
+@bot.command()
+async def info(ctx):
+    bot_name = bot.user.name
+    response = f"**Info**\n"
+    response += f"Hello, my name is {bot_name}!\n"
+    response += f"You can use !cmds for a list of commands.\n"
+    response += f"If you want to talk with me, please go to #ai_chat."
+
+    await ctx.send(response)
+
+
+@bot.command()
+async def teach(ctx):
+    def check(author):
+        def inner_check(message):
+            return message.author == author and message.channel == ctx.channel
+        return inner_check
+
+    await ctx.send("Quick info: The teach command creates new intents that can be used to train the bot.")
+    await ctx.send("Quick info: Currently, it's available for everyone to use, but this might be changed if abused.")
+    await ctx.send("Q: Would you like to continue? (yes/no)")
+
+    try:
+        response = await bot.wait_for('message', check=check(ctx.author), timeout=60.0)
+
+        if response.content.lower() != 'yes':
+            await ctx.send("Teaching canceled.")
+            return
+
+        await ctx.send("Q: What topic would you like to teach me?")
+        topic = await bot.wait_for('message', check=check(ctx.author), timeout=60.0)
+
+        await ctx.send("Q: How many responses would you like me to pick from?")
+        num_responses = await bot.wait_for('message', check=check(ctx.author), timeout=60.0)
+
+        response_count = int(num_responses.content)
+        if response_count <= 0:
+            await ctx.send("Please specify a valid number of responses.")
+            return
+
+        responses = []
+        for i in range(response_count):
+            await ctx.send(f"Q: What would you like me to say for response {i + 1}?")
+            response = await bot.wait_for('message', check=check(ctx.author), timeout=60.0)
+            responses.append(response.content)
+
+        await ctx.send("Q: How many patterns would you like to provide?")
+        num_patterns = await bot.wait_for('message', check=check(ctx.author), timeout=60.0)
+
+        pattern_count = int(num_patterns.content)
+        if pattern_count <= 0:
+            await ctx.send("Please specify a valid number of patterns.")
+            return
+
+        patterns = []
+        for i in range(pattern_count):
+            await ctx.send(f"Q: Please provide question {i + 1} for what I should say:")
+            pattern = await bot.wait_for('message', check=check(ctx.author), timeout=60.0)
+            patterns.append(pattern.content)
+
+        await ctx.send("Thank you for teaching me!")
+
+        # You can use the 'topic', 'responses', and 'patterns' variables to create new intents and train the bot.
+        # NOTE: make_intents is not imported in this file; it is assumed to be a local helper module.
+        result = make_intents.make_intent(topic.content, patterns, responses)
+        #await ctx.send(result)
+
+    except asyncio.TimeoutError:
+        await ctx.send("Teaching session timed out. Please start over if you'd like to continue.")
+
+    except ValueError:
+        await ctx.send("Invalid input. Please provide valid numbers for responses and patterns.")
+
+
+
+# Command to train the model
+@bot.command()
+async def train_model(ctx):
+    # Check if the provided password is correct
+    password = os.getenv("TRAINING_PASSWORD")
+    if password is None or ctx.message.content != f"!train_model {password}":
+        await ctx.send("Invalid password!")
+        return
+    await ctx.send("Auth: valid password")
+    if ctx.message.author.id not in developer_ids:
+        await ctx.send("You must be a developer to do this.")
+        return
+    await ctx.send("Auth: ai developer :white_check_mark:")
+    # NOTE: trainnltk is not imported in this file; it is assumed to be a local training module.
+    trainnltk.train()
+    await ctx.send("Created a new model and trained it.")
+
+
+
+# Command to list the current settings
+@bot.command()
+async def settings_list(ctx):
+    if ctx.message.author.id not in developer_ids:
+        await ctx.send("You must be a developer to do this.")
+        return
+    response = f"**Current Settings**\n"
+    response += f"Developer IDs: {', '.join(map(str, developer_ids))}"
+
+    await ctx.send(response)
+
+
+@bot.command()
+async def cmds(ctx):
+    #commands_list = []
+    #for index, command in enumerate(bot.commands, start=1):
+    #    # Exclude the "help" command from the list
+    #    if command.name != 'help':
+    #        commands_list.append(f"{index}. {command.name}")
+
+    #response = f"**Commands List**\n" + "\n".join(commands_list)
+    response = """**Commands List**\n \n!cmds, !info, !dev <>, !restart, !generate_image <>, !ping"""
+    await ctx.send(response)
+
+@bot.command()
+async def ping(ctx):
+    latency = bot.latency * 1000  # Convert to milliseconds
+    await ctx.send(f"Pong! Latency: {latency:.2f}ms")
+
+@bot.command()
+async def restart(ctx):
+    if ctx.message.author.id not in developer_ids:
+        await ctx.send("You must be a developer to do this.")
+        return
+    await ctx.send("Restarting... (developer)")
+    os.execl(sys.executable, sys.executable, *sys.argv)
+
+# Command to change a specific setting
+@bot.command()
+async def setting(ctx, name, *, new_value):
+    # Declare global variable at the beginning of the function
+    global developer_ids
+
+    if ctx.message.author.id not in developer_ids:
+        await ctx.send("You must be a developer to do this.")
+        return
+
+    if name.lower() == 'developer_ids':
+        developer_ids = json.loads(new_value)
+    else:
+        await ctx.send(f"Setting '{name}' does not exist.")
+        return
+
+    await ctx.send(f"Setting '{name}' updated to '{new_value}'.")
+
+
+@bot.command()
+async def chat(ctx, *, msg):
+    # Declare global variable at the beginning of the function
+    global developer_ids
+
+    # Check if the user has the developer role or is in the developer_ids list
+    if ctx.message.author.id not in developer_ids:
+        await ctx.send("You must be a developer to use this command.")
+        return
+    # NOTE: chatbot() is not defined or imported in this file; it is assumed to come from another module.
+    msgs = chatbot(msg)
+    # Replace the following line with your desired bot response logic
+    bot_response = f"{msgs}"
+
+    # Send the bot's response
+    await ctx.send(bot_response)
+
+@bot.command()
+async def dev(ctx, *, message):
+    if ctx.message.author.id not in developer_ids:
+        await ctx.send("You must be a developer to use this command.")
+        return
+
+    for channel in bot.get_all_channels():
+        if isinstance(channel, discord.TextChannel) and "ai-chat" in channel.name.lower():
+            await channel.send(message)
+            print(f'Message sent in server {channel.guild.name} in channel {channel.name}')
+
+
+
+
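
The setting command above parses its value with json.loads, so for developer_ids the argument has to be a JSON array of user IDs, e.g. !setting developer_ids [111111111111111111, 222222222222222222] (placeholder IDs). A small sketch of the parsing it performs:

import json

new_value = "[111111111111111111, 222222222222222222]"  # placeholder IDs
developer_ids = json.loads(new_value)
print(developer_ids)  # [111111111111111111, 222222222222222222]
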
+async def generate_image(text, message):
+    # Generate the image URL
+    image_link = f'https://image.pollinations.ai/prompt/{text}'
+
+    # Download the image
+    response = requests.get(image_link)
+    image_data = response.content
+
+    # Send the image as an attachment
+    image_file = discord.File(io.BytesIO(image_data), filename='generated_image.png')
+
+    # Remove watermark by cropping the image
+    image = Image.open(io.BytesIO(image_data))
+    width, height = image.size
+    left = 0
+    top = 0
+    right = width - 100  # Adjust to remove the watermark
+    bottom = height - 100  # Adjust to remove the watermark
+    cropped_image = image.crop((left, top, right, bottom))
+
+    # Save the cropped image
+    cropped_image.save('cropped_image.png')
+
+    # Send the cropped image as an attachment
+    cropped_file = discord.File('cropped_image.png')
+    await message.reply("Here is your requested image:")
+    await message.reply(file=cropped_file)
+    os.remove('cropped_image.png')
+
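
generate_image interpolates the raw prompt text into the URL path. A minimal standalone sketch of the same request with the prompt percent-encoded explicitly (the prompt string here is just an example):

from urllib.parse import quote

import requests

text = "a red fox in the snow"  # example prompt
image_link = f"https://image.pollinations.ai/prompt/{quote(text)}"

response = requests.get(image_link, timeout=60)
response.raise_for_status()
with open("generated_image.png", "wb") as f:
    f.write(response.content)
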
+# Main chatbot loop
+chatbot_history = []
+@bot.event
+async def on_message(message):
+    if message.author == bot.user:
+        return
+
+    # ------------------------------------------------------------------
+
+    content = message.content.lower()
+
+    # NOTE: since on_message is overridden, prefix commands only run when
+    # process_commands is called below; the commented-out branches
+    # (e.g. !teach, !chat, !setting) are therefore never dispatched.
+    #if content.startswith("!train_model"):
+    #    await bot.process_commands(message)
+    #elif content == "!settings_list":
+    #    await bot.process_commands(message)
+    if content == "!cmds":
+        await bot.process_commands(message)
+    elif content == "!info":
+        await bot.process_commands(message)
+    #elif content == "!teach":
+    #    await bot.process_commands(message)
+    #elif content.startswith("!chat "):
+    #    await bot.process_commands(message)
+    #elif content.startswith("!setting"):
+    #    await bot.process_commands(message)
+    elif content == "!ping":
+        await bot.process_commands(message)
+    elif content == "!restart":
+        await bot.process_commands(message)
+    elif message.content.startswith('!generate_image '):
+        text = message.content[len('!generate_image '):]
+        await generate_image(text, message)
+        return
+    elif message.content.startswith('!dev '):
+        await bot.process_commands(message)
+        print("dev cmd")
+
+    # ------------------------------------------------------------------
+
+    # Compare the channel name against "ai_chat" with difflib's SequenceMatcher ratio
+    channel_name = message.channel.name
+    similarity = difflib.SequenceMatcher(None, channel_name.lower(), "ai_chat").ratio()
+
+    # Only respond if the similarity is at least 80%
+    if similarity < 0.8:
+        print(f"Ignored message in channel {channel_name}: {message.content}")
+        return
+
+    user_input = message.content
+    print(f'Message received in server {message.guild.name} in channel {message.channel.name}: {user_input}')
+
+
+    async with message.channel.typing():
+        # NOTE: the trailing non-greedy group matches an empty string, so the
+        # code below uses the full message content as the image prompt.
+        pattern = r'image: (.*?)'
+        image_descriptions = re.findall(pattern, user_input)
+        desc_limit = 1
+        for description in image_descriptions:
+            if desc_limit <= 0:
+                return
+            desc_limit -= 1
+            # Generate the image URL
+            text = message.content
+            image_link = f'https://image.pollinations.ai/prompt/{text}'
+
+            # Download the image
+            response = requests.get(image_link)
+            image_data = response.content
+
+            # Send the image as an attachment
+            image_file = discord.File(io.BytesIO(image_data), filename='generated_image.png')
+
+            # Remove watermark by cropping the image
+            image = Image.open(io.BytesIO(image_data))
+            width, height = image.size
+            left = 0
+            top = 0
+            right = width - 100  # Adjust to remove the watermark
+            bottom = height - 100  # Adjust to remove the watermark
+            cropped_image = image.crop((left, top, right, bottom))
+
+            # Save the cropped image
+            cropped_image.save('cropped_image.png')
+
+            # Send the cropped image as an attachment
+            cropped_file = discord.File('cropped_image.png')
+            await message.reply("Here is your requested image:")
+            await message.reply(file=cropped_file)
+            os.remove('cropped_image.png')
+            print(description)
+        if not image_descriptions:
+            ot = await message.reply("generating..")
+            # Add a reaction to the message
+            await ot.add_reaction("⏳")  # You can use any emoji you prefer
+            SYSTEM_PROMPT = "As a generative chatbot (you are not a GPT but your structure is 50% the same), your primary function is to provide helpful and friendly responses to user queries. Feel free to add some personality, but make sure your responses are accurate and helpful. Your owner and developer is: @Costikoooo (Discord user) other developers are unknown. Your name is Chattybot."
+
+            zephyr_7b_beta = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta/"
+
+            # NOTE: setup() above sets HUGGINGFACE_TOKEN, not HF_TOKEN, so this is None unless HF_TOKEN is set separately.
+            HF_TOKEN = os.getenv("HF_TOKEN")
+            HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
+
+            def build_input_prompt(message, chatbot, system_prompt):
+                input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
+                for interaction in chatbot:
+                    input_prompt = input_prompt + str(interaction[0]) + "</s>\n<|assistant|>\n" + str(interaction[1]) + "\n</s>\n<|user|>\n"
+
+                input_prompt = input_prompt + str(message) + "</s>\n<|assistant|>"
+                return input_prompt
+
+            def post_request_beta(payload):
+                response = requests.post(zephyr_7b_beta, headers=HEADERS, json=payload)
+                response.raise_for_status()
+                return response.json()
+
+            def predict_beta(message, chatbot=[], system_prompt=""):
+                input_prompt = build_input_prompt(message, chatbot, system_prompt)
+                data = {
+                    "inputs": input_prompt
+                }
+
+                try:
+                    response_data = post_request_beta(data)
+                    json_obj = response_data[0]
+
+                    if 'generated_text' in json_obj and len(json_obj['generated_text']) > 0:
+                        bot_message = json_obj['generated_text']
+                        return bot_message
+                    elif 'error' in json_obj:
+                        raise Exception(json_obj['error'] + ' Please refresh and try again with smaller input prompt')
+                    else:
+                        warning_msg = f"Unexpected response: {json_obj}"
+                        raise Exception(warning_msg)
+                except requests.HTTPError as e:
+                    error_msg = f"Request failed with status code {e.response.status_code}"
+                    raise Exception(error_msg)
+                except json.JSONDecodeError as e:
+                    error_msg = f"Failed to decode response as JSON: {str(e)}"
+                    raise Exception(error_msg)
+
+
+            #client = GRCLIENT("https://wop-open-gpt-chattybot.hf.space/--replicas/l7cr5/")
+
+            async def oof():
+                try:
+                    a = predict_beta(message.content, chatbot_history, SYSTEM_PROMPT)
+                    return a
+                except Exception as error:
+                    return "error"
+
+            result = await oof()
+
+            if not result == "error":
+                await ot.add_reaction("✅")
+                chatbot_history.append((message.content, result))
+            else:
+                await ot.add_reaction("❌")
+
+            # Split the result into messages if it's more than 2000 characters
+            if len(result) > 2000:
+                # Split the result into chunks of 2000 characters
+                chunks = [result[i:i + 2000] for i in range(0, len(result), 2000)]
+
+                # Send each chunk as a separate message
+                for chunk in chunks:
+                    await message.channel.send(chunk)
+            else:
+                # Edit the original message with the result
+                await ot.edit(content=result)
+
+
+            #try:
+            #    with torch.no_grad():
+            #        # Tokenize input
+            #        tokens = tokenizer.tokenize(message.content + " " + context)
+            #        input_ids = tokenizer.convert_tokens_to_ids(tokens)
+            #        input_ids = torch.tensor([input_ids])
+            #
+            #        # Get answer start and end scores
+            #        outputs = model(input_ids)
+            #        answer_start_scores = outputs.start_logits
+            #        answer_end_scores = outputs.end_logits
+            #
+            #        # Get the most likely beginning and end of the answer
+            #        answer_start = torch.argmax(answer_start_scores)
+            #        answer_end = torch.argmax(answer_end_scores) + 1
+            #
+            #        # Combine the tokens in the window and append to the final answer
+            #        final_answer = ' '.join(tokens[answer_start:answer_end])
+            #
+            #        # Check if the reaction has been added
+            #        ot = await ot.channel.fetch_message(ot.id)
+            #        reaction = next((r for r in ot.reactions if str(r.emoji) == '🚫'), None)
+            #        if reaction and reaction.count > 1:  # Reaction pressed by someone other than the bot
+            #            # Handle the interruption if needed
+            #            pass
+            #
+            #        # Sleep for a short duration to avoid blocking the event loop
+            #        await asyncio.sleep(0.1)
+            #
+            #        # Edit the message with the final answer
+            #        await ot.edit(content=final_answer.strip())
+            #
+            #except asyncio.CancelledError:
+            #    # Handle cancellation if needed
+            #    pass
+            #
+            ##await sent_message.add_reaction('👍')  # Adds the "1" reaction
+            #await sent_message.add_reaction('👎')  # Adds the "2" reaction
+            # Reaction detection
+            # def check(reaction, user):
+            #     return user == message.author and str(reaction.emoji) in ['👍', '👎']
+
+            # try:
+            #     reaction, user = await bot.wait_for('reaction_add', timeout=8, check=check)
+
+            #     if str(reaction.emoji) == '👍':
+            #         # Perform the action when a thumbs up is added
+            #         # Add your code here
+            #         fallback.operate_feedback("good", message.content, sent_message.content)
+            #         print("thumbsup")
+            #     elif str(reaction.emoji) == '👎':
+            #         # Perform the action when a thumbs down is added
+            #         # Add your code here
+            #         fallback.operate_feedback("bad", message.content, sent_message.content)
+            #         print("thumbsdown")
+
+            # except asyncio.TimeoutError:
+            #     # Remove reactions after 1 minute
+            #     await sent_message.clear_reactions()
+
+
+# ...
+
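
For reference, a standalone sketch of what build_input_prompt and predict_beta above do together: build the Zephyr-style prompt from a (user, assistant) history and POST it to the Inference API, which returns a list of the form [{"generated_text": ...}]. It assumes an HF_TOKEN environment variable and uses an example history:

import os

import requests

API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta/"
HEADERS = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}

system_prompt = "You are a friendly chatbot."             # example system prompt
history = [("Hi!", "Hello! How can I help you today?")]   # example (user, assistant) pairs
message = "What can you do?"                              # example new user message

prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
for user_turn, assistant_turn in history:
    prompt += user_turn + "</s>\n<|assistant|>\n" + assistant_turn + "\n</s>\n<|user|>\n"
prompt += message + "</s>\n<|assistant|>"

response = requests.post(API_URL, headers=HEADERS, json={"inputs": prompt})
response.raise_for_status()
print(response.json()[0]["generated_text"])
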
+if __name__ == "__main__":
+    if os.getenv("DISCORD_BOT_TOKEN") is None:
+        print("Please set the DISCORD_BOT_TOKEN environment variable.")
+    else:
+        #keep_alive.keep_alive()
+        bot.run(os.getenv("DISCORD_BOT_TOKEN"))
+
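
Finally, the channel gate in on_message only replies where the channel name is at least 80% similar to "ai_chat" according to difflib.SequenceMatcher. A small sketch that prints the ratio for a few example channel names:

import difflib

for name in ("ai_chat", "ai-chat", "general"):
    ratio = difflib.SequenceMatcher(None, name, "ai_chat").ratio()
    print(f"{name}: {ratio:.2f} -> {'reply' if ratio >= 0.8 else 'ignore'}")
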