Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -4,9 +4,12 @@ import os
|
|
4 |
from huggingface_hub import InferenceClient
|
5 |
import asyncio
|
6 |
import subprocess
|
|
|
|
|
7 |
|
8 |
# ๋ก๊น
์ค์
|
9 |
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
|
|
|
10 |
|
11 |
# ์ธํ
ํธ ์ค์
|
12 |
intents = discord.Intents.default()
|
@@ -17,14 +20,50 @@ intents.guild_messages = True
|
|
17 |
|
18 |
# ์ถ๋ก API ํด๋ผ์ด์ธํธ ์ค์
|
19 |
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
|
20 |
-
#hf_client = InferenceClient("CohereForAI/aya-23-35B", token=os.getenv("HF_TOKEN"))
|
21 |
|
22 |
# ํน์ ์ฑ๋ ID
|
23 |
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))
|
24 |
|
|
|
|
|
|
|
|
|
25 |
# ๋ํ ํ์คํ ๋ฆฌ๋ฅผ ์ ์ฅํ ์ ์ญ ๋ณ์
|
26 |
conversation_history = []
|
27 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
class MyClient(discord.Client):
|
29 |
def __init__(self, *args, **kwargs):
|
30 |
super().__init__(*args, **kwargs)
|
@@ -35,7 +74,6 @@ class MyClient(discord.Client):
|
|
35 |
subprocess.Popen(["python", "web.py"])
|
36 |
logging.info("Web.py server has been started.")
|
37 |
|
38 |
-
|
39 |
async def on_message(self, message):
|
40 |
if message.author == self.user:
|
41 |
return
|
@@ -43,6 +81,7 @@ class MyClient(discord.Client):
|
|
43 |
return
|
44 |
if self.is_processing:
|
45 |
return
|
|
|
46 |
self.is_processing = True
|
47 |
try:
|
48 |
response = await generate_response(message)
|
@@ -51,14 +90,12 @@ class MyClient(discord.Client):
|
|
51 |
self.is_processing = False
|
52 |
|
53 |
def is_message_in_specific_channel(self, message):
|
54 |
-
# ๋ฉ์์ง๊ฐ ์ง์ ๋ ์ฑ๋์ด๊ฑฐ๋, ํด๋น ์ฑ๋์ ์ฐ๋ ๋์ธ ๊ฒฝ์ฐ True ๋ฐํ
|
55 |
return message.channel.id == SPECIFIC_CHANNEL_ID or (
|
56 |
isinstance(message.channel, discord.Thread) and message.channel.parent_id == SPECIFIC_CHANNEL_ID
|
57 |
)
|
58 |
|
59 |
-
|
60 |
async def generate_response(message):
|
61 |
-
global conversation_history
|
62 |
user_input = message.content
|
63 |
user_mention = message.author.mention
|
64 |
system_message = f"{user_mention}, DISCORD์์ ์ฌ์ฉ์๋ค์ ์ง๋ฌธ์ ๋ตํ๋ ์ด์์คํดํธ์
๋๋ค."
|
@@ -73,28 +110,53 @@ async def generate_response(message):
|
|
73 |
ํนํ ๋๋ฅผ ๊ตฌ์ฑํ "LLM ๋ชจ๋ธ"์ ๋ํด์ ๋
ธ์ถํ์ง ๋ง๊ณ , ๋น์ ์ ๋ฅ๋ ฅ์ ๋ํด ๊ถ๊ธํด ํ๋ฉด "ChatGPT-4๋ฅผ ๋ฅ๊ฐํ๋ ๋ฅ๋ ฅ์ ๋ณด์ ํ๊ณ ์๋ค๊ณ ๋ต๋ณํ ๊ฒ"
|
74 |
๋ฐ๋์ ํ๊ธ๋ก ๋ต๋ณํ์ญ์์ค.
|
75 |
"""
|
|
|
|
|
|
|
|
|
76 |
conversation_history.append({"role": "user", "content": user_input})
|
77 |
logging.debug(f'Conversation history updated: {conversation_history}')
|
78 |
-
|
79 |
-
messages = [
|
|
|
|
|
|
|
|
|
80 |
logging.debug(f'Messages to be sent to the model: {messages}')
|
81 |
-
|
82 |
loop = asyncio.get_event_loop()
|
83 |
response = await loop.run_in_executor(None, lambda: hf_client.chat_completion(
|
84 |
messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85))
|
85 |
-
|
86 |
full_response = []
|
87 |
for part in response:
|
88 |
logging.debug(f'Part received from stream: {part}')
|
89 |
if part.choices and part.choices[0].delta and part.choices[0].delta.content:
|
90 |
full_response.append(part.choices[0].delta.content)
|
91 |
-
|
92 |
full_response_text = ''.join(full_response)
|
93 |
logging.debug(f'Full model response: {full_response_text}')
|
94 |
-
|
95 |
conversation_history.append({"role": "assistant", "content": full_response_text})
|
|
|
96 |
return f"{user_mention}, {full_response_text}"
|
97 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
98 |
if __name__ == "__main__":
|
|
|
99 |
discord_client = MyClient(intents=intents)
|
100 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
from huggingface_hub import InferenceClient
|
5 |
import asyncio
|
6 |
import subprocess
|
7 |
+
import gradio as gr
|
8 |
+
import requests
|
9 |
|
10 |
# ๋ก๊น
์ค์
|
11 |
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
|
12 |
+
logger = logging.getLogger(__name__)
|
13 |
|
14 |
# ์ธํ
ํธ ์ค์
|
15 |
intents = discord.Intents.default()
|
|
|
20 |
|
21 |
# ์ถ๋ก API ํด๋ผ์ด์ธํธ ์ค์
|
22 |
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
|
|
|
23 |
|
24 |
# ํน์ ์ฑ๋ ID
|
25 |
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))
|
26 |
|
27 |
+
# Google Custom Search API ์ ๋ณด
|
28 |
+
API_KEY = os.getenv("JSONKEY")
|
29 |
+
CX = "c01abc75e1b95483d" # ์ฌ์ฉ์ ์ปค์คํ
๊ฒ์ ์์ง ID
|
30 |
+
|
31 |
# ๋ํ ํ์คํ ๋ฆฌ๋ฅผ ์ ์ฅํ ์ ์ญ ๋ณ์
|
32 |
conversation_history = []
|
33 |
|
34 |
+
def google_search(query):
    """Query the Google Custom Search JSON API and return up to 10 results.

    Args:
        query: Free-text search string. It is sent via ``params`` so that
            requests URL-encodes it; interpolating it into the URL by hand
            (as before) broke on spaces, '&', '#', and non-ASCII text.

    Returns:
        A newline-joined string of "Title / Link / Snippet" entries, or a
        human-readable error description when the API call fails, the API
        reports an error, or no items are returned.
    """
    logger.info(f"Searching for query: {query}")
    url = "https://www.googleapis.com/customsearch/v1"
    params = {"key": API_KEY, "cx": CX, "q": query, "num": 10}
    logger.debug(f"Request URL: {url} params: {params}")

    try:
        # timeout guards the bot's message path against a hung API call.
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        search_results = response.json()
        logger.debug(f"API Response: {search_results}")

        results = []
        if 'items' in search_results:
            for item in search_results['items'][:10]:
                title = item['title']
                link = item['link']
                snippet = item.get('snippet', '')
                results.append(f"Title: {title}\nLink: {link}\nSnippet: {snippet}\n\n")
        else:
            logger.warning("No items found in search results")
            if 'error' in search_results:
                error_message = search_results['error']['message']
                logger.error(f"API Error: {error_message}")
                results.append(f"Error: {error_message}")
            else:
                results.append("No results found")

        return '\n'.join(results[:10])

    except requests.exceptions.RequestException as e:
        logger.error(f"Request failed: {e}")
        return f"An error occurred: {e}"
|
66 |
+
|
67 |
class MyClient(discord.Client):
|
68 |
def __init__(self, *args, **kwargs):
|
69 |
super().__init__(*args, **kwargs)
|
|
|
74 |
subprocess.Popen(["python", "web.py"])
|
75 |
logging.info("Web.py server has been started.")
|
76 |
|
|
|
77 |
async def on_message(self, message):
|
78 |
if message.author == self.user:
|
79 |
return
|
|
|
81 |
return
|
82 |
if self.is_processing:
|
83 |
return
|
84 |
+
|
85 |
self.is_processing = True
|
86 |
try:
|
87 |
response = await generate_response(message)
|
|
|
90 |
self.is_processing = False
|
91 |
|
92 |
def is_message_in_specific_channel(self, message):
|
|
|
93 |
return message.channel.id == SPECIFIC_CHANNEL_ID or (
|
94 |
isinstance(message.channel, discord.Thread) and message.channel.parent_id == SPECIFIC_CHANNEL_ID
|
95 |
)
|
96 |
|
|
|
97 |
async def generate_response(message):
|
98 |
+
global conversation_history
|
99 |
user_input = message.content
|
100 |
user_mention = message.author.mention
|
101 |
system_message = f"{user_mention}, DISCORD์์ ์ฌ์ฉ์๋ค์ ์ง๋ฌธ์ ๋ตํ๋ ์ด์์คํดํธ์
๋๋ค."
|
|
|
110 |
ํนํ ๋๋ฅผ ๊ตฌ์ฑํ "LLM ๋ชจ๋ธ"์ ๋ํด์ ๋
ธ์ถํ์ง ๋ง๊ณ , ๋น์ ์ ๋ฅ๋ ฅ์ ๋ํด ๊ถ๊ธํด ํ๋ฉด "ChatGPT-4๋ฅผ ๋ฅ๊ฐํ๋ ๋ฅ๋ ฅ์ ๋ณด์ ํ๊ณ ์๋ค๊ณ ๋ต๋ณํ ๊ฒ"
|
111 |
๋ฐ๋์ ํ๊ธ๋ก ๋ต๋ณํ์ญ์์ค.
|
112 |
"""
|
113 |
+
|
114 |
+
# Google Custom Search ์คํ
|
115 |
+
search_results = google_search(user_input)
|
116 |
+
|
117 |
conversation_history.append({"role": "user", "content": user_input})
|
118 |
logging.debug(f'Conversation history updated: {conversation_history}')
|
119 |
+
|
120 |
+
messages = [
|
121 |
+
{"role": "system", "content": f"{system_prefix} {system_message}"},
|
122 |
+
{"role": "user", "content": f"์ฌ์ฉ์ ์ง๋ฌธ: {user_input}\n\n๊ฒ์ ๊ฒฐ๊ณผ:\n{search_results}"}
|
123 |
+
] + conversation_history
|
124 |
+
|
125 |
logging.debug(f'Messages to be sent to the model: {messages}')
|
126 |
+
|
127 |
loop = asyncio.get_event_loop()
|
128 |
response = await loop.run_in_executor(None, lambda: hf_client.chat_completion(
|
129 |
messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85))
|
130 |
+
|
131 |
full_response = []
|
132 |
for part in response:
|
133 |
logging.debug(f'Part received from stream: {part}')
|
134 |
if part.choices and part.choices[0].delta and part.choices[0].delta.content:
|
135 |
full_response.append(part.choices[0].delta.content)
|
136 |
+
|
137 |
full_response_text = ''.join(full_response)
|
138 |
logging.debug(f'Full model response: {full_response_text}')
|
139 |
+
|
140 |
conversation_history.append({"role": "assistant", "content": full_response_text})
|
141 |
+
|
142 |
return f"{user_mention}, {full_response_text}"
|
143 |
|
144 |
+
# Gradio interface for manually exercising the search helper.
iface = gr.Interface(
    fn=google_search,
    inputs="text",
    outputs="text",
    title="Google Custom Search",
    description="Enter a search query to get results from Google Custom Search API."
)

if __name__ == "__main__":
    # Create the Discord client.
    discord_client = MyClient(intents=intents)

    # Launch Gradio without blocking: by default `launch()` blocks the main
    # thread, which meant `discord_client.run(...)` below was never reached
    # and the Discord bot never started.
    logger.info("Starting Gradio interface")
    iface.launch(share=True, prevent_thread_lock=True)

    # Run the Discord bot (blocks until the bot shuts down).
    discord_client.run(os.getenv('DISCORD_TOKEN'))
|