Update app.py
app.py
CHANGED
@@ -1,232 +1,449 @@
 import os
 import gradio as gr
-
 from typing import Iterator
 import google.generativeai as genai
-import

-#
 GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)

-#
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

-    for message in messages:
-        # Skip thinking messages (messages with metadata)
-        if not (message.get("role") == "assistant" and "metadata" in message):
-            formatted_history.append({
-                "role": "user" if message.get("role") == "user" else "assistant",
-                "parts": [message.get("content", "")]
-            })
-    return formatted_history
-
-def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
     """
-
     """
-        #
-        #
-                content="",
-                metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-            )
-        )
-
-        for chunk in response:
-            parts = chunk.candidates[0].content.parts
             current_chunk = parts[0].text

-                thought_buffer += current_chunk
-                print(f"\n=== Complete Thought ===\n{thought_buffer}")
-
-                messages[-1] = ChatMessage(
-                    role="assistant",
-                    content=thought_buffer,
-                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                )
-                yield messages
-
-                # Start response
-                response_buffer = parts[1].text
-                print(f"\n=== Starting Response ===\n{response_buffer}")
-
-                messages.append(
-                    ChatMessage(
-                        role="assistant",
-                        content=response_buffer
-                    )
-                )
-                thinking_complete = True
-
-            elif thinking_complete:
-                # Stream response
-                response_buffer += current_chunk
-                print(f"\n=== Response Chunk ===\n{current_chunk}")
-
-                messages[-1] = ChatMessage(
-                    role="assistant",
-                    content=response_buffer
-                )

-                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                )
-                # time.sleep(0.05)  # Optional: uncomment to add a slight delay for debugging/visualizing streaming; remove for the final version.

-        messages.append(
-            ChatMessage(
-                role="assistant",
-                content=f"I apologize, but I encountered an error: {str(e)}"
-            )
-        )
-        yield messages

-    """
-    history.append(ChatMessage(role="user", content=msg))
-    return "", history

-    gr.
-        <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Faiqcamp-Gemini2-Flash-Thinking.hf.space&countColor=%23263759" />
-        </a>""")

     chatbot = gr.Chatbot(
-        render_markdown=True
-        scale=1,
-        avatar_images=(None, "https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
     )

-    with gr.Row(
-        label="
-    )
-
-    clear_button = gr.Button("Clear Chat", scale=1)
-
-    # Add example prompts - removed file upload examples; kept text-focused examples.
-    example_prompts = [
-        ["Write a short poem about the sunset."],
-        ["Explain the theory of relativity in simple terms."],
-        ["If a train leaves Chicago at 6am traveling at 60mph, and another train leaves New York at 8am traveling at 80mph, at what time will they meet?"],
-        ["Summarize the plot of Hamlet."],
-        ["Write a haiku about a cat."]
-    ]
-    )

-    msg_store = gr.State("")  # Store for preserving the user message
-
-    input_box.submit(
-        lambda msg: (msg, msg, ""),  # Store message and clear input
-        inputs=[input_box],
-        outputs=[msg_store, input_box, input_box],
-        queue=False
-    ).then(
-        user_message,  # Add user message to chat
-        inputs=[msg_store, chatbot],
-        outputs=[input_box, chatbot],
-        queue=False
-    ).then(
-        stream_gemini_response,  # Generate and stream response
-        inputs=[msg_store, chatbot],
-        outputs=chatbot
-    )

-        This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
-        You can observe the model's thought process as it generates responses, displayed with the "⚙️ Thinking" prefix.
-
-        **Try out the example prompts below to see Gemini in action!**
-
-        **Key Features:**
-        * Powered by Google's **Gemini 2.0 Flash** model.
-        * Shows the model's **thoughts** before the final answer (experimental feature).
-        * Supports **conversation history** for multi-turn chats.
-        * Uses **streaming** for a more interactive experience.
-        **Instructions:**
-        1. Type your message in the input box below or select an example.
-        2. Press Enter or click Submit to send.
-        3. Observe the chatbot's "Thinking" process followed by the final response.
-        4. Use the "Clear Chat" button to start a new conversation.
-
-        *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary.
-        """
     )

-# Launch the interface
 if __name__ == "__main__":
-    demo.launch(debug=True)
 import os
 import gradio as gr
+import random
+import time
+import logging
 from typing import Iterator
+
 import google.generativeai as genai
+from gradio import ChatMessage  # uses the ChatMessage structure (lets Thinking and Response be told apart)
+
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.FileHandler("api_debug.log"),
+        logging.StreamHandler()
+    ]
+)
+logger = logging.getLogger("idea_generator")

+# Configure the Gemini API key
 GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)

+# Gemini 2.0 Flash model to use (includes the Thinking feature)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

+##############################################################################
+# Pick one of the two options separated by a slash ("/") in a transformation string
+##############################################################################
+def choose_alternative(transformation):
+    if "/" not in transformation:
+        return transformation
+    parts = transformation.split("/")
+    if len(parts) != 2:
+        return random.choice([part.strip() for part in parts])
+    left = parts[0].strip()
+    right = parts[1].strip()
+    if " " in left:
+        tokens = left.split(" ", 1)
+        prefix = tokens[0]
+        if not right.startswith(prefix):
+            option1 = left
+            option2 = prefix + " " + right
+        else:
+            option1 = left
+            option2 = right
+        return random.choice([option1, option2])
+    else:
+        return random.choice([left, right])
+
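Reviewer note: `choose_alternative` re-attaches a shared leading word when only the left half of a slash pair carries it. A minimal sketch of the expected behavior (illustrative calls against the function above, not part of the commit):

```python
import random
random.seed(0)  # only so this sketch is reproducible

choose_alternative("left/right movement")        # -> "left" or "right movement"
choose_alternative("volume increase/decrease")   # -> "volume increase" or "volume decrease"
choose_alternative("rolling")                    # -> "rolling" (no slash, returned unchanged)
```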
+##############################################################################
+# Category dictionary (an excerpt; 15 categories are kept here)
+##############################################################################
+physical_transformation_categories = {
+    "Spatial movement": [
+        "forward/backward movement", "left/right movement", "up/down movement", "vertical-axis rotation (nodding)",
+        "horizontal-axis rotation (head shaking)", "longitudinal-axis rotation (tilting sideways)", "circular movement", "spiral movement",
+        "sliding due to inertia", "rotation-axis change", "irregular rotation", "swaying movement", "parabolic movement",
+        "zero-gravity floating", "floating on the water surface", "jump/leap", "sliding", "rolling", "free fall",
+        "reciprocating movement", "elastic bouncing", "penetration", "evasive movement", "zigzag movement", "swinging movement"
+    ],
+
+    "Size and shape change": [
+        "volume increase/decrease", "length increase/decrease", "width increase/decrease", "height increase/decrease",
+        "density change", "weight increase/decrease", "shape change", "phase change", "uneven deformation",
+        "complex shape change", "twisting/entangling", "non-uniform expansion/contraction", "edge rounding/sharpening",
+        "cracking/splitting", "breaking into pieces", "water resistance", "dust resistance", "denting/restoring",
+        "folding/unfolding", "squeezing/expanding", "stretching/shrinking", "crumpling/flattening", "crushing/hardening",
+        "curling/straightening", "snapping/bending"
+    ],
+
+    "Surface and appearance change": [
+        "color change", "texture change", "transparency/opacity change", "glossy/matte change",
+        "reflectivity change", "pattern change", "angle-dependent color change", "light-dependent color change",
+        "temperature-dependent color change", "hologram effect", "light reflection by surface angle", "surface shape deformation",
+        "ultra-fine surface structure change", "visual perception effect", "stain/pattern formation", "blur/sharpness change",
+        "gloss/sheen change", "hue/saturation change", "luminescence/fluorescence", "light-scattering effect",
+        "light-absorption change", "translucency effect", "shadow-effect change", "UV-response change",
+        "glow-in-the-dark effect"
+    ],
+
+    "State-of-matter change": [
+        "solid/liquid/gas transition", "crystallization/dissolution", "oxidation/corrosion", "hardening/softening",
+        "special state transition", "amorphous/crystalline transition", "component separation", "fine-particle formation/decomposition",
+        "gel formation/loosening", "metastable-state change", "molecular self-assembly/disassembly", "delayed phase-change phenomenon",
+        "melting", "solidifying", "evaporation/condensation", "sublimation/deposition", "precipitation/suspension", "dispersion/aggregation",
+        "drying/moistening", "swelling/shrinking", "freezing/thawing", "weathering/erosion", "charging/discharging",
+        "bonding/separation", "fermentation/decay"
+    ],
+
+    "Heat-related change": [
+        "temperature rise/fall", "thermal expansion/contraction", "heat transfer/blocking", "pressure rise/fall",
+        "magnetization from heat change", "entropy change", "thermoelectric phenomenon", "heat change induced by a magnetic field",
+        "heat storage/release during phase change", "thermal-stress buildup/relief", "effect of abrupt temperature change",
+        "cooling/heating by radiant heat", "exothermic/endothermic", "heat-distribution change", "heat reflection/absorption",
+        "cooling condensation", "thermal activation", "thermal discoloration", "thermal-expansion coefficient change", "thermal-stability change",
+        "heat resistance/cold resistance", "self-heating", "thermal equilibrium/imbalance", "thermal deformation", "heat dispersion/concentration"
+    ],
+
+    "Movement-characteristics change": [
+        "acceleration/deceleration", "constant-speed maintenance", "vibration/vibration damping", "bumping/bouncing",
+        "rotation-speed increase/decrease", "rotation-direction change", "irregular movement", "stick-slip phenomenon",
+        "resonance/anti-resonance", "drag/lift change in a fluid", "movement-resistance change", "compound vibratory movement",
+        "movement in special fluids", "coupled rotation-translation movement", "inertia maintenance", "shock absorption",
+        "shock transmission", "momentum conservation", "friction change", "inertial escape", "unstable equilibrium",
+        "dynamic stability", "sway damping", "path predictability", "evasive movement"
+    ],
+
+    "Structural change": [
+        "part addition/removal", "assembly/disassembly", "folding/unfolding", "deformation/restoration", "optimal structural change",
+        "self-rearrangement", "natural pattern formation/disappearance", "regular pattern change", "modular transformation",
+        "complexity-increasing structure", "original-shape memory effect", "shape change over time", "partial removal",
+        "partial replacement", "bonding", "separation", "division/integration", "overlapping/stacking", "internal structural change",
+        "external structural change", "center-axis shift", "balance-point change", "hierarchical structural change", "support-structure change",
+        "stress-distribution structure", "shock-absorbing structure", "grid/matrix structure change", "interconnectivity change"
+    ],
+
+    "Electrical and magnetic change": [
+        "magnetism creation/disappearance", "charge increase/decrease", "electric-field creation/disappearance", "magnetic-field creation/disappearance",
+        "superconducting-state transition", "ferroelectric-property change", "quantum-state change", "plasma-state formation/disappearance",
+        "spin-wave transmission", "electricity generation by light", "electricity generation by pressure", "current change in a magnetic field",
+        "electrical-resistance change", "electrical-conductivity change", "static-electricity generation/discharge", "electromagnetic induction",
+        "electromagnetic-wave emission/absorption", "capacitance change", "magnetic-hysteresis phenomenon", "electrical polarization",
+        "electron-flow direction change", "electrical resonance", "electrical shielding/exposure", "magnetic shielding/exposure",
+        "magnetic-field direction alignment"
+    ],
+
+    "Chemical change": [
+        "surface-coating change", "material-composition change", "chemical-reaction change", "catalytic action start/stop",
+        "chemical reaction by light", "chemical reaction by electricity", "monomolecular-film formation", "molecular-level computation change",
+        "biomimetic surface change", "environment-responsive material change", "periodic chemical reaction", "oxidation", "reduction",
+        "polymerization", "water splitting", "compound formation", "radiation influence", "acid-base reaction", "neutralization reaction",
+        "ionization", "chemical adsorption/desorption", "catalyst-efficiency change", "enzyme-activity change", "color-development reaction",
+        "pH change", "chemical-equilibrium shift", "bond formation/breaking", "solubility change"
+    ],
+
+    "Time-related change": [
+        "aging/weathering", "wear/corrosion", "fading/discoloration", "damage/recovery", "life-cycle change",
+        "adaptation through user interaction", "learning-based shape optimization", "property change over time",
+        "collective memory effect", "cultural-meaning change", "delayed reaction", "previous-state-dependent change",
+        "gradual temporal change", "evolutionary change", "periodic regeneration", "seasonal-change adaptation",
+        "biorhythm change", "life-cycle stage", "growth/decline", "self-repair/regeneration",
+        "natural-cycle adaptation", "persistence/transience", "memory effect", "delayed action", "cumulative effect"
+    ],
+
+    "Light and visual effects": [
+        "light emission/extinction", "light transmission/blocking", "light scattering/focusing", "color-spectrum change", "light diffraction",
+        "light interference", "hologram creation", "laser effect", "light polarization", "fluorescence/phosphorescence",
+        "UV/IR emission", "optical illusion", "light refraction", "shadow creation/removal",
+        "chromatic-aberration effect", "rainbow effect", "glow effect", "flash effect", "lighting pattern",
+        "beam effect", "optical-filter effect", "light directionality change", "projection effect", "light detection/response",
+        "luminosity change"
+    ],
+
+    "Sound and vibration effects": [
+        "sound creation/disappearance", "pitch change", "loudness change", "timbre change",
+        "resonance/anti-resonance", "acoustic vibration", "ultrasonic/infrasonic generation", "sound focusing/dispersion",
+        "sound reflection/absorption", "acoustic Doppler effect", "sound-wave interference", "acoustic resonance",
+        "vibration-pattern change", "percussion effect", "acoustic feedback", "sound shielding/amplification",
+        "sound directionality", "acoustic distortion", "beat generation", "harmonics generation", "frequency modulation",
+        "acoustic shock wave", "acoustic filtering", "sound-propagation pattern", "vibration damping"
+    ],
+
+    "Biological change": [
+        "growth/atrophy", "cell division/death", "bioluminescence", "metabolic change", "immune response",
+        "hormone secretion", "neural response", "gene expression", "adaptation/evolution", "biorhythm change",
+        "regeneration/healing", "aging/maturation", "biomimetic change", "biofilm formation", "biological decomposition",
+        "enzyme activation/deactivation", "biological signaling", "stress response", "body-temperature regulation",
+        "biological-clock change", "extracellular-matrix change", "biomechanical response", "cell motility",
+        "cell-polarity change", "nutritional-state change"
+    ],
+
+    "Environmental interaction": [
+        "temperature response", "humidity response", "air-pressure response", "gravity response", "magnetic-field response",
+        "light response", "sound response", "chemical detection", "mechanical-stimulus detection", "electrical-stimulus response",
+        "radiation response", "vibration detection", "pH response", "solvent response", "gas exchange",
+        "environmental-pollution response", "weather response", "seasonal-change response", "circadian response", "ecosystem interaction",
+        "symbiosis/competition response", "predator/prey relationship", "swarm formation", "territory establishment", "migration/settlement pattern"
+    ],
+
+    "Sensor functions": [
+        "visual sensing/detection", "auditory sensing/detection", "tactile sensing/detection", "taste sensing/detection", "smell sensing/detection",
+        "temperature sensing/detection", "humidity sensing/detection", "pressure sensing/detection", "acceleration sensing/detection", "rotation sensing/detection",
+        "proximity sensing/detection", "position sensing/detection", "motion sensing/detection", "gas sensing/detection", "infrared sensing/detection",
+        "ultraviolet sensing/detection", "radiation sensing/detection", "magnetic-field sensing/detection", "electric-field sensing/detection", "chemical sensing/detection",
+        "biosignal sensing/detection", "vibration sensing/detection", "noise sensing/detection", "light-intensity sensing/detection", "light-wavelength sensing/detection",
+        "tilt sensing/detection", "pH sensing/detection", "current sensing/detection", "voltage sensing/detection", "image sensing/detection",
+        "distance sensing/detection", "depth sensing/detection", "gravity sensing/detection", "speed sensing/detection", "flow sensing/detection",
+        "water-level sensing/detection", "turbidity sensing/detection", "salinity sensing/detection", "metal detection", "piezoelectric sensing/detection",
+        "photoelectric sensing/detection", "thermocouple sensing/detection", "Hall-effect sensing/detection", "ultrasonic sensing/detection", "radar sensing/detection",
+        "lidar sensing/detection", "touch sensing/detection", "gesture sensing/detection", "heart-rate sensing/detection", "blood-pressure sensing/detection"
+    ]
+}

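Reviewer note: each category maps to a list of candidate transformation strings, so drawing one base idea is a two-step sample. A minimal sketch (hypothetical draw, depends on the random state):

```python
import random

category = "Spatial movement"
raw = random.choice(physical_transformation_categories[category])  # e.g. "left/right movement"
idea = choose_alternative(raw)                                     # resolves a slash pair to one side
```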
+##############################################################################
+# Streaming Gemini API function:
+# - composed of a 'Thinking' stage (the model's internal reasoning) and a final 'Response' stage
+##############################################################################
+def query_gemini_api_stream(prompt: str) -> Iterator[str]:
     """
+    Separates Gemini 2.0 Flash output into its 'Thinking' part and its
+    'Response' part and yields them as streaming chunks.
     """
+    # initialize a chat (one-shot call, no history)
+    chat = model.start_chat(history=[])
+    response = chat.send_message(prompt, stream=True)
+
+    thought_buffer = ""
+    response_buffer = ""
+    thinking_complete = False
+
+    for chunk in response:
+        # each chunk carries candidates[0].content.parts
+        parts = chunk.candidates[0].content.parts
+
+        # e.g. when parts has 2 entries: (0: Thinking, 1: start of Response);
+        # otherwise parts may arrive one at a time
+        if len(parts) == 2 and not thinking_complete:
+            # still thinking, but the completed Thinking plus the start of the Response arrived at once
+            thought_buffer += parts[0].text
+            yield f"[Thinking Chunk] {parts[0].text}"
+
+            response_buffer = parts[1].text
+            yield f"[Response Start] {parts[1].text}"
+
+            thinking_complete = True
+        elif thinking_complete:
+            # Thinking is already done -> keep streaming the Response
             current_chunk = parts[0].text
+            response_buffer += current_chunk
+            yield current_chunk
+        else:
+            # Thinking still in progress (parts arrive one at a time)
+            current_chunk = parts[0].text
+            thought_buffer += current_chunk
+            yield f"[Thinking Chunk] {current_chunk}"

+    # after streaming completes, the final result can also be delivered in one piece
+    yield f"\n[Final Response]\n{response_buffer}"
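Reviewer note: the helper speaks a small tag protocol over plain strings, so it can be exercised from a REPL without any UI. An illustrative run (assumes GEMINI_API_KEY is set in the environment):

```python
for chunk in query_gemini_api_stream("Suggest one creative use for a drone."):
    print(chunk)  # "[Thinking Chunk] ..." first, then raw response text, then "[Final Response]"
```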
+##############################################################################
+# Expand each category's short description into 'Thinking' + 'Response' (streaming)
+##############################################################################
+def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
+    """
+    Streaming version of the original enhance_with_llm:
+    delivers the 'Thinking' and 'Response' stages sequentially as chunks.
+    """
+    prompt = f"""
+Here is a short description of '{obj_name}' related to '{category}':
+"{base_description}"
+Make the above more concrete and expand it into a 3-4 sentence idea, focusing on
+1) an understanding of the creative change in model/concept/shape, and
+2) the innovation points and functionality.
+"""
+    # receive chunks from query_gemini_api_stream() and yield them unchanged
+    for chunk in query_gemini_api_stream(prompt):
+        yield chunk

+##############################################################################
+# Generate the base ideas (one per category) for a single keyword (object)
+##############################################################################
+def generate_single_object_transformations(obj):
+    results = {}
+    for category, transformations in physical_transformation_categories.items():
+        transformation = choose_alternative(random.choice(transformations))
+        base_description = f"{obj} exhibits the phenomenon '{transformation}'"
+        results[category] = {"base": base_description, "enhanced": ""}
+    return results
+
+##############################################################################
+# Interaction between two keywords
+##############################################################################
+def generate_two_objects_interaction(obj1, obj2):
+    results = {}
+    for category, transformations in physical_transformation_categories.items():
+        transformation = choose_alternative(random.choice(transformations))
+        template = random.choice([
+            "{obj1} combines with {obj2}, and {change} occurs",
+            "{obj1} and {obj2} collide, and {change} takes place"
+        ])
+        base_description = template.format(obj1=obj1, obj2=obj2, change=transformation)
+        results[category] = {"base": base_description, "enhanced": ""}
+    return results
+
+##############################################################################
+# Interaction among three keywords
+##############################################################################
+def generate_three_objects_interaction(obj1, obj2, obj3):
+    results = {}
+    for category, transformations in physical_transformation_categories.items():
+        transformation = choose_alternative(random.choice(transformations))
+        template = random.choice([
+            "{obj1}, {obj2}, and {obj3} combine in a triangular structure, and {change} occurs",
+            "{obj1} acts as a mediator between {obj2} and {obj3}, promoting {change}"
+        ])
+        base_description = template.format(obj1=obj1, obj2=obj2, obj3=obj3, change=transformation)
+        results[category] = {"base": base_description, "enhanced": ""}
+    return results
+
+##############################################################################
+# Core transformation-generation logic
+##############################################################################
+def generate_transformations(text1, text2=None, text3=None):
+    if text2 and text3:
+        results = generate_three_objects_interaction(text1, text2, text3)
+        objects = [text1, text2, text3]
+    elif text2:
+        results = generate_two_objects_interaction(text1, text2)
+        objects = [text1, text2]
+    else:
+        results = generate_single_object_transformations(text1)
+        objects = [text1]
+    return results, objects

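Reviewer note: the dispatcher is pure and cheap, so the return shape is easy to check before any LLM call is involved (illustrative; the actual strings depend on the random draw):

```python
results, objects = generate_transformations("drone", "artificial intelligence")
# objects == ["drone", "artificial intelligence"]
# results == {category: {"base": "<templated sentence>", "enhanced": ""}, ...}, one entry per category
```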
+##############################################################################
+# Streaming: deliver the 'Thinking' + 'Response' parts per category in real time
+##############################################################################
+def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
+    """
+    Yields lists shaped like [(role='assistant'|'user', content=...), ...]
+    to match Gradio's Chatbot format.
+    The Thinking stage and the final response are sent separately in real time.
+    """
+    messages = []

+    # 1) validate the inputs
+    yield [("assistant", "Checking inputs...")]
+    time.sleep(0.3)

+    text1 = text1.strip() if text1 else None
+    text2 = text2.strip() if text2 else None
+    text3 = text3.strip() if text3 else None
+    if not text1:
+        yield [("assistant", "Error: please enter at least one keyword.")]
+        return

+    # 2) generate the ideas
+    yield [("assistant", "Generating creative model/concept/shape-change ideas... (analyzing by category)")]
+    time.sleep(0.3)
+    results, objects = generate_transformations(text1, text2, text3)
+
+    # per-category streaming
+    obj_name = " and ".join([obj for obj in objects if obj])
+
+    for i, (category, result_dict) in enumerate(results.items(), start=1):
+        base_desc = result_dict["base"]
+
+        # announce the category
+        yield [("assistant", f"**[{i}/{len(results)}] Category:** {category}\n\nBase idea: {base_desc}\n\nStreaming Thinking + Response step by step from here...")]
+        time.sleep(0.5)
+
+        # streaming LLM call
+        thinking_text = ""
+        response_text = ""
+        is_thinking_done = False
+
+        # call enhance_with_llm_stream
+        for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
+            if chunk.startswith("[Thinking Chunk]"):
+                # thinking part
+                thinking_text += chunk.replace("[Thinking Chunk]", "")
+                messages_to_user = f"**[Thinking]**\n{thinking_text}"
+                yield [("assistant", messages_to_user)]
+            elif chunk.startswith("[Response Start]"):
+                # the response starts here
+                is_thinking_done = True
+                # move the remaining text into response_text
+                partial = chunk.replace("[Response Start]", "")
+                response_text += partial
+                messages_to_user = f"**[Final Response begins]**\n{partial}"
+                yield [("assistant", messages_to_user)]
+            elif chunk.startswith("[Final Response]"):
+                # final wrap-up
+                final = chunk.replace("[Final Response]", "")
+                response_text += f"\n{final}"
+                yield [("assistant", f"**[Final Response]**\n{response_text.strip()}")]
+            else:
+                # regular response streaming
+                if is_thinking_done:
+                    response_text += chunk
+                    yield [("assistant", f"**[Response in progress]**\n{response_text}")]
+                else:
+                    thinking_text += chunk
+                    yield [("assistant", f"**[Thinking]**\n{thinking_text}")]

+        # this category's response is complete
+        result_dict["enhanced"] = response_text

+    # 3) all categories done
+    yield [("assistant", "**Streaming for every category is complete!**")]

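Reviewer note: each yield replaces the whole chat history with a single assistant message, so the Chatbot only ever shows the latest stage. Driving the generator by hand makes that easy to see (illustrative):

```python
for history in process_inputs_stream("drone", "artificial intelligence", ""):
    role, content = history[-1]  # every yielded history holds exactly one (role, content) pair
    print(role, "->", content[:60])
```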
+##############################################################################
+# Gradio UI
+##############################################################################
+with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
+               theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:

+    gr.Markdown("# Keyword-based creative transformation ideas (Gemini 2.0 Flash Thinking, streaming)")
+    gr.Markdown("Enter one to three keywords and the 'Thinking' and 'Response' stages are streamed live **per category**.")

     chatbot = gr.Chatbot(
+        label="Per-category ideas (Thinking + Response), streamed",
+        type="tuples",  # "tuples" is the valid value ("tuple" is not); note this format expects (user, assistant) pairs
+        render_markdown=True
     )

+    with gr.Row():
+        with gr.Column(scale=1):
+            text_input1 = gr.Textbox(label="Keyword 1 (required)", placeholder="e.g. car")
+            text_input2 = gr.Textbox(label="Keyword 2 (optional)", placeholder="e.g. robot")
+            text_input3 = gr.Textbox(label="Keyword 3 (optional)", placeholder="e.g. artificial intelligence")
+            submit_button = gr.Button("Generate ideas")
+            clear_button = gr.Button("Clear chat")
+
+        with gr.Column(scale=2):
+            # the chatbot above already occupies this space, so pass
+            pass

+    def clear_chat():
+        return []

+    examples = [
+        ["car", "", ""],
+        ["smartphone", "artificial intelligence", ""],
+        ["drone", "artificial intelligence", ""],
+        ["sneakers", "wearable", "health"],
+    ]
+    gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])

+    submit_button.click(
+        fn=process_inputs_stream,
+        inputs=[text_input1, text_input2, text_input3],
+        outputs=chatbot  # process_inputs_stream is a generator, so Gradio streams its updates; click() has no `stream` argument
     )

+    clear_button.click(
+        fn=clear_chat,
+        outputs=chatbot
+    )

 if __name__ == "__main__":
+    demo.launch(debug=True)
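Reviewer note: with generator callbacks like process_inputs_stream, Gradio delivers intermediate updates through its queue. Depending on the Gradio version, the queue may need to be enabled explicitly; if streamed chunks only appear at the end, a possible tail for the file is:

```python
if __name__ == "__main__":
    demo.queue()  # lets generator events stream their intermediate updates
    demo.launch(debug=True)
```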