aiqcamp commited on
Commit
cf7da81
·
verified ·
1 Parent(s): 0950619

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +412 -195
app.py CHANGED
@@ -1,232 +1,449 @@
1
  import os
2
  import gradio as gr
3
- from gradio import ChatMessage
 
 
4
  from typing import Iterator
 
5
  import google.generativeai as genai
6
- import time # Import time module for potential debugging/delay
 
 
 
 
 
 
 
 
 
 
7
 
8
- # get Gemini API Key from the environ variable
9
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
10
  genai.configure(api_key=GEMINI_API_KEY)
11
 
12
- # we will be using the Gemini 2.0 Flash model with Thinking capabilities
13
  model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
- def format_chat_history(messages: list) -> list:
17
- """
18
- Formats the chat history into a structure Gemini can understand
19
- """
20
- formatted_history = []
21
- for message in messages:
22
- # Skip thinking messages (messages with metadata)
23
- if not (message.get("role") == "assistant" and "metadata" in message):
24
- formatted_history.append({
25
- "role": "user" if message.get("role") == "user" else "assistant",
26
- "parts": [message.get("content", "")]
27
- })
28
- return formatted_history
29
-
30
- def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
31
  """
32
- Streams thoughts and response with conversation history support for text input only.
 
33
  """
34
- if not user_message.strip(): # Robust check: if text message is empty or whitespace
35
- messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed.")) # More specific message
36
- yield messages
37
- return
38
-
39
- try:
40
- print(f"\n=== New Request (Text) ===")
41
- print(f"User message: {user_message}")
42
-
43
- # Format chat history for Gemini
44
- chat_history = format_chat_history(messages)
45
-
46
- # Initialize Gemini chat
47
- chat = model.start_chat(history=chat_history)
48
- response = chat.send_message(user_message, stream=True)
49
-
50
- # Initialize buffers and flags
51
- thought_buffer = ""
52
- response_buffer = ""
53
- thinking_complete = False
54
-
55
- # Add initial thinking message
56
- messages.append(
57
- ChatMessage(
58
- role="assistant",
59
- content="",
60
- metadata={"title": "โš™๏ธ Thinking: *The thoughts produced by the model are experimental"}
61
- )
62
- )
63
-
64
- for chunk in response:
65
- parts = chunk.candidates[0].content.parts
66
  current_chunk = parts[0].text
 
 
 
 
 
 
 
67
 
68
- if len(parts) == 2 and not thinking_complete:
69
- # Complete thought and start response
70
- thought_buffer += current_chunk
71
- print(f"\n=== Complete Thought ===\n{thought_buffer}")
72
-
73
- messages[-1] = ChatMessage(
74
- role="assistant",
75
- content=thought_buffer,
76
- metadata={"title": "โš™๏ธ Thinking: *The thoughts produced by the model are experimental"}
77
- )
78
- yield messages
79
-
80
- # Start response
81
- response_buffer = parts[1].text
82
- print(f"\n=== Starting Response ===\n{response_buffer}")
83
-
84
- messages.append(
85
- ChatMessage(
86
- role="assistant",
87
- content=response_buffer
88
- )
89
- )
90
- thinking_complete = True
91
-
92
- elif thinking_complete:
93
- # Stream response
94
- response_buffer += current_chunk
95
- print(f"\n=== Response Chunk ===\n{current_chunk}")
96
-
97
- messages[-1] = ChatMessage(
98
- role="assistant",
99
- content=response_buffer
100
- )
101
 
102
- else:
103
- # Stream thinking
104
- thought_buffer += current_chunk
105
- print(f"\n=== Thinking Chunk ===\n{current_chunk}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
- messages[-1] = ChatMessage(
108
- role="assistant",
109
- content=thought_buffer,
110
- metadata={"title": "โš™๏ธ Thinking: *The thoughts produced by the model are experimental"}
111
- )
112
- #time.sleep(0.05) #Optional: Uncomment this line to add a slight delay for debugging/visualization of streaming. Remove for final version
113
 
114
- yield messages
 
 
 
 
 
115
 
116
- print(f"\n=== Final Response ===\n{response_buffer}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
 
118
- except Exception as e:
119
- print(f"\n=== Error ===\n{str(e)}")
120
- messages.append(
121
- ChatMessage(
122
- role="assistant",
123
- content=f"I apologize, but I encountered an error: {str(e)}"
124
- )
125
- )
126
- yield messages
127
 
128
- def user_message(msg: str, history: list) -> tuple[str, list]:
129
- """Adds user message to chat history"""
130
- history.append(ChatMessage(role="user", content=msg))
131
- return "", history
132
 
133
 
134
- # Create the Gradio interface
135
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo: # Using Soft theme with adjusted hues for a refined look
136
- gr.Markdown("# Chat with Gemini 2.0 Flash and See its Thoughts ๐Ÿ’ญ")
 
 
137
 
138
-
139
- gr.HTML("""<a href="https://visitorbadge.io/status?path=https%3A%2F%2Faiqcamp-Gemini2-Flash-Thinking.hf.space">
140
- <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Faiqcamp-Gemini2-Flash-Thinking.hf.space&countColor=%23263759" />
141
- </a>""")
142
 
143
-
144
  chatbot = gr.Chatbot(
145
- type="messages",
146
- label="Gemini2.0 'Thinking' Chatbot (Streaming Output)", #Label now indicates streaming
147
- render_markdown=True,
148
- scale=1,
149
- avatar_images=(None,"https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
150
  )
151
 
152
- with gr.Row(equal_height=True):
153
- input_box = gr.Textbox(
154
- lines=1,
155
- label="Chat Message",
156
- placeholder="Type your message here...",
157
- scale=4
158
- )
159
-
160
- clear_button = gr.Button("Clear Chat", scale=1)
161
-
162
- # Add example prompts - removed file upload examples. Kept text focused examples.
163
- example_prompts = [
164
- ["Write a short poem about the sunset."],
165
- ["Explain the theory of relativity in simple terms."],
166
- ["If a train leaves Chicago at 6am traveling at 60mph, and another train leaves New York at 8am traveling at 80mph, at what time will they meet?"],
167
- ["Summarize the plot of Hamlet."],
168
- ["Write a haiku about a cat."]
169
- ]
170
 
171
- gr.Examples(
172
- examples=example_prompts,
173
- inputs=input_box,
174
- label="Examples: Try these prompts to see Gemini's thinking!",
175
- examples_per_page=5 # Adjust as needed
176
- )
177
 
 
 
178
 
179
- # Set up event handlers
180
- msg_store = gr.State("") # Store for preserving user message
181
-
182
- input_box.submit(
183
- lambda msg: (msg, msg, ""), # Store message and clear input
184
- inputs=[input_box],
185
- outputs=[msg_store, input_box, input_box],
186
- queue=False
187
- ).then(
188
- user_message, # Add user message to chat
189
- inputs=[msg_store, chatbot],
190
- outputs=[input_box, chatbot],
191
- queue=False
192
- ).then(
193
- stream_gemini_response, # Generate and stream response
194
- inputs=[msg_store, chatbot],
195
- outputs=chatbot
196
- )
197
 
198
- clear_button.click(
199
- lambda: ([], "", ""),
200
- outputs=[chatbot, input_box, msg_store],
201
- queue=False
202
- )
 
203
 
204
- gr.Markdown( # Description moved to the bottom - updated for text-only
205
- """
206
- <br><br><br> <!-- Add some vertical space -->
207
- ---
208
- ### About this Chatbot
209
- This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
210
- You can observe the model's thought process as it generates responses, displayed with the "โš™๏ธ Thinking" prefix.
211
-
212
- **Try out the example prompts below to see Gemini in action!**
213
-
214
- **Key Features:**
215
- * Powered by Google's **Gemini 2.0 Flash** model.
216
- * Shows the model's **thoughts** before the final answer (experimental feature).
217
- * Supports **conversation history** for multi-turn chats.
218
- * Uses **streaming** for a more interactive experience.
219
- **Instructions:**
220
- 1. Type your message in the input box below or select an example.
221
- 2. Press Enter or click Submit to send.
222
- 3. Observe the chatbot's "Thinking" process followed by the final response.
223
- 4. Use the "Clear Chat" button to start a new conversation.
224
-
225
- *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary.
226
- """
227
  )
228
 
 
 
 
 
229
 
230
- # Launch the interface
231
  if __name__ == "__main__":
232
- demo.launch(debug=True)
 
1
  import os
2
  import gradio as gr
3
+ import random
4
+ import time
5
+ import logging
6
  from typing import Iterator
7
+
8
  import google.generativeai as genai
9
+ from gradio import ChatMessage # ChatMessage ๊ตฌ์กฐ ์‚ฌ์šฉ (Thinking/Response ๊ตฌ๋ถ„ ๊ฐ€๋Šฅ)
10
+
11
# Log to both a file and stderr; the file is used for debugging the Gemini API calls.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("api_debug.log"),
        logging.StreamHandler()
    ]
)
# Module-level logger for this app.
logger = logging.getLogger("idea_generator")
20
 
21
+ # Gemini API ํ‚ค ์„ค์ •
22
# Gemini API key, read from the environment (None if unset — configure()
# accepts it, but actual API calls would then fail at request time).
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)

# Gemini 2.0 Flash experimental model with the "thinking" capability,
# which emits an internal-reasoning part before the final answer.
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
27
 
28
+ ##############################################################################
29
+ # ๋ณ€ํ™˜ ๋ฌธ์ž์—ด์—์„œ ์Šฌ๋ž˜์‹œ("/")๋กœ ๊ตฌ๋ถ„๋œ ๋‘ ์˜ต์…˜ ์ค‘ ํ•˜๋‚˜ ์„ ํƒ
30
+ ##############################################################################
31
+ def choose_alternative(transformation):
32
+ if "/" not in transformation:
33
+ return transformation
34
+ parts = transformation.split("/")
35
+ if len(parts) != 2:
36
+ return random.choice([part.strip() for part in parts])
37
+ left = parts[0].strip()
38
+ right = parts[1].strip()
39
+ if " " in left:
40
+ tokens = left.split(" ", 1)
41
+ prefix = tokens[0]
42
+ if not right.startswith(prefix):
43
+ option1 = left
44
+ option2 = prefix + " " + right
45
+ else:
46
+ option1 = left
47
+ option2 = right
48
+ return random.choice([option1, option2])
49
+ else:
50
+ return random.choice([left, right])
51
+
52
+ ##############################################################################
53
+ # ์นดํ…Œ๊ณ ๋ฆฌ ์‚ฌ์ „ (์ผ๋ถ€๋งŒ ๋ฐœ์ทŒ ๊ฐ€๋Šฅ. ์—ฌ๊ธฐ์„œ๋Š” ์˜ˆ์‹œ๋กœ 3๊ฐœ๋งŒ ์œ ์ง€)
54
+ ##############################################################################
55
# Category → list of candidate physical-transformation phrases (Korean).
# Entries containing "/" encode two alternatives; choose_alternative() picks one.
# FIX: two entries contained mojibake (U+FFFD replacement characters) from a
# broken encoding — restored to "์ƒํƒœ๋ณ€ํ™” ์ค‘ ์—ด ์ €์žฅ/๋ฐฉ์ถœ" and "ํ•˜๋ชจ๋‹‰์Šค ์ƒ์„ฑ".
physical_transformation_categories = {
    "๊ณต๊ฐ„ ์ด๋™": [
        "์•ž/๋’ค ์ด๋™", "์ขŒ/์šฐ ์ด๋™", "์œ„/์•„๋ž˜ ์ด๋™", "์„ธ๋กœ์ถ• ํšŒ์ „(๊ณ ๊ฐœ ๋„๋•์ž„)",
        "๊ฐ€๋กœ์ถ• ํšŒ์ „(๊ณ ๊ฐœ ์ “๊ธฐ)", "๊ธธ์ด์ถ• ํšŒ์ „(์˜†์œผ๋กœ ๊ธฐ์šธ์ž„)", "์› ์šด๋™", "๋‚˜์„ ํ˜• ์ด๋™",
        "๊ด€์„ฑ์— ์˜ํ•œ ๋ฏธ๋„๋Ÿฌ์ง", "ํšŒ์ „์ถ• ๋ณ€ํ™”", "๋ถˆ๊ทœ์น™ ํšŒ์ „", "ํ”๋“ค๋ฆผ ์šด๋™", "ํฌ๋ฌผ์„  ์ด๋™",
        "๋ฌด์ค‘๋ ฅ ๋ถ€์œ ", "์ˆ˜๋ฉด ์œ„ ๋ถ€์œ ", "์ ํ”„/๋„์•ฝ", "์Šฌ๋ผ์ด๋”ฉ", "๋กค๋ง", "์ž์œ  ๋‚™ํ•˜",
        "์™•๋ณต ์šด๋™", "ํƒ„์„ฑ ํŠ•๊น€", "๊ด€ํ†ต", "ํšŒํ”ผ ์›€์ง์ž„", "์ง€๊ทธ์žฌ๊ทธ ์ด๋™", "์Šค์œ™ ์šด๋™"
    ],

    "ํฌ๊ธฐ์™€ ํ˜•ํƒœ ๋ณ€ํ™”": [
        "๋ถ€ํ”ผ ๋Š˜์–ด๋‚จ/์ค„์–ด๋“ฆ", "๊ธธ์ด ๋Š˜์–ด๋‚จ/์ค„์–ด๋“ฆ", "๋„ˆ๋น„ ๋Š˜์–ด๋‚จ/์ค„์–ด๋“ฆ", "๋†’์ด ๋Š˜์–ด๋‚จ/์ค„์–ด๋“ฆ",
        "๋ฐ€๋„ ๋ณ€ํ™”", "๋ฌด๊ฒŒ ์ฆ๊ฐ€/๊ฐ์†Œ", "๋ชจ์–‘ ๋ณ€ํ˜•", "์ƒํƒœ ๋ณ€ํ™”", "๋ถˆ๊ท ๋“ฑ ๋ณ€ํ˜•",
        "๋ณต์žกํ•œ ํ˜•ํƒœ ๋ณ€ํ˜•", "๋น„ํ‹€๋ฆผ/๊ผฌ์ž„", "๋ถˆ๊ท ์ผํ•œ ํ™•์žฅ/์ถ•์†Œ", "๋ชจ์„œ๋ฆฌ ๋‘ฅ๊ธ€๊ฒŒ/๋‚ ์นด๋กญ๊ฒŒ",
        "๊นจ์ง/๊ฐˆ๋ผ์ง", "์—ฌ๋Ÿฌ ์กฐ๊ฐ ๋‚˜๋ˆ ์ง", "๋ฌผ ์ €ํ•ญ", "๋จผ์ง€ ์ €ํ•ญ", "์ฐŒ๊ทธ๋Ÿฌ์ง/๋ณต์›",
        "์ ‘ํž˜/ํŽผ์ณ์ง", "์••์ฐฉ/ํŒฝ์ฐฝ", "๋Š˜์–ด๋‚จ/์ˆ˜์ถ•", "๊ตฌ๊ฒจ์ง/ํ‰ํ‰ํ•ด์ง", "๋ญ‰๊ฐœ์ง/๋‹จ๋‹จํ•ด์ง",
        "๋ง๋ฆผ/ํŽด์ง", "๊บพ์ž„/๊ตฌ๋ถ€๋Ÿฌ์ง"
    ],

    "ํ‘œ๋ฉด ๋ฐ ์™ธ๊ด€ ๋ณ€ํ™”": [
        "์ƒ‰์ƒ ๋ณ€ํ™”", "์งˆ๊ฐ ๋ณ€ํ™”", "ํˆฌ๋ช…/๋ถˆํˆฌ๋ช… ๋ณ€ํ™”", "๋ฐ˜์ง์ž„/๋ฌด๊ด‘ ๋ณ€ํ™”",
        "๋น› ๋ฐ˜์‚ฌ ์ •๋„ ๋ณ€ํ™”", "๋ฌด๋Šฌ ๋ณ€ํ™”", "๊ฐ๋„์— ๋”ฐ๋ฅธ ์ƒ‰์ƒ ๋ณ€ํ™”", "๋น›์— ๋”ฐ๋ฅธ ์ƒ‰์ƒ ๋ณ€ํ™”",
        "์˜จ๋„์— ๋”ฐ๋ฅธ ์ƒ‰์ƒ ๋ณ€ํ™”", "ํ™€๋กœ๊ทธ๋žจ ํšจ๊ณผ", "ํ‘œ๋ฉด ๊ฐ๋„๋ณ„ ๋น› ๋ฐ˜์‚ฌ", "ํ‘œ๋ฉด ๋ชจ์–‘ ๋ณ€ํ˜•",
        "์ดˆ๋ฏธ์„ธ ํ‘œ๋ฉด ๊ตฌ์กฐ ๋ณ€ํ™”", "์ž๊ฐ€ ์„ธ์ • ํšจ๊ณผ", "์–ผ๋ฃฉ/ํŒจํ„ด ์ƒ์„ฑ", "ํ๋ฆผ/์„ ๋ช…ํ•จ ๋ณ€ํ™”",
        "๊ด‘ํƒ/์œค๊ธฐ ๋ณ€ํ™”", "์ƒ‰์กฐ/์ฑ„๋„ ๋ณ€ํ™”", "๋ฐœ๊ด‘/ํ˜•๊ด‘", "๋น› ์‚ฐ๋ž€ ํšจ๊ณผ",
        "๋น› ํก์ˆ˜ ๋ณ€ํ™”", "๋ฐ˜ํˆฌ๋ช… ํšจ๊ณผ", "๊ทธ๋ฆผ์ž ํšจ๊ณผ ๋ณ€ํ™”", "์ž์™ธ์„  ๋ฐ˜์‘ ๋ณ€ํ™”",
        "์•ผ๊ด‘ ํšจ๊ณผ"
    ],

    "๋ฌผ์งˆ์˜ ์ƒํƒœ ๋ณ€ํ™”": [
        "๊ณ ์ฒด/์•ก์ฒด/๊ธฐ์ฒด ์ „ํ™˜", "๊ฒฐ์ •ํ™”/์šฉํ•ด", "์‚ฐํ™”/๋ถ€์‹", "๋”ฑ๋”ฑํ•ด์ง/๋ถ€๋“œ๋Ÿฌ์›Œ์ง",
        "ํŠน์ˆ˜ ์ƒํƒœ ์ „ํ™˜", "๋ฌด์ •ํ˜•/๊ฒฐ์ •ํ˜• ์ „ํ™˜", "์„ฑ๋ถ„ ๋ถ„๋ฆฌ", "๋ฏธ์„ธ ์ž…์ž ํ˜•์„ฑ/๋ถ„ํ•ด",
        "์ ค ํ˜•์„ฑ/ํ’€์–ด์ง", "์ค€์•ˆ์ • ์ƒํƒœ ๋ณ€ํ™”", "๋ถ„์ž ์ž๊ฐ€ ์ •๋ ฌ/๋ถ„ํ•ด", "์ƒํƒœ๋ณ€ํ™” ์ง€์—ฐ ํ˜„์ƒ",
        "๋…น์Œ", "๊ตณ์Œ", "์ฆ๋ฐœ/์‘์ถ•", "์Šนํ™”/์ฆ์ฐฉ", "์นจ์ „/๋ถ€์œ ", "๋ถ„์‚ฐ/์‘์ง‘",
        "๊ฑด์กฐ/์Šต์œค", "ํŒฝ์œค/์ˆ˜์ถ•", "๋™๊ฒฐ/ํ•ด๋™", "ํ’ํ™”/์นจ์‹", "์ถฉ์ „/๋ฐฉ์ „",
        "๊ฒฐํ•ฉ/๋ถ„๋ฆฌ", "๋ฐœํšจ/๋ถ€ํŒจ"
    ],

    "์—ด ๊ด€๋ จ ๋ณ€ํ™”": [
        "์˜จ๋„ ์ƒ์Šน/ํ•˜๊ฐ•", "์—ด์— ์˜ํ•œ ํŒฝ์ฐฝ/์ˆ˜์ถ•", "์—ด ์ „๋‹ฌ/์ฐจ๋‹จ", "์••๋ ฅ ์ƒ์Šน/ํ•˜๊ฐ•",
        "์—ด ๋ณ€ํ™”์— ๋”ฐ๋ฅธ ์žํ™”", "๋ฌด์งˆ์„œ๋„ ๋ณ€ํ™”", "์—ด์ „๊ธฐ ํ˜„์ƒ", "์ž๊ธฐ์žฅ์— ์˜ํ•œ ์—ด ๋ณ€ํ™”",
        # FIX: was "์ƒํƒœ๋ณ€<U+FFFD...> ์ค‘ ..." (mojibake) — restored to "์ƒํƒœ๋ณ€ํ™”".
        "์ƒํƒœ๋ณ€ํ™” ์ค‘ ์—ด ์ €์žฅ/๋ฐฉ์ถœ", "์—ด ์ŠคํŠธ๋ ˆ์Šค ๋ฐœ์ƒ/ํ•ด์†Œ", "๊ธ‰๊ฒฉํ•œ ์˜จ๋„ ๋ณ€ํ™” ์˜ํ–ฅ",
        "๋ณต์‚ฌ์—ด์— ์˜ํ•œ ๋ƒ‰๊ฐ/๊ฐ€์—ด", "๋ฐœ์—ด/ํก์—ด", "์—ด ๋ถ„ํฌ ๋ณ€ํ™”", "์—ด ๋ฐ˜์‚ฌ/ํก์ˆ˜",
        "๋ƒ‰๊ฐ ์‘์ถ•", "์—ด ํ™œ์„ฑํ™”", "์—ด ๋ณ€์ƒ‰", "์—ด ํŒฝ์ฐฝ ๊ณ„์ˆ˜ ๋ณ€ํ™”", "์—ด ์•ˆ์ •์„ฑ ๋ณ€ํ™”",
        "๋‚ด์—ด์„ฑ/๋‚ดํ•œ์„ฑ", "์ž๊ธฐ๋ฐœ์—ด", "์—ด์  ํ‰ํ˜•/๋ถˆ๊ท ํ˜•", "์—ด์  ๋ณ€ํ˜•", "์—ด ๋ถ„์‚ฐ/์ง‘์ค‘"
    ],

    "์›€์ง์ž„ ํŠน์„ฑ ๋ณ€ํ™”": [
        "๊ฐ€์†/๊ฐ์†", "์ผ์ • ์†๋„ ์œ ์ง€", "์ง„๋™/์ง„๋™ ๊ฐ์†Œ", "๋ถ€๋”ชํž˜/ํŠ•๊น€",
        "ํšŒ์ „ ์†๋„ ์ฆ๊ฐ€/๊ฐ์†Œ", "ํšŒ์ „ ๋ฐฉํ–ฅ ๋ณ€ํ™”", "๋ถˆ๊ทœ์น™ ์›€์ง์ž„", "๋ฉˆ์ท„๋‹ค ๋ฏธ๋„๋Ÿฌ์ง€๋Š” ํ˜„์ƒ",
        "๊ณต์ง„/๋ฐ˜๊ณต์ง„", "์œ ์ฒด ์† ์ €ํ•ญ/์–‘๋ ฅ ๋ณ€ํ™”", "์›€์ง์ž„ ์ €ํ•ญ ๋ณ€ํ™”", "๋ณตํ•ฉ ์ง„๋™ ์›€์ง์ž„",
        "ํŠน์ˆ˜ ์œ ์ฒด ์† ์›€์ง์ž„", "ํšŒ์ „-์ด๋™ ์—ฐ๊ณ„ ์›€์ง์ž„", "๊ด€์„ฑ ์ •์ง€", "์ถฉ๊ฒฉ ํก์ˆ˜",
        "์ถฉ๊ฒฉ ์ „๋‹ฌ", "์šด๋™๋Ÿ‰ ๋ณด์กด", "๋งˆ์ฐฐ๋ ฅ ๋ณ€ํ™”", "๊ด€์„ฑ ํƒˆ์ถœ", "๋ถˆ์•ˆ์ • ๊ท ํ˜•",
        "๋™์  ์•ˆ์ •์„ฑ", "ํ”๋“ค๋ฆผ ๊ฐ์‡ ", "๊ฒฝ๋กœ ์˜ˆ์ธก์„ฑ", "ํšŒํ”ผ ์›€์ง์ž„"
    ],

    "๊ตฌ์กฐ์  ๋ณ€ํ™”": [
        "๋ถ€ํ’ˆ ์ถ”๊ฐ€/์ œ๊ฑฐ", "์กฐ๋ฆฝ/๋ถ„ํ•ด", "์ ‘๊ธฐ/ํŽด๊ธฐ", "๋ณ€ํ˜•/์›์ƒ๋ณต๊ตฌ", "์ตœ์  ๊ตฌ์กฐ ๋ณ€ํ™”",
        "์ž๊ฐ€ ์žฌ๋ฐฐ์—ด", "์ž์—ฐ ํŒจํ„ด ํ˜•์„ฑ/์†Œ๋ฉธ", "๊ทœ์น™์  ํŒจํ„ด ๋ณ€ํ™”", "๋ชจ๋“ˆ์‹ ๋ณ€ํ˜•",
        "๋ณต์žก์„ฑ ์ฆ๊ฐ€ ๊ตฌ์กฐ", "์›๋ž˜ ๋ชจ์–‘ ๊ธฐ์–ต ํšจ๊ณผ", "์‹œ๊ฐ„์— ๋”ฐ๋ฅธ ํ˜•ํƒœ ๋ณ€ํ™”", "๋ถ€๋ถ„ ์ œ๊ฑฐ",
        "๋ถ€๋ถ„ ๊ต์ฒด", "๊ฒฐํ•ฉ", "๋ถ„๋ฆฌ", "๋ถ„ํ• /ํ†ตํ•ฉ", "์ค‘์ฒฉ/๊ฒน์นจ", "๋‚ด๋ถ€ ๊ตฌ์กฐ ๋ณ€ํ™”",
        "์™ธ๋ถ€ ๊ตฌ์กฐ ๋ณ€ํ™”", "์ค‘์‹ฌ์ถ• ์ด๋™", "๊ท ํ˜•์  ๋ณ€ํ™”", "๊ณ„์ธต ๊ตฌ์กฐ ๋ณ€ํ™”", "์ง€์ง€ ๊ตฌ์กฐ ๋ณ€ํ™”",
        "์‘๋ ฅ ๋ถ„์‚ฐ ๊ตฌ์กฐ", "์ถฉ๊ฒฉ ํก์ˆ˜ ๊ตฌ์กฐ", "๊ทธ๋ฆฌ๋“œ/๋งคํŠธ๋ฆญ์Šค ๊ตฌ์กฐ ๋ณ€ํ™”", "์ƒํ˜ธ ์—ฐ๊ฒฐ์„ฑ ๋ณ€ํ™”"
    ],

    "์ „๊ธฐ ๋ฐ ์ž๊ธฐ ๋ณ€ํ™”": [
        "์ž์„ฑ ์ƒ์„ฑ/์†Œ๋ฉธ", "์ „ํ•˜๋Ÿ‰ ์ฆ๊ฐ€/๊ฐ์†Œ", "์ „๊ธฐ์žฅ ์ƒ์„ฑ/์†Œ๋ฉธ", "์ž๊ธฐ์žฅ ์ƒ์„ฑ/์†Œ๋ฉธ",
        "์ดˆ์ „๋„ ์ƒํƒœ ์ „ํ™˜", "๊ฐ•์œ ์ „์ฒด ํŠน์„ฑ ๋ณ€ํ™”", "์–‘์ž ์ƒํƒœ ๋ณ€ํ™”", "ํ”Œ๋ผ์ฆˆ๋งˆ ์ƒํƒœ ํ˜•์„ฑ/์†Œ๋ฉธ",
        "์Šคํ•€ํŒŒ ์ „๋‹ฌ", "๋น›์— ์˜ํ•œ ์ „๊ธฐ ๋ฐœ์ƒ", "์••๋ ฅ์— ์˜ํ•œ ์ „๊ธฐ ๋ฐœ์ƒ", "์ž๊ธฐ์žฅ ์† ์ „๋ฅ˜ ๋ณ€ํ™”",
        "์ „๊ธฐ ์ €ํ•ญ ๋ณ€ํ™”", "์ „๊ธฐ ์ „๋„์„ฑ ๋ณ€ํ™”", "์ •์ „๊ธฐ ๋ฐœ์ƒ/๋ฐฉ์ „", "์ „์ž๊ธฐ ์œ ๋„",
        "์ „์ž๊ธฐํŒŒ ๋ฐฉ์ถœ/ํก์ˆ˜", "์ „๊ธฐ ์šฉ๋Ÿ‰ ๋ณ€ํ™”", "์ž๊ธฐ ์ด๋ ฅ ํ˜„์ƒ", "์ „๊ธฐ์  ๋ถ„๊ทน",
        "์ „์ž ํ๋ฆ„ ๋ฐฉํ–ฅ ๋ณ€ํ™”", "์ „๊ธฐ์  ๊ณต๋ช…", "์ „๊ธฐ์  ์ฐจํ/๋…ธ์ถœ", "์ž๊ธฐ ์ฐจํ/๋…ธ์ถœ",
        "์ž๊ธฐ์žฅ ๋ฐฉํ–ฅ ์ •๋ ฌ"
    ],

    "ํ™”ํ•™์  ๋ณ€ํ™”": [
        "ํ‘œ๋ฉด ์ฝ”ํŒ… ๋ณ€ํ™”", "๋ฌผ์งˆ ์„ฑ๋ถ„ ๋ณ€ํ™”", "ํ™”ํ•™ ๋ฐ˜์‘ ๋ณ€ํ™”", "์ด‰๋งค ์ž‘์šฉ ์‹œ์ž‘/์ค‘๋‹จ",
        "๋น›์— ์˜ํ•œ ํ™”ํ•™ ๋ฐ˜์‘", "์ „๊ธฐ์— ์˜ํ•œ ํ™”ํ•™ ๋ฐ˜์‘", "๋‹จ๋ถ„์ž๋ง‰ ํ˜•์„ฑ", "๋ถ„์ž ์ˆ˜์ค€ ๊ณ„์‚ฐ ๋ณ€ํ™”",
        "์ž์—ฐ ๋ชจ๋ฐฉ ํ‘œ๋ฉด ๋ณ€ํ™”", "ํ™˜๊ฒฝ ๋ฐ˜์‘ํ˜• ๋ฌผ์งˆ ๋ณ€ํ™”", "์ฃผ๊ธฐ์  ํ™”ํ•™ ๋ฐ˜์‘", "์‚ฐํ™”", "ํ™˜์›",
        "๊ณ ๋ถ„์žํ™”", "๋ฌผ ๋ถ„ํ•ด", "ํ™”ํ•ฉ", "๋ฐฉ์‚ฌ์„  ์˜ํ–ฅ", "์‚ฐ-์—ผ๊ธฐ ๋ฐ˜์‘", "์ค‘ํ™” ๋ฐ˜์‘",
        "์ด์˜จํ™”", "ํ™”ํ•™์  ํก์ฐฉ/ํƒˆ์ฐฉ", "์ด‰๋งค ํšจ์œจ ๋ณ€ํ™”", "ํšจ์†Œ ํ™œ์„ฑ ๋ณ€ํ™”", "๋ฐœ์ƒ‰ ๋ฐ˜์‘",
        "pH ๋ณ€ํ™”", "ํ™”ํ•™์  ํ‰ํ˜• ์ด๋™", "๊ฒฐํ•ฉ ํ˜•์„ฑ/๋ถ„ํ•ด", "์šฉํ•ด๋„ ๋ณ€ํ™”"
    ],

    "์‹œ๊ฐ„ ๊ด€๋ จ ๋ณ€ํ™”": [
        "๋…ธํ™”/ํ’ํ™”", "๋งˆ๋ชจ/๋ถ€์‹", "์ƒ‰ ๋ฐ”๋žจ/๋ณ€์ƒ‰", "์†์ƒ/ํšŒ๋ณต", "์ˆ˜๋ช… ์ฃผ๊ธฐ ๋ณ€ํ™”",
        "์‚ฌ์šฉ์ž ์ƒํ˜ธ์ž‘์šฉ์— ๋”ฐ๋ฅธ ์ ์‘", "ํ•™์Šต ๊ธฐ๋ฐ˜ ํ˜•ํƒœ ์ตœ์ ํ™”", "์‹œ๊ฐ„์— ๋”ฐ๋ฅธ ๋ฌผ์„ฑ ๋ณ€ํ™”",
        "์ง‘๋‹จ ๊ธฐ์–ต ํšจ๊ณผ", "๋ฌธํ™”์  ์˜๋ฏธ ๋ณ€ํ™”", "์ง€์—ฐ ๋ฐ˜์‘", "์ด์ „ ์ƒํƒœ ์˜์กด ๋ณ€ํ™”",
        "์ ์ง„์  ์‹œ๊ฐ„ ๋ณ€ํ™”", "์ง„ํ™”์  ๋ณ€ํ™”", "์ฃผ๊ธฐ์  ์žฌ์ƒ", "๊ณ„์ ˆ ๋ณ€ํ™” ์ ์‘",
        "์ƒ์ฒด๋ฆฌ๋“ฌ ๋ณ€ํ™”", "์ƒ์•  ์ฃผ๊ธฐ ๋‹จ๊ณ„", "์„ฑ์žฅ/ํ‡ดํ™”", "์ž๊ธฐ ๋ณต๊ตฌ/์žฌ์ƒ",
        "์ž์—ฐ ์ˆœํ™˜ ์ ์‘", "์ง€์†์„ฑ/์ผ์‹œ์„ฑ", "๊ธฐ์–ต ํšจ๊ณผ", "์ง€์—ฐ๋œ ์ž‘์šฉ", "๋ˆ„์  ํšจ๊ณผ"
    ],

    "๋น›๊ณผ ์‹œ๊ฐ ํšจ๊ณผ": [
        "๋ฐœ๊ด‘/์†Œ๋“ฑ", "๋น› ํˆฌ๊ณผ/์ฐจ๋‹จ", "๋น› ์‚ฐ๋ž€/์ง‘์ค‘", "์ƒ‰์ƒ ์ŠคํŽ™ํŠธ๋Ÿผ ๋ณ€ํ™”", "๋น› ํšŒ์ ˆ",
        "๋น› ๊ฐ„์„ญ", "ํ™€๋กœ๊ทธ๋žจ ์ƒ์„ฑ", "๋ ˆ์ด์ € ํšจ๊ณผ", "๋น› ํŽธ๊ด‘", "ํ˜•๊ด‘/์ธ๊ด‘",
        "์ž์™ธ์„ /์ ์™ธ์„  ๋ฐœ๊ด‘", "๊ด‘ํ•™์  ์ฐฉ์‹œ", "๋น› ๊ตด์ ˆ", "๊ทธ๋ฆผ์ž ์ƒ์„ฑ/์ œ๊ฑฐ",
        "์ƒ‰์ˆ˜์ฐจ ํšจ๊ณผ", "๋ฌด์ง€๊ฐœ ํšจ๊ณผ", "๊ธ€๋กœ์šฐ ํšจ๊ณผ", "ํ”Œ๋ž˜์‹œ ํšจ๊ณผ", "์กฐ๋ช… ํŒจํ„ด",
        "๋น” ํšจ๊ณผ", "๊ด‘ ํ•„ํ„ฐ ํšจ๊ณผ", "๋น›์˜ ๋ฐฉํ–ฅ์„ฑ ๋ณ€ํ™”", "ํˆฌ์˜ ํšจ๊ณผ", "๋น› ๊ฐ์ง€/๋ฐ˜์‘",
        "๊ด‘๋„ ๋ณ€ํ™”"
    ],

    "์†Œ๋ฆฌ์™€ ์ง„๋™ ํšจ๊ณผ": [
        "์†Œ๋ฆฌ ๋ฐœ์ƒ/์†Œ๋ฉธ", "์†Œ๋ฆฌ ๋†’๋‚ฎ์ด ๋ณ€ํ™”", "์†Œ๋ฆฌ ํฌ๊ธฐ ๋ณ€ํ™”", "์Œ์ƒ‰ ๋ณ€ํ™”",
        "๊ณต๋ช…/๋ฐ˜๊ณต๋ช…", "์Œํ–ฅ ์ง„๋™", "์ดˆ์ŒํŒŒ/์ €์ŒํŒŒ ๋ฐœ์ƒ", "์Œํ–ฅ ์ง‘์ค‘/๋ถ„์‚ฐ",
        "์Œํ–ฅ ๋ฐ˜์‚ฌ/ํก์ˆ˜", "์Œํ–ฅ ๋„ํ”Œ๋Ÿฌ ํšจ๊ณผ", "์ŒํŒŒ ๊ฐ„์„ญ", "์Œํ–ฅ ๊ณต์ง„",
        # FIX: was "ํ•˜๋ชจ๋‹‰<U+FFFD...> ์ƒ์„ฑ" (mojibake) — restored to "ํ•˜๋ชจ๋‹‰์Šค ์ƒ์„ฑ".
        "์ง„๋™ ํŒจํ„ด ๋ณ€ํ™”", "ํƒ€์•… ํšจ๊ณผ", "์Œํ–ฅ ํ”ผ๋“œ๋ฐฑ", "์Œํ–ฅ ์ฐจํ/์ฆํญ",
        "์†Œ๋ฆฌ ์ง€ํ–ฅ์„ฑ", "์Œํ–ฅ ์™œ๊ณก", "๋น„ํŠธ ์ƒ์„ฑ", "ํ•˜๋ชจ๋‹‰์Šค ์ƒ์„ฑ", "์ฃผํŒŒ์ˆ˜ ๋ณ€์กฐ",
        "์Œํ–ฅ ์ถฉ๊ฒฉํŒŒ", "์Œํ–ฅ ํ•„ํ„ฐ๋ง", "์ŒํŒŒ ์ „ํŒŒ ํŒจํ„ด", "์ง„๋™ ๋Œํ•‘"
    ],

    "์ƒ๋ฌผํ•™์  ๋ณ€ํ™”": [
        "์ƒ์žฅ/์œ„์ถ•", "์„ธํฌ ๋ถ„์—ด/์‚ฌ๋ฉธ", "์ƒ๋ฌผ ๋ฐœ๊ด‘", "์‹ ์ง„๋Œ€์‚ฌ ๋ณ€ํ™”", "๋ฉด์—ญ ๋ฐ˜์‘",
        "ํ˜ธ๋ฅด๋ชฌ ๋ถ„๋น„", "์‹ ๊ฒฝ ๋ฐ˜์‘", "์œ ์ „์  ๋ฐœํ˜„", "์ ์‘/์ง„ํ™”", "์ƒ์ฒด๋ฆฌ๋“ฌ ๋ณ€ํ™”",
        "์žฌ์ƒ/์น˜์œ ", "๋…ธํ™”/์„ฑ์ˆ™", "์ƒ์ฒด ๋ชจ๋ฐฉ ๋ณ€ํ™”", "๋ฐ”์ด์˜คํ•„๋ฆ„ ํ˜•์„ฑ", "์ƒ๋ฌผํ•™์  ๋ถ„ํ•ด",
        "ํšจ์†Œ ํ™œ์„ฑํ™”/๋น„ํ™œ์„ฑํ™”", "์ƒ๋ฌผํ•™์  ์‹ ํ˜ธ ์ „๋‹ฌ", "์ŠคํŠธ๋ ˆ์Šค ๋ฐ˜์‘", "์ฒด์˜จ ์กฐ์ ˆ",
        "์ƒ๋ฌผํ•™์  ์‹œ๊ณ„ ๋ณ€ํ™”", "์„ธํฌ์™ธ ๊ธฐ์งˆ ๋ณ€ํ™”", "์ƒ์ฒด ์—ญํ•™์  ๋ฐ˜์‘", "์„ธํฌ ์šด๋™์„ฑ",
        "์„ธํฌ ๊ทน์„ฑ ๋ณ€ํ™”", "์˜์–‘ ์ƒํƒœ ๋ณ€ํ™”"
    ],

    "ํ™˜๊ฒฝ ์ƒํ˜ธ์ž‘์šฉ": [
        "์˜จ๋„ ๋ฐ˜์‘", "์Šต๋„ ๋ฐ˜์‘", "๊ธฐ์•• ๋ฐ˜์‘", "์ค‘๋ ฅ ๋ฐ˜์‘", "์ž๊ธฐ์žฅ ๋ฐ˜์‘",
        "๋น› ๋ฐ˜์‘", "์†Œ๋ฆฌ ๋ฐ˜์‘", "ํ™”ํ•™ ๋ฌผ์งˆ ๊ฐ์ง€", "๊ธฐ๊ณ„์  ์ž๊ทน ๊ฐ์ง€", "์ „๊ธฐ ์ž๊ทน ๋ฐ˜์‘",
        "๋ฐฉ์‚ฌ์„  ๋ฐ˜์‘", "์ง„๋™ ๊ฐ์ง€", "pH ๋ฐ˜์‘", "์šฉ๋งค ๋ฐ˜์‘", "๊ธฐ์ฒด ๊ตํ™˜",
        "ํ™˜๊ฒฝ ์˜ค์—ผ ๋ฐ˜์‘", "๋‚ ์”จ ๋ฐ˜์‘", "๊ณ„์ ˆ ๋ณ€ํ™” ๋ฐ˜์‘", "์ผ์ฃผ๊ธฐ ๋ฐ˜์‘", "์ƒํƒœ๊ณ„ ์ƒํ˜ธ์ž‘์šฉ",
        "๊ณต์ƒ/๊ฒฝ์Ÿ ๋ฐ˜์‘", "ํฌ์‹/ํ”ผ์‹ ๊ด€๊ณ„", "๊ตฐ์ง‘ ํ˜•์„ฑ", "์˜์—ญ ์„ค์ •", "์ด์ฃผ/์ •์ฐฉ ํŒจํ„ด"
    ],

    "์„ผ์„œ ๊ธฐ๋Šฅ": [
        "์‹œ๊ฐ ์„ผ์„œ/๊ฐ์ง€", "์ฒญ๊ฐ ์„ผ์„œ/๊ฐ์ง€", "์ด‰๊ฐ ์„ผ์„œ/๊ฐ์ง€", "๋ฏธ๊ฐ ์„ผ์„œ/๊ฐ์ง€", "ํ›„๊ฐ ์„ผ์„œ/๊ฐ์ง€",
        "์˜จ๋„ ์„ผ์„œ/๊ฐ์ง€", "์Šต๋„ ์„ผ์„œ/๊ฐ์ง€", "์••๋ ฅ ์„ผ์„œ/๊ฐ์ง€", "๊ฐ€์†๋„ ์„ผ์„œ/๊ฐ์ง€", "ํšŒ์ „ ์„ผ์„œ/๊ฐ์ง€",
        "๊ทผ์ ‘ ์„ผ์„œ/๊ฐ์ง€", "์œ„์น˜ ์„ผ์„œ/๊ฐ์ง€", "์šด๋™ ์„ผ์„œ/๊ฐ์ง€", "๊ฐ€์Šค ์„ผ์„œ/๊ฐ์ง€", "์ ์™ธ์„  ์„ผ์„œ/๊ฐ์ง€",
        "์ž์™ธ์„  ์„ผ์„œ/๊ฐ์ง€", "๋ฐฉ์‚ฌ์„  ์„ผ์„œ/๊ฐ์ง€", "์ž๊ธฐ์žฅ ์„ผ์„œ/๊ฐ์ง€", "์ „๊ธฐ์žฅ ์„ผ์„œ/๊ฐ์ง€", "ํ™”ํ•™๋ฌผ์งˆ ์„ผ์„œ/๊ฐ์ง€",
        "์ƒ์ฒด์‹ ํ˜ธ ์„ผ์„œ/๊ฐ์ง€", "์ง„๋™ ์„ผ์„œ/๊ฐ์ง€", "์†Œ์Œ ์„ผ์„œ/๊ฐ์ง€", "๋น› ์„ธ๊ธฐ ์„ผ์„œ/๊ฐ์ง€", "๋น› ํŒŒ์žฅ ์„ผ์„œ/๊ฐ์ง€",
        "๊ธฐ์šธ๊ธฐ ์„ผ์„œ/๊ฐ์ง€", "pH ์„ผ์„œ/๊ฐ์ง€", "์ „๋ฅ˜ ์„ผ์„œ/๊ฐ์ง€", "์ „์•• ์„ผ์„œ/๊ฐ์ง€", "์ด๋ฏธ์ง€ ์„ผ์„œ/๊ฐ์ง€",
        "๊ฑฐ๋ฆฌ ์„ผ์„œ/๊ฐ์ง€", "๊นŠ์ด ์„ผ์„œ/๊ฐ์ง€", "์ค‘๋ ฅ ์„ผ์„œ/๊ฐ์ง€", "์†๋„ ์„ผ์„œ/๊ฐ์ง€", "ํ๋ฆ„ ์„ผ์„œ/๊ฐ์ง€",
        "์ˆ˜์œ„ ์„ผ์„œ/๊ฐ์ง€", "ํƒ๋„ ์„ผ์„œ/๊ฐ์ง€", "์—ผ๋„ ์„ผ์„œ/๊ฐ์ง€", "๊ธˆ์† ๊ฐ์ง€", "์••์ „ ์„ผ์„œ/๊ฐ์ง€",
        "๊ด‘์ „ ์„ผ์„œ/๊ฐ์ง€", "์—ด์ „๋Œ€ ์„ผ์„œ/๊ฐ์ง€", "ํ™€ ํšจ๊ณผ ์„ผ์„œ/๊ฐ์ง€", "์ดˆ์ŒํŒŒ ์„ผ์„œ/๊ฐ์ง€", "๋ ˆ์ด๋” ์„ผ์„œ/๊ฐ์ง€",
        "๋ผ์ด๋‹ค ์„ผ์„œ/๊ฐ์ง€", "ํ„ฐ์น˜ ์„ผ์„œ/๊ฐ์ง€", "์ œ์Šค์ฒ˜ ์„ผ์„œ/๊ฐ์ง€", "์‹ฌ๋ฐ• ์„ผ์„œ/๊ฐ์ง€", "ํ˜ˆ์•• ์„ผ์„œ/๊ฐ์ง€"
    ]
}
195
 
196
+ ##############################################################################
197
+ # ์ŠคํŠธ๋ฆฌ๋ฐ์šฉ Gemini API ํ•จ์ˆ˜:
198
+ # - 'Thinking' ๋‹จ๊ณ„(์•„์ด๋””์–ด ๋‚ด๋ถ€ ์ถ”๋ก )์™€ ์ตœ์ข… 'Response' ๋‹จ๊ณ„๋กœ ๊ตฌ์„ฑ
199
+ ##############################################################################
200
def query_gemini_api_stream(prompt: str) -> Iterator[str]:
    """Stream a Gemini 2.0 Flash "thinking" completion for *prompt*.

    Yields tagged text chunks:
      * "[Thinking Chunk] ..."   — internal-reasoning text,
      * "[Response Start] ..."   — first fragment of the final answer,
      * bare text                — subsequent answer fragments,
      * "\\n[Final Response]\\n..." — the accumulated answer, once the
        stream is exhausted.
    """
    # One-shot chat session (no conversation history).
    session = model.start_chat(history=[])
    stream = session.send_message(prompt, stream=True)

    thoughts = ""
    answer = ""
    in_answer = False

    for piece in stream:
        # Each streamed piece carries candidates[0].content.parts.
        parts = piece.candidates[0].content.parts

        if in_answer:
            # Thinking already finished — everything else extends the answer.
            fragment = parts[0].text
            answer += fragment
            yield fragment
        elif len(parts) == 2:
            # Completed thought plus the start of the answer in one piece.
            thoughts += parts[0].text
            yield f"[Thinking Chunk] {parts[0].text}"

            answer = parts[1].text
            yield f"[Response Start] {parts[1].text}"

            in_answer = True
        else:
            # Still thinking: parts arrive one fragment at a time.
            fragment = parts[0].text
            thoughts += fragment
            yield f"[Thinking Chunk] {fragment}"

    # After the stream ends, emit the full accumulated answer once.
    yield f"\n[Final Response]\n{answer}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
241
 
242
+ ##############################################################################
243
+ # ์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ๊ฐ„๋‹จ ์„ค๋ช…์„ 'Thinking' + 'Response'๋กœ ํ™•์žฅ (์ŠคํŠธ๋ฆฌ๋ฐ)
244
+ ##############################################################################
245
def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
    """
    Streaming variant of enhance_with_llm: relays the 'Thinking' and
    'Response' phases chunk-by-chunk from the Gemini streaming call.
    """
    # NOTE(review): this prompt's internal whitespace is sent to the model
    # verbatim; the Korean text asks for a 3-4 sentence expansion of the
    # base description, focused on creative model/concept/shape changes
    # and innovation points.
    prompt = f"""
๋‹ค์Œ์€ '{obj_name}'์˜ '{category}' ๊ด€๋ จ ๊ฐ„๋‹จํ•œ ์„ค๋ช…์ž…๋‹ˆ๋‹ค:
"{base_description}"
์œ„ ๋‚ด์šฉ์„ ๋ณด๋‹ค ๊ตฌ์ฒดํ™”ํ•˜์—ฌ,
1) ์ฐฝ์˜์ ์ธ ๋ชจ๋ธ/์ปจ์…‰/ํ˜•์ƒ์˜ ๋ณ€ํ™”์— ๋Œ€ํ•œ ์ดํ•ด,
2) ํ˜์‹  ํฌ์ธํŠธ์™€ ๊ธฐ๋Šฅ์„ฑ ๋“ฑ์„ ์ค‘์‹ฌ์œผ๋กœ
3~4๋ฌธ์žฅ์˜ ์•„์ด๋””์–ด๋กœ ํ™•์žฅํ•ด ์ฃผ์„ธ์š”.
"""
    # Forward every chunk from query_gemini_api_stream() unchanged,
    # preserving its "[Thinking Chunk]"/"[Response Start]" tagging.
    for chunk in query_gemini_api_stream(prompt):
        yield chunk
261
+
262
+ ##############################################################################
263
+ # ํ•œ ํ‚ค์›Œ๋“œ(์˜ค๋ธŒ์ ํŠธ)์— ๋Œ€ํ•œ ๊ธฐ๋ณธ ์•„์ด๋””์–ด(์นดํ…Œ๊ณ ๋ฆฌ๋ณ„) ์ƒ์„ฑ
264
+ ##############################################################################
265
def generate_single_object_transformations(obj):
    """Build one base idea per category for a single keyword *obj*.

    Returns {category: {"base": sentence, "enhanced": ""}}; the "enhanced"
    slot is filled in later by the streaming LLM pass.
    """
    ideas = {}
    for category, options in physical_transformation_categories.items():
        change = choose_alternative(random.choice(options))
        ideas[category] = {
            "base": f"{obj}์ด(๊ฐ€) {change} ํ˜„์ƒ์„ ๋ณด์ธ๋‹ค",
            "enhanced": "",
        }
    return ideas
272
+
273
+ ##############################################################################
274
+ # 2๊ฐœ ํ‚ค์›Œ๋“œ ์ƒํ˜ธ์ž‘์šฉ
275
+ ##############################################################################
276
def generate_two_objects_interaction(obj1, obj2):
    """Build one two-keyword interaction idea per category.

    Returns {category: {"base": sentence, "enhanced": ""}}; the "enhanced"
    slot is filled in later by the streaming LLM pass.
    """
    results = {}
    for category, transformations in physical_transformation_categories.items():
        transformation = choose_alternative(random.choice(transformations))
        # FIX: the first template contained mojibake (U+FFFD) where the Korean
        # particle belongs; restored as "์™€(๊ณผ)" to match the sibling templates
        # in generate_three_objects_interaction.
        template = random.choice([
            "{obj1}์ด(๊ฐ€) {obj2}์™€(๊ณผ) ๊ฒฐํ•ฉํ•˜์—ฌ {change}๊ฐ€ ๋ฐœ์ƒํ–ˆ๋‹ค",
            "{obj1}๊ณผ(์™€) {obj2}์ด(๊ฐ€) ์ถฉ๋Œํ•˜๋ฉด์„œ {change}๊ฐ€ ์ผ์–ด๋‚ฌ๋‹ค"
        ])
        base_description = template.format(obj1=obj1, obj2=obj2, change=transformation)
        results[category] = {"base": base_description, "enhanced": ""}
    return results
287
+
288
+ ##############################################################################
289
+ # 3๊ฐœ ํ‚ค์›Œ๋“œ ์ƒํ˜ธ์ž‘์šฉ
290
+ ##############################################################################
291
def generate_three_objects_interaction(obj1, obj2, obj3):
    """Build one three-keyword interaction idea per category.

    Returns {category: {"base": sentence, "enhanced": ""}}; the "enhanced"
    slot is filled in later by the streaming LLM pass.
    """
    ideas = {}
    # Sentence skeletons; {change} is the chosen transformation phrase.
    sentence_forms = [
        "{obj1}, {obj2}, {obj3}์ด(๊ฐ€) ์‚ผ๊ฐํ˜• ๊ตฌ์กฐ๋กœ ๊ฒฐํ•ฉํ•˜์—ฌ {change}๊ฐ€ ๋ฐœ์ƒํ–ˆ๋‹ค",
        "{obj1}์ด(๊ฐ€) {obj2}์™€(๊ณผ) {obj3} ์‚ฌ์ด์—์„œ ๋งค๊ฐœ์ฒด ์—ญํ• ์„ ํ•˜๋ฉฐ {change}๋ฅผ ์ด‰์ง„ํ–ˆ๋‹ค",
    ]
    for category, options in physical_transformation_categories.items():
        change = choose_alternative(random.choice(options))
        sentence = random.choice(sentence_forms).format(
            obj1=obj1, obj2=obj2, obj3=obj3, change=change
        )
        ideas[category] = {"base": sentence, "enhanced": ""}
    return ideas
302
+
303
+ ##############################################################################
304
+ # ์‹ค์ œ ๋ณ€ํ™˜ ์ƒ์„ฑ ๋กœ์ง
305
+ ##############################################################################
306
def generate_transformations(text1, text2=None, text3=None):
    """Dispatch to the 1-, 2-, or 3-keyword idea generator.

    Returns a pair (results, objects): the per-category idea dict and the
    list of keywords actually used. text3 only takes effect when text2 is
    also given, mirroring the UI's optional second/third inputs.
    """
    if text2 and text3:
        return generate_three_objects_interaction(text1, text2, text3), [text1, text2, text3]
    if text2:
        return generate_two_objects_interaction(text1, text2), [text1, text2]
    return generate_single_object_transformations(text1), [text1]
317
+
318
+ ##############################################################################
319
+ # ์ŠคํŠธ๋ฆฌ๋ฐ: ๊ฐ ์นดํ…Œ๊ณ ๋ฆฌ๋ณ„๋กœ 'Thinking' + 'Response' ๋ถ€๋ถ„์„ ์‹ค์‹œ๊ฐ„ ์ „๋‹ฌ
320
+ ##############################################################################
321
def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
    """
    Yield chatbot updates for Gradio in tuple form:
    [(role, content), ...], streaming the 'Thinking' phase and the final
    response separately in real time, one category at a time.

    NOTE(review): each yield replaces the whole chat with a single
    ("assistant", text) pair; with a tuple-type Chatbot a pair is
    (user_msg, bot_msg), so "assistant" renders on the user side —
    confirm this is the intended display.
    """
    messages = []  # NOTE(review): accumulated nowhere — appears unused.

    # 1) Input validation notice.
    yield [("assistant", "์ž…๋ ฅ๊ฐ’ ํ™•์ธ ์ค‘...")]
    time.sleep(0.3)

    # Normalize empty/whitespace-only inputs to None.
    text1 = text1.strip() if text1 else None
    text2 = text2.strip() if text2 else None
    text3 = text3.strip() if text3 else None
    if not text1:
        yield [("assistant", "์˜ค๋ฅ˜: ์ตœ์†Œ ํ•˜๋‚˜์˜ ํ‚ค์›Œ๋“œ๋ฅผ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.")]
        return

    # 2) Generate the per-category base ideas.
    yield [("assistant", "์ฐฝ์˜์ ์ธ ๋ชจ๋ธ/์ปจ์…‰/ํ˜•์ƒ ๋ณ€ํ™” ์•„์ด๋””์–ด ์ƒ์„ฑ ์ค‘... (์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ๋ถ„์„)")]
    time.sleep(0.3)
    results, objects = generate_transformations(text1, text2, text3)

    # Combined keyword label, e.g. "A ๋ฐ B".
    obj_name = " ๋ฐ ".join([obj for obj in objects if obj])

    for i, (category, result_dict) in enumerate(results.items(), start=1):
        base_desc = result_dict["base"]

        # Announce the category about to be streamed.
        yield [("assistant", f"**[{i}/{len(results)}] ์นดํ…Œ๊ณ ๋ฆฌ:** {category}\n\n๊ธฐ๋ณธ ์•„์ด๋””์–ด: {base_desc}\n\n์ง€๊ธˆ๋ถ€ํ„ฐ Thinking + Response๋ฅผ ๋‹จ๊ณ„์ ์œผ๋กœ ์ŠคํŠธ๋ฆฌ๋ฐํ•ฉ๋‹ˆ๋‹ค...")]
        time.sleep(0.5)

        # Per-category streaming state.
        thinking_text = ""
        response_text = ""
        is_thinking_done = False

        # Consume the tagged chunk stream from enhance_with_llm_stream.
        for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
            if chunk.startswith("[Thinking Chunk]"):
                # Reasoning fragment — accumulate and re-render.
                thinking_text += chunk.replace("[Thinking Chunk]", "")
                messages_to_user = f"**[Thinking]**\n{thinking_text}"
                yield [("assistant", messages_to_user)]
            elif chunk.startswith("[Response Start]"):
                # First answer fragment: switch from thinking to answering.
                is_thinking_done = True
                partial = chunk.replace("[Response Start]", "")
                response_text += partial
                messages_to_user = f"**[Final Response ์‹œ์ž‘]**\n{partial}"
                yield [("assistant", messages_to_user)]
            elif chunk.startswith("[Final Response]"):
                # NOTE(review): the producer emits this chunk as
                # "\n[Final Response]\n..." (leading newline), so this
                # startswith() never matches and the final chunk falls
                # through to the else branch below — confirm intended.
                final = chunk.replace("[Final Response]", "")
                response_text += f"\n{final}"
                yield [("assistant", f"**[์ตœ์ข… Response]**\n{response_text.strip()}")]
            else:
                # Untagged chunk: answer text if thinking finished,
                # otherwise more reasoning text.
                if is_thinking_done:
                    response_text += chunk
                    yield [("assistant", f"**[์‘๋‹ต ์ง„ํ–‰]**\n{response_text}")]
                else:
                    thinking_text += chunk
                    yield [("assistant", f"**[Thinking]**\n{thinking_text}")]

        # Persist this category's streamed answer.
        result_dict["enhanced"] = response_text

    # 3) All categories done.
    yield [("assistant", "**๋ชจ๋“  ์นดํ…Œ๊ณ ๋ฆฌ์— ๋Œ€ํ•œ ์ŠคํŠธ๋ฆฌ๋ฐ์ด ์™„๋ฃŒ๋˜์—ˆ์Šต๋‹ˆ๋‹ค!**")]
 
 
394
 
395
 
396
+ ##############################################################################
397
+ # Gradio UI
398
+ ##############################################################################
399
# Gradio UI: three keyword inputs feed the streaming idea generator.
with gr.Blocks(title="์ŠคํŠธ๋ฆฌ๋ฐ ์˜ˆ์ œ: Gemini 2.0 Flash Thinking",
               theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:

    gr.Markdown("# ๐Ÿš€ ํ‚ค์›Œ๋“œ ๊ธฐ๋ฐ˜ ์ฐฝ์˜์  ๋ณ€ํ™” ์•„์ด๋””์–ด (Gemini 2.0 Flash Thinking, Streaming)")
    gr.Markdown("ํ‚ค์›Œ๋“œ 1~3๊ฐœ๋ฅผ ์ž…๋ ฅํ•˜๋ฉด, **์นดํ…Œ๊ณ ๋ฆฌ๋ณ„๋กœ** 'Thinking'๊ณผ 'Response'๊ฐ€ ์‹ค์‹œ๊ฐ„ ์ŠคํŠธ๋ฆฌ๋ฐ๋ฉ๋‹ˆ๋‹ค.")

    # FIX: gr.Chatbot's `type` accepts "messages" or "tuples"; the previous
    # value "tuple" is rejected at startup. process_inputs_stream yields
    # lists of 2-tuples, so "tuples" is the matching format.
    chatbot = gr.Chatbot(
        label="์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ์•„์ด๋””์–ด(Thinking + Response) ์ŠคํŠธ๋ฆฌ๋ฐ",
        type="tuples",
        render_markdown=True
    )

    with gr.Row():
        with gr.Column(scale=1):
            text_input1 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 1 (ํ•„์ˆ˜)", placeholder="์˜ˆ: ์ž๋™์ฐจ")
            text_input2 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 2 (์„ ํƒ)", placeholder="์˜ˆ: ๋กœ๋ด‡")
            text_input3 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 3 (์„ ํƒ)", placeholder="์˜ˆ: ์ธ๊ณต์ง€๋Šฅ")
            submit_button = gr.Button("์•„์ด๋””์–ด ์ƒ์„ฑํ•˜๊ธฐ")

            clear_button = gr.Button("๋Œ€ํ™” ์ง€์šฐ๊ธฐ")

        with gr.Column(scale=2):
            # The chatbot above already occupies the display area.
            pass

    def clear_chat():
        """Reset the chatbot history to empty."""
        return []

    examples = [
        ["์ž๋™์ฐจ", "", ""],
        ["์Šค๋งˆํŠธํฐ", "์ธ๊ณต์ง€๋Šฅ", ""],
        ["๋“œ๋ก ", "์ธ๊ณต์ง€๋Šฅ", ""],
        ["์šด๋™ํ™”", "์›จ์–ด๋Ÿฌ๋ธ”", "๊ฑด๊ฐ•"],
    ]
    gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])

    # FIX: Gradio event listeners take no `stream=` keyword (passing it
    # raised a TypeError). Streaming output comes from process_inputs_stream
    # being a generator — yielded values update the chatbot incrementally.
    submit_button.click(
        fn=process_inputs_stream,
        inputs=[text_input1, text_input2, text_input3],
        outputs=chatbot
    )

    clear_button.click(
        fn=clear_chat,
        outputs=chatbot
    )
447
 
 
448
# Script entry point: launch the Gradio app (debug=True for verbose errors).
if __name__ == "__main__":
    demo.launch(debug=True)