tonko22 committed on
Commit 05a3ce6 · 1 Parent(s): 1a4ab8e

Initial commit: move code from Colab

Files changed (5)
  1. .gitignore +3 -0
  2. Gradio_UI.py +295 -0
  3. app.py +62 -54
  4. pyproject.toml +12 -0
  5. requirements.txt +252 -1
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .env
+ .venv
+ uv.lock
Gradio_UI.py ADDED
@@ -0,0 +1,295 @@
+ import mimetypes
+ import os
+ import re
+ import shutil
+ from typing import Optional
+
+ from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
+ from smolagents.agents import ActionStep, MultiStepAgent
+ from smolagents.memory import MemoryStep
+ from smolagents.utils import _is_package_available
+
+
+ def pull_messages_from_step(
+     step_log: MemoryStep,
+ ):
+     """Extract ChatMessage objects from agent steps with proper nesting"""
+     import gradio as gr
+
+     if isinstance(step_log, ActionStep):
+         # Output the step number
+         step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+         yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
+
+         # First yield the thought/reasoning from the LLM
+         if hasattr(step_log, "model_output") and step_log.model_output is not None:
+             # Clean up the LLM output
+             model_output = step_log.model_output.strip()
+             # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
+             model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
+             model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
+             model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
+             model_output = model_output.strip()
+             yield gr.ChatMessage(role="assistant", content=model_output)
+
+         # For tool calls, create a parent message
+         if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
+             first_tool_call = step_log.tool_calls[0]
+             used_code = first_tool_call.name == "python_interpreter"
+             parent_id = f"call_{len(step_log.tool_calls)}"
+
+             # Tool call becomes the parent message with timing info
+             # First we will handle arguments based on type
+             args = first_tool_call.arguments
+             if isinstance(args, dict):
+                 content = str(args.get("answer", str(args)))
+             else:
+                 content = str(args).strip()
+
+             if used_code:
+                 # Clean up the content by removing any end code tags
+                 content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
+                 content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
+                 content = content.strip()
+                 if not content.startswith("```python"):
+                     content = f"```python\n{content}\n```"
+
+             parent_message_tool = gr.ChatMessage(
+                 role="assistant",
+                 content=content,
+                 metadata={
+                     "title": f"🛠️ Used tool {first_tool_call.name}",
+                     "id": parent_id,
+                     "status": "pending",
+                 },
+             )
+             yield parent_message_tool
+
+             # Nesting execution logs under the tool call if they exist
+             if hasattr(step_log, "observations") and (
+                 step_log.observations is not None and step_log.observations.strip()
+             ):  # Only yield execution logs if there's actual content
+                 log_content = step_log.observations.strip()
+                 if log_content:
+                     log_content = re.sub(r"^Execution logs:\s*", "", log_content)
+                     yield gr.ChatMessage(
+                         role="assistant",
+                         content=f"{log_content}",
+                         metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
+                     )
+
+             # Nesting any errors under the tool call
+             if hasattr(step_log, "error") and step_log.error is not None:
+                 yield gr.ChatMessage(
+                     role="assistant",
+                     content=str(step_log.error),
+                     metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
+                 )
+
+             # Update parent message metadata to done status without yielding a new message
+             parent_message_tool.metadata["status"] = "done"
+
+         # Handle standalone errors but not from tool calls
+         elif hasattr(step_log, "error") and step_log.error is not None:
+             yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
+
+         # Calculate duration and token information
+         step_footnote = f"{step_number}"
+         if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
+             token_str = (
+                 f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
+             )
+             step_footnote += token_str
+         if hasattr(step_log, "duration") and step_log.duration:
+             step_duration = f" | Duration: {round(float(step_log.duration), 2)}"
+             step_footnote += step_duration
+         step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
+         yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
+         yield gr.ChatMessage(role="assistant", content="-----")
+
+
+ def stream_to_gradio(
+     agent,
+     task: str,
+     reset_agent_memory: bool = False,
+     additional_args: Optional[dict] = None,
+ ):
+     """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
+     if not _is_package_available("gradio"):
+         raise ModuleNotFoundError(
+             "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
+         )
+     import gradio as gr
+
+     total_input_tokens = 0
+     total_output_tokens = 0
+
+     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
+         # Track tokens if model provides them
+         if hasattr(agent.model, "last_input_token_count"):
+             total_input_tokens += agent.model.last_input_token_count
+             total_output_tokens += agent.model.last_output_token_count
+             if isinstance(step_log, ActionStep):
+                 step_log.input_token_count = agent.model.last_input_token_count
+                 step_log.output_token_count = agent.model.last_output_token_count
+
+         for message in pull_messages_from_step(
+             step_log,
+         ):
+             yield message
+
+     final_answer = step_log  # Last log is the run's final_answer
+     final_answer = handle_agent_output_types(final_answer)
+
+     if isinstance(final_answer, AgentText):
+         yield gr.ChatMessage(
+             role="assistant",
+             content=f"**Final answer:**\n{final_answer.to_string()}\n",
+         )
+     elif isinstance(final_answer, AgentImage):
+         yield gr.ChatMessage(
+             role="assistant",
+             content={"path": final_answer.to_string(), "mime_type": "image/png"},
+         )
+     elif isinstance(final_answer, AgentAudio):
+         yield gr.ChatMessage(
+             role="assistant",
+             content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
+         )
+     else:
+         yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
+
+
+ class GradioUI:
+     """A one-line interface to launch your agent in Gradio"""
+
+     def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
+         if not _is_package_available("gradio"):
+             raise ModuleNotFoundError(
+                 "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
+             )
+         self.agent = agent
+         self.file_upload_folder = file_upload_folder
+         if self.file_upload_folder is not None:
+             if not os.path.exists(file_upload_folder):
+                 os.mkdir(file_upload_folder)
+
+     def interact_with_agent(self, prompt, messages):
+         import gradio as gr
+
+         messages.append(gr.ChatMessage(role="user", content=prompt))
+         yield messages
+         for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
+             messages.append(msg)
+             yield messages
+         yield messages
+
+     def upload_file(
+         self,
+         file,
+         file_uploads_log,
+         allowed_file_types=[
+             "application/pdf",
+             "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+             "text/plain",
+         ],
+     ):
+         """
+         Handle file uploads, default allowed types are .pdf, .docx, and .txt
+         """
+         import gradio as gr
+
+         if file is None:
+             return gr.Textbox("No file uploaded", visible=True), file_uploads_log
+
+         try:
+             mime_type, _ = mimetypes.guess_type(file.name)
+         except Exception as e:
+             return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
+
+         if mime_type not in allowed_file_types:
+             return gr.Textbox("File type disallowed", visible=True), file_uploads_log
+
+         # Sanitize file name
+         original_name = os.path.basename(file.name)
+         sanitized_name = re.sub(
+             r"[^\w\-.]", "_", original_name
+         )  # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
+
+         type_to_ext = {}
+         for ext, t in mimetypes.types_map.items():
+             if t not in type_to_ext:
+                 type_to_ext[t] = ext
+
+         # Ensure the extension correlates to the mime type
+         sanitized_name = sanitized_name.split(".")[:-1]
+         sanitized_name.append("" + type_to_ext[mime_type])
+         sanitized_name = "".join(sanitized_name)
+
+         # Save the uploaded file to the specified folder
+         file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
+         shutil.copy(file.name, file_path)
+
+         return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
+
+     def log_user_message(self, text_input, file_uploads_log):
+         return (
+             text_input
+             + (
+                 f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
+                 if len(file_uploads_log) > 0
+                 else ""
+             ),
+             "",
+         )
+
+     def launch(self, **kwargs):
+         import gradio as gr
+
+         # Define instruction text
+         instructions = """
+ # 🎵 Song Meaning Bot 🎶
+ ### How to Use:
+ 1️⃣ **Paste a YouTube song link** in the input field.
+ 2️⃣ The AI will **extract the song title & artist** from the video.
+ 3️⃣ It will **search for lyrics**.
+ 4️⃣ If lyrics are found, they will be **translated into English**.
+ 5️⃣ The AI will **summarize the song's meaning** in simple terms.
+
+ 📌 *Note:* The agent currently only takes links in the format https://www.youtube.com/watch?v=XXXXXXXXXX.
+ """
+
+         with gr.Blocks(fill_height=True) as demo:
+             gr.Markdown(instructions)
+             stored_messages = gr.State([])
+             file_uploads_log = gr.State([])
+             chatbot = gr.Chatbot(
+                 label="Agent",
+                 type="messages",
+                 avatar_images=(
+                     None,
+                     "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
+                 ),
+                 resizeable=True,
+                 scale=1,
+             )
+             # If an upload folder is provided, enable the upload feature
+             if self.file_upload_folder is not None:
+                 upload_file = gr.File(label="Upload a file")
+                 upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
+                 upload_file.change(
+                     self.upload_file,
+                     [upload_file, file_uploads_log],
+                     [upload_status, file_uploads_log],
+                 )
+             text_input = gr.Textbox(lines=1, label="Chat Message")
+             text_input.submit(
+                 self.log_user_message,
+                 [text_input, file_uploads_log],
+                 [stored_messages, text_input],
+             ).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
+
+         demo.launch(debug=True, share=True, **kwargs)
+
+
+ __all__ = ["stream_to_gradio", "GradioUI"]
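
The module above is essentially the stock smolagents Gradio UI with a customized `launch()`. A minimal sketch of how its streaming entry point is meant to be consumed (assuming `smolagents[gradio]` is installed; the `HfApiModel` default and the task string are illustrative, not part of this commit):

```python
# Minimal sketch: drive stream_to_gradio directly, outside the GradioUI class.
from smolagents import CodeAgent, HfApiModel

from Gradio_UI import stream_to_gradio

agent = CodeAgent(tools=[], model=HfApiModel())  # toy agent, default toolbox only

# stream_to_gradio yields gr.ChatMessage objects one step at a time;
# GradioUI.interact_with_agent appends each to the chat history as it arrives.
for message in stream_to_gradio(agent, task="What is 2 + 2?", reset_agent_memory=True):
    print(message.role, message.content)
```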
app.py CHANGED
@@ -1,64 +1,72 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import os
+
+ from Gradio_UI import GradioUI
+ from litellm import completion
+ from smolagents import (
+     CodeAgent,
+     DuckDuckGoSearchTool,
+     FinalAnswerTool,
+     LiteLLMModel,
+     VisitWebpageTool,
+     tool,
+ )
+
+ os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
+
+
+ @tool
+ def analyze_lyrics_tool(song_title: str, artist: str, lyrics: str) -> str:
+     """
+     Performs a deep analysis of a musical track, given its metadata.
+
+     Args:
+         song_title: The title of the song or music track.
+         artist: The name of the artist.
+         lyrics: The lyrics of the song.
+
+     Returns:
+         A summary of the song's meaning in English.
+     """
+     prompt = f'''You are an expert in songs and their meanings.
+     Summarize the meaning of {song_title} by {artist} and identify
+     key themes based on the lyrics:
+     {lyrics}
+
+     Include a deep analysis of the song's ideas and mood, with
+     explanations grounded in references to specific lines.
+     '''
+
+     response = completion(
+         model="gemini/gemini-2.0-flash",
+         messages=[{"role": "user", "content": prompt}],
+     )
+
+     # litellm responses are usually object-like; fall back to dict-style
+     # access rather than silently returning None.
+     try:
+         return response.choices[0].message.content.strip()
+     except (AttributeError, KeyError, IndexError):
+         return response["choices"][0]["message"]["content"].strip()
+
+
+ model = LiteLLMModel(model_id="gemini/gemini-2.0-flash")
+
+ agent = CodeAgent(
+     tools=[
+         FinalAnswerTool(),
+         DuckDuckGoSearchTool(),
+         VisitWebpageTool(),
+         analyze_lyrics_tool,
+     ],
+     model=model,
+     additional_authorized_imports=["numpy", "bs4"],
+     max_steps=22,
+     verbosity_level=1,
+ )
+
+ GradioUI(agent).launch()
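
Because `app.py` launches the Gradio UI at import time, a quick smoke test is easiest by temporarily adding a snippet like the one below just before the `GradioUI(agent).launch()` line. This is a sketch, assuming `GEMINI_API_KEY` is set in the environment; the URL is a placeholder in the only format the agent accepts:

```python
# Sketch: run the agent once from the command line before serving the UI.
result = agent.run(
    "Explain the meaning of the song at "
    "https://www.youtube.com/watch?v=XXXXXXXXXX"
)
print(result)
```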
 
 
pyproject.toml ADDED
@@ -0,0 +1,12 @@
+ [project]
+ name = "lyricsanalyzeragent"
+ version = "0.1.0"
+ description = "Add your description here"
+ readme = "README.md"
+ requires-python = ">=3.13"
+ dependencies = [
+     "gradio>=5.20.0",
+     "huggingface-hub>=0.29.1",
+     "litellm>=1.61.20",
+     "smolagents>=1.9.2",
+ ]
requirements.txt CHANGED
@@ -1 +1,252 @@
- huggingface_hub==0.25.2
+ # This file was autogenerated by uv via the following command:
+ #    uv pip compile pyproject.toml -o requirements.txt
+ aiofiles==23.2.1
+     # via gradio
+ aiohappyeyeballs==2.4.6
+     # via aiohttp
+ aiohttp==3.11.13
+     # via litellm
+ aiosignal==1.3.2
+     # via aiohttp
+ annotated-types==0.7.0
+     # via pydantic
+ anyio==4.8.0
+     # via
+     #   gradio
+     #   httpx
+     #   openai
+     #   starlette
+ attrs==25.1.0
+     # via
+     #   aiohttp
+     #   jsonschema
+     #   referencing
+ audioop-lts==0.2.1
+     # via gradio
+ beautifulsoup4==4.13.3
+     # via markdownify
+ certifi==2025.1.31
+     # via
+     #   httpcore
+     #   httpx
+     #   requests
+ charset-normalizer==3.4.1
+     # via requests
+ click==8.1.8
+     # via
+     #   duckduckgo-search
+     #   litellm
+     #   typer
+     #   uvicorn
+ distro==1.9.0
+     # via openai
+ duckduckgo-search==7.5.0
+     # via smolagents
+ fastapi==0.115.11
+     # via gradio
+ ffmpy==0.5.0
+     # via gradio
+ filelock==3.17.0
+     # via huggingface-hub
+ frozenlist==1.5.0
+     # via
+     #   aiohttp
+     #   aiosignal
+ fsspec==2025.2.0
+     # via
+     #   gradio-client
+     #   huggingface-hub
+ gradio==5.20.0
+     # via lyricsanalyzeragent (pyproject.toml)
+ gradio-client==1.7.2
+     # via gradio
+ groovy==0.1.2
+     # via gradio
+ h11==0.14.0
+     # via
+     #   httpcore
+     #   uvicorn
+ httpcore==1.0.7
+     # via httpx
+ httpx==0.28.1
+     # via
+     #   gradio
+     #   gradio-client
+     #   litellm
+     #   openai
+     #   safehttpx
+ huggingface-hub==0.29.1
+     # via
+     #   lyricsanalyzeragent (pyproject.toml)
+     #   gradio
+     #   gradio-client
+     #   smolagents
+     #   tokenizers
+ idna==3.10
+     # via
+     #   anyio
+     #   httpx
+     #   requests
+     #   yarl
+ importlib-metadata==8.6.1
+     # via litellm
+ jinja2==3.1.5
+     # via
+     #   gradio
+     #   litellm
+     #   smolagents
+ jiter==0.8.2
+     # via openai
+ jsonschema==4.23.0
+     # via litellm
+ jsonschema-specifications==2024.10.1
+     # via jsonschema
+ litellm==1.61.20
+     # via lyricsanalyzeragent (pyproject.toml)
+ lxml==5.3.1
+     # via duckduckgo-search
+ markdown-it-py==3.0.0
+     # via rich
+ markdownify==1.0.0
+     # via smolagents
+ markupsafe==2.1.5
+     # via
+     #   gradio
+     #   jinja2
+ mdurl==0.1.2
+     # via markdown-it-py
+ multidict==6.1.0
+     # via
+     #   aiohttp
+     #   yarl
+ numpy==2.2.3
+     # via
+     #   gradio
+     #   pandas
+ openai==1.65.2
+     # via litellm
+ orjson==3.10.15
+     # via gradio
+ packaging==24.2
+     # via
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+ pandas==2.2.3
+     # via
+     #   gradio
+     #   smolagents
+ pillow==11.1.0
+     # via
+     #   gradio
+     #   smolagents
+ primp==0.14.0
+     # via duckduckgo-search
+ propcache==0.3.0
+     # via
+     #   aiohttp
+     #   yarl
+ pydantic==2.10.6
+     # via
+     #   fastapi
+     #   gradio
+     #   litellm
+     #   openai
+ pydantic-core==2.27.2
+     # via pydantic
+ pydub==0.25.1
+     # via gradio
+ pygments==2.19.1
+     # via rich
+ python-dateutil==2.9.0.post0
+     # via pandas
+ python-dotenv==1.0.1
+     # via
+     #   litellm
+     #   smolagents
+ python-multipart==0.0.20
+     # via gradio
+ pytz==2025.1
+     # via pandas
+ pyyaml==6.0.2
+     # via
+     #   gradio
+     #   huggingface-hub
+ referencing==0.36.2
+     # via
+     #   jsonschema
+     #   jsonschema-specifications
+ regex==2024.11.6
+     # via tiktoken
+ requests==2.32.3
+     # via
+     #   huggingface-hub
+     #   smolagents
+     #   tiktoken
+ rich==13.9.4
+     # via
+     #   smolagents
+     #   typer
+ rpds-py==0.23.1
+     # via
+     #   jsonschema
+     #   referencing
+ ruff==0.9.9
+     # via gradio
+ safehttpx==0.1.6
+     # via gradio
+ semantic-version==2.10.0
+     # via gradio
+ shellingham==1.5.4
+     # via typer
+ six==1.17.0
+     # via
+     #   markdownify
+     #   python-dateutil
+ smolagents==1.9.2
+     # via lyricsanalyzeragent (pyproject.toml)
+ sniffio==1.3.1
+     # via
+     #   anyio
+     #   openai
+ soupsieve==2.6
+     # via beautifulsoup4
+ starlette==0.46.0
+     # via
+     #   fastapi
+     #   gradio
+ tiktoken==0.9.0
+     # via litellm
+ tokenizers==0.21.0
+     # via litellm
+ tomlkit==0.13.2
+     # via gradio
+ tqdm==4.67.1
+     # via
+     #   huggingface-hub
+     #   openai
+ typer==0.15.2
+     # via gradio
+ typing-extensions==4.12.2
+     # via
+     #   beautifulsoup4
+     #   fastapi
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   openai
+     #   pydantic
+     #   pydantic-core
+     #   typer
+ tzdata==2025.1
+     # via pandas
+ urllib3==2.3.0
+     # via requests
+ uvicorn==0.34.0
+     # via gradio
+ websockets==15.0
+     # via gradio-client
+ yarl==1.18.3
+     # via aiohttp
+ zipp==3.21.0
+     # via importlib-metadata