acecalisto3 committed
Commit 57d9223 · verified · 1 Parent(s): 913ac3d

Update app.py

Files changed (1):
  app.py +187 -197
app.py CHANGED
@@ -2,29 +2,188 @@ import streamlit as st
 from huggingface_hub import InferenceClient
 import os
 import pickle
-from langchain.chains import ConversationChain
-from langchain.memory import ConversationBufferMemory
-from langchain.tools import Tool
-from langchain.agents import AgentType
-from langchain.chains import LLMChain
-from langchain.prompts import PromptTemplate
-from langchain.chains.question_answering import load_qa_chain
-from langchain.document_loaders import TextLoader
-from langchain.text_splitter import CharacterTextSplitter
-from langchain.embeddings import HuggingFaceEmbeddings  # Use Hugging Face Embeddings
-from langchain.vectorstores import FAISS
-from langchain.chains import RetrievalQA
-from langchain.chains.conversational_retrieval_qa import ConversationalRetrievalQAChain
-from langchain.chains.summarization import load_summarization_chain
-from langchain.chains.base import Chain
-from langchain.chains.llm import LLMChain
-from langchain.prompts import PromptTemplate
-from langchain.agents import initialize_agent, AgentType
-from langchain.tools import Tool
+from langchain_community.chains import ConversationChain
+from langchain_community.memory import ConversationBufferMemory
+from langchain_community.tools import Tool
+from langchain_community.agents import initialize_agent, AgentType
+from langchain_community.chains import LLMChain
+from langchain_community.prompts import PromptTemplate
+from langchain_community.chains.question_answering import load_qa_chain
+from langchain_community.document_loaders import TextLoader
+from langchain_community.text_splitter import CharacterTextSplitter
+from langchain_community.embeddings import HuggingFaceEmbeddings  # Use Hugging Face Embeddings
+from langchain_community.vectorstores import FAISS
+from langchain_community.chains import RetrievalQA
+from langchain_community.chains.conversational_retrieval_qa import ConversationalRetrievalQAChain
+from langchain_community.chains.summarization import load_summarization_chain
 from langchain_community.llms import HuggingFaceHub
 from typing import List, Dict, Any, Optional
 
-st.title("CODEFUSSION ☄")
+st.title("Triagi - Dev-Centric Agent Clusters ☄")
+
+# --- Model Definitions ---
+class Model:
+    def __init__(self, name, description, model_link):
+        self.name = name
+        self.description = description
+        self.model_link = model_link
+        self.inference_client = InferenceClient(model=model_link)
+
+    def generate_text(self, prompt, temperature=0.5, max_new_tokens=4096):
+        try:
+            output = self.inference_client.text_generation(
+                prompt,
+                temperature=temperature,
+                max_new_tokens=max_new_tokens,
+                stream=True
+            )
+            response = "".join(output)
+        except ValueError as e:
+            if "Input validation error" in str(e):
+                return "Error: The input prompt is too long. Please try a shorter prompt."
+            else:
+                return f"An error occurred: {e}"
+        return response
+
+# --- Model Examples ---
+class FrontendForgeModel(Model):
+    def __init__(self):
+        super().__init__("FrontendForge🚀", "The FrontendForge model is a Large Language Model (LLM) that's able to handle frontend development tasks such as UI design and user interaction logic.", "mistralai/Mistral-7B-Instruct-v0.2")
+
+class BackendBuilderModel(Model):
+    def __init__(self):
+        super().__init__("BackendBuilder⭐", "The BackendBuilder model is a Large Language Model (LLM) that's specialized in backend development tasks including API creation, database management, and server-side logic.", "mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+class IntegratorModel(Model):
+    def __init__(self):
+        super().__init__("Integrator🔄", "The Integrator model is a Large Language Model (LLM) that's best suited for integrating frontend and backend components, handling business logic, and ensuring seamless communication between different parts of the application.", "microsoft/Phi-3-mini-4k-instruct")
+
+# --- Streamlit Interface ---
+model_links = {
+    "FrontendForge🚀": "mistralai/Mistral-7B-Instruct-v0.2",
+    "BackendBuilder⭐": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "Integrator🔄": "microsoft/Phi-3-mini-4k-instruct"
+}
+
+model_info = {
+    "FrontendForge🚀": {
+        'description': "The FrontendForge model is a Large Language Model (LLM) that's able to handle frontend development tasks such as UI design and user interaction logic.",
+        'logo': './11.jpg'
+    },
+    "BackendBuilder⭐": {
+        'description': "The BackendBuilder model is a Large Language Model (LLM) that's specialized in backend development tasks including API creation, database management, and server-side logic.",
+        'logo': './2.jpg'
+    },
+    "Integrator🔄": {
+        'description': "The Integrator model is a Large Language Model (LLM) that's best suited for integrating frontend and backend components, handling business logic, and ensuring seamless communication between different parts of the application.",
+        'logo': './3.jpg'
+    },
+}
+
+def format_prompt(message, conversation_history, custom_instructions=None):
+    prompt = ""
+    if custom_instructions:
+        prompt += "[INST] {} [/INST]\n".format(custom_instructions)
+
+    # Add conversation history to the prompt
+    prompt += "[CONV_HISTORY]\n"
+    for role, content in conversation_history:
+        prompt += "{}: {}\n".format(role.upper(), content)
+    prompt += "[/CONV_HISTORY]\n"
+
+    # Add the current message
+    prompt += "[INST] {} [/INST]\n".format(message)
+
+    # Add the response format
+    prompt += "[RESPONSE]\n"
+
+    return prompt
+
+def reset_conversation():
+    '''
+    Resets Conversation
+    '''
+    st.session_state.conversation = []
+    st.session_state.messages = []
+    st.session_state.chat_state = "reset"
+
+def load_conversation_history():
+    history_file = "conversation_history.pickle"
+    if os.path.exists(history_file):
+        with open(history_file, "rb") as f:
+            conversation_history = pickle.load(f)
+    else:
+        conversation_history = []
+    return conversation_history
+
+def save_conversation_history(conversation_history):
+    history_file = "conversation_history.pickle"
+    with open(history_file, "wb") as f:
+        pickle.dump(conversation_history, f)
+
+models = [key for key in model_links.keys()]
+selected_model = st.sidebar.selectbox("Select Model", models)
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
+st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
+
+st.sidebar.write(f"You're now chatting with **{selected_model}**")
+st.sidebar.markdown(model_info[selected_model]['description'])
+st.sidebar.image(model_info[selected_model]['logo'])
+
+st.sidebar.markdown("*Generating the code might go slow if you are using low power resources*")
+
+if "prev_option" not in st.session_state:
+    st.session_state.prev_option = selected_model
+
+if st.session_state.prev_option != selected_model:
+    st.session_state.messages = []
+    st.session_state.prev_option = selected_model
+
+if "chat_state" not in st.session_state:
+    st.session_state.chat_state = "normal"
+
+# Load the conversation history from the file
+if "messages" not in st.session_state:
+    st.session_state.messages = load_conversation_history()
+
+repo_id = model_links[selected_model]
+st.subheader(f'{selected_model}')
+
+if st.session_state.chat_state == "normal":
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
+        custom_instruction = "Act like a Human in conversation"
+        with st.chat_message("user"):
+            st.markdown(prompt)
+
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
+
+        formated_text = format_prompt(prompt, conversation_history, custom_instruction)
+
+        with st.chat_message("assistant"):
+            # Select the appropriate model based on the user's choice
+            if selected_model == "FrontendForge🚀":
+                model = FrontendForgeModel()
+            elif selected_model == "BackendBuilder⭐":
+                model = BackendBuilderModel()
+            elif selected_model == "Integrator🔄":
+                model = IntegratorModel()
+            else:
+                st.error("Invalid model selection.")
+                st.stop()  # Stop the Streamlit app execution
+
+            response = model.generate_text(formated_text, temperature=temp_values)
+            st.markdown(response)
+            st.session_state.messages.append({"role": "assistant", "content": response})
+            save_conversation_history(st.session_state.messages)
+
+elif st.session_state.chat_state == "reset":
+    st.session_state.chat_state = "normal"
+    st.experimental_rerun()
 
 # --- Agent Definitions ---
 class Agent:
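
Note: several of the new `langchain_community.*` imports in the hunk above target modules that package does not provide. `langchain_community` hosts third-party integrations (document loaders, embeddings, vector stores, LLM wrappers, tools); chain, memory, prompt, and agent classes remain in `langchain`. There is also no Python class named `ConversationalRetrievalQAChain` (that is the LangChain.js name; the Python class is `ConversationalRetrievalChain`), and the summarization loader is `load_summarize_chain`. A corrected import block might look like this sketch, written against langchain 0.1.x:

# Sketch of imports that actually resolve; adjust to the pinned langchain version.
from langchain.chains import ConversationChain, LLMChain, RetrievalQA, ConversationalRetrievalChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.summarize import load_summarize_chain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import HuggingFaceHub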
@@ -48,13 +207,11 @@ class Agent:
         return action
 
     def observe(self, observation):
-        # Placeholder for observation processing
-        # This should be implemented based on the agent's capabilities and the nature of the observation
-        pass
+        # Process observation based on the agent's capabilities and the nature of the observation
+        self.memory.append(observation)
 
     def learn(self, data):
-        # Placeholder for learning logic
-        # This should be implemented based on the agent's capabilities and the type of data
+        # Implement learning logic based on the agent's capabilities and the type of data
         pass
 
     def __str__(self):
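
The new `observe` writes to `self.memory`, an attribute this diff never shows being created. If `Agent.__init__` does not already set it, the first call raises `AttributeError`. A minimal sketch, assuming a plain list-backed memory and the constructor shape implied by the `agent_pool` calls later in the file:

class Agent:
    def __init__(self, name, role, tools, knowledge_base=""):
        # Signature inferred from Agent("IdeaIntake", "Idea Intake", [...], knowledge_base="") below.
        self.name = name
        self.role = role
        self.tools = tools
        self.knowledge_base = knowledge_base
        self.memory = []  # backing store for observe(); assumed, not shown in the diff

    def observe(self, observation):
        self.memory.append(observation)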
@@ -67,9 +224,7 @@ class Tool:
         self.description = description
 
     def run(self, arguments):
-        # Placeholder for tool execution logic
-        # This should be implemented based on the specific tool's functionality
-        # and the provided arguments
+        # Implement tool execution logic based on the specific tool's functionality and the provided arguments
         return {"output": "Tool Output"}
 
 # --- Tool Examples ---
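
`Tool.run` remains a stub that returns a canned payload; each concrete tool is expected to override it. A minimal hypothetical subclass (the `EchoTool` name and behavior are illustrative, not part of the commit; the `(name, description)` constructor order is inferred from the base class above):

class EchoTool(Tool):
    def __init__(self):
        super().__init__("Echo", "Returns its arguments unchanged.")

    def run(self, arguments):
        # Mirror the input back in the same {"output": ...} shape the base class uses.
        return {"output": arguments}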
@@ -242,7 +397,7 @@ class ImageEditingTool(Tool):
         image_url = arguments.get("image_url", "https://example.com/image.jpg")
         editing_instructions = arguments.get("editing_instructions", "Make the cat smile")
         edited_image_url = self.chain.run(image_url=image_url, editing_instructions=editing_instructions)
-        return {"output": f"Edited image: {edited_image_url}"}
+        return {"output": f"Edited image: {edited_image_url}"}
 
 class ImageAnalysisTool(Tool):
     def __init__(self):
@@ -274,8 +429,8 @@ class QuestionAnsweringTool(Tool):
 # --- Agent Pool ---
 agent_pool = {
     "IdeaIntake": Agent("IdeaIntake", "Idea Intake", [DataRetrievalTool(), CodeGenerationTool(), TextGenerationTool(), QuestionAnsweringTool()], knowledge_base=""),
-    "CodeBuilder": Agent("CodeBuilder", "Code Builder", [CodeGenerationTool(), CodeDebuggingTool(), CodeOptimizationTool(), CodeExecutionTool(), CodeSummarizationTool, CodeTranslationTool, CodeDocumentationTool], knowledge_base=""),
-    "ImageCreator": Agent("ImageCreator", "Image Creator", [ImageGenerationTool(), ImageEditingTool(), ImageAnalysisTool], knowledge_base=""),
+    "CodeBuilder": Agent("CodeBuilder", "Code Builder", [CodeGenerationTool(), CodeDebuggingTool(), CodeOptimizationTool(), CodeExecutionTool(), CodeSummarizationTool(), CodeTranslationTool(), CodeDocumentationTool()], knowledge_base=""),
+    "ImageCreator": Agent("ImageCreator", "Image Creator", [ImageGenerationTool(), ImageEditingTool(), ImageAnalysisTool()], knowledge_base=""),
 }
 
 # --- Workflow Definitions ---
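
The added `()` on the + lines are not cosmetic: the old entries mixed instances with bare classes, and calling `run` on a bare class fails because there is no bound `self`. A two-line illustration:

tools = [CodeSummarizationTool]    # holds the class object; tools[0].run({}) raises TypeError
tools = [CodeSummarizationTool()]  # holds an instance; tools[0].run({}) returns the output dict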
@@ -318,170 +473,6 @@ class DevSandboxWorkflow(Workflow):
     def __init__(self):
         super().__init__("Dev Sandbox", [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]], "Experiment with code", "A workflow for experimenting with code.")
 
-# --- Model Definitions ---
-class Model:
-    def __init__(self, name, description, model_link):
-        self.name = name
-        self.description = description
-        self.model_link = model_link
-        self.inference_client = InferenceClient(model=model_link)
-
-    def generate_text(self, prompt, temperature=0.5, max_new_tokens=4096):
-        try:
-            output = self.inference_client.text_generation(
-                prompt,
-                temperature=temperature,
-                max_new_tokens=max_new_tokens,
-                stream=True
-            )
-            response = "".join(output)
-        except ValueError as e:
-            if "Input validation error" in str(e):
-                return "Error: The input prompt is too long. Please try a shorter prompt."
-            else:
-                return f"An error occurred: {e}"
-        return response
-
-# --- Model Examples ---
-class LegacyLiftModel(Model):
-    def __init__(self):
-        super().__init__("LegacyLift🚀", "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.", "mistralai/Mistral-7B-Instruct-v0.2")
-
-class ModernMigrateModel(Model):
-    def __init__(self):
-        super().__init__("ModernMigrate⭐", "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference.", "mistralai/Mixtral-8x7B-Instruct-v0.1")
-
-class RetroRecodeModel(Model):
-    def __init__(self):
-        super().__init__("RetroRecode🔄", "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.", "microsoft/Phi-3-mini-4k-instruct")
-
-# --- Streamlit Interface ---
-model_links = {
-    "LegacyLift🚀": "mistralai/Mistral-7B-Instruct-v0.2",
-    "ModernMigrate⭐": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    "RetroRecode🔄": "microsoft/Phi-3-mini-4k-instruct"
-}
-
-model_info = {
-    "LegacyLift🚀": {
-        'description': "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.",
-        'logo': './11.jpg'
-    },
-    "ModernMigrate⭐": {
-        'description': "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference.",
-        'logo': './2.jpg'
-    },
-    "RetroRecode🔄": {
-        'description': "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.",
-        'logo': './3.jpg'
-    },
-}
-
-def format_prompt(message, conversation_history, custom_instructions=None):
-    prompt = ""
-    if custom_instructions:
-        prompt += f"[INST] {custom_instructions} [/INST]\n"
-
-    # Add conversation history to the prompt
-    prompt += "[CONV_HISTORY]\n"
-    for role, content in conversation_history:
-        prompt += f"{role.upper()}: {content}\n"
-    prompt += "[/CONV_HISTORY]\n"
-
-    # Add the current message
-    prompt += f"[INST] {message} [/INST]\n"
-
-    # Add the response format
-    prompt += "[RESPONSE]\n"
-
-    return prompt
-
-def reset_conversation():
-    '''
-    Resets Conversation
-    '''
-    st.session_state.conversation = []
-    st.session_state.messages = []
-    st.session_state.chat_state = "reset"
-
-def load_conversation_history():
-    history_file = "conversation_history.pickle"
-    if os.path.exists(history_file):
-        with open(history_file, "rb") as f:
-            conversation_history = pickle.load(f)
-    else:
-        conversation_history = []
-    return conversation_history
-
-def save_conversation_history(conversation_history):
-    history_file = "conversation_history.pickle"
-    with open(history_file, "wb") as f:
-        pickle.dump(conversation_history, f)
-
-models = [key for key in model_links.keys()]
-selected_model = st.sidebar.selectbox("Select Model", models)
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
-
-st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
-
-st.sidebar.markdown("*Generating the code might go slow if you are using low power resources*")
-
-if "prev_option" not in st.session_state:
-    st.session_state.prev_option = selected_model
-
-if st.session_state.prev_option != selected_model:
-    st.session_state.messages = []
-    st.session_state.prev_option = selected_model
-
-if "chat_state" not in st.session_state:
-    st.session_state.chat_state = "normal"
-
-# Load the conversation history from the file
-if "messages" not in st.session_state:
-    st.session_state.messages = load_conversation_history()
-
-repo_id = model_links[selected_model]
-st.subheader(f'{selected_model}')
-
-if st.session_state.chat_state == "normal":
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
-
-    if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
-        custom_instruction = "Act like a Human in conversation"
-        with st.chat_message("user"):
-            st.markdown(prompt)
-
-        st.session_state.messages.append({"role": "user", "content": prompt})
-        conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
-
-        formated_text = format_prompt(prompt, conversation_history, custom_instruction)
-
-        with st.chat_message("assistant"):
-            # Select the appropriate model based on the user's choice
-            if selected_model == "LegacyLift🚀":
-                model = LegacyLiftModel()
-            elif selected_model == "ModernMigrate⭐":
-                model = ModernMigrateModel()
-            elif selected_model == "RetroRecode🔄":
-                model = RetroRecodeModel()
-            else:
-                st.error("Invalid model selection.")
-                st.stop()  # Stop the Streamlit app execution
-
-            response = model.generate_text(formated_text, temperature=temp_values)
-            st.markdown(response)
-            st.session_state.messages.append({"role": "assistant", "content": response})
-            save_conversation_history(st.session_state.messages)
-
-elif st.session_state.chat_state == "reset":
-    st.session_state.chat_state = "normal"
-    st.experimental_rerun()
-
 # --- Agent-Based Workflow Execution ---
 def execute_workflow(workflow, prompt, context):
     # Execute the workflow
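
Both the block removed above and its re-added copy at the top of the file end the reset branch with `st.experimental_rerun()`. Streamlit has since deprecated that call in favor of `st.rerun()` (added in 1.27). A sketch of the branch on current releases, assuming Streamlit >= 1.27:

elif st.session_state.chat_state == "reset":
    st.session_state.chat_state = "normal"
    st.rerun()  # replaces the deprecated st.experimental_rerun()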
@@ -524,7 +515,6 @@ if st.button("Dev Sandbox"):
     context = execute_workflow(dev_sandbox_workflow, "Write a Python function to reverse a string.", context)
     st.write(f"Workflow Output: {context}")
 
-
 # --- Displaying Agent and Tool Information ---
 st.subheader("Agent Pool")
 for agent_name, agent in agent_pool.items():
@@ -587,4 +577,4 @@ image_analysis_tool = ImageAnalysisTool()
 st.write(f"""Image Analysis Tool Output: {image_analysis_tool.run({'image_url': 'https://example.com/image.jpg'})}""")
 
 question_answering_tool = QuestionAnsweringTool()
-st.write(f"""Question Answering Tool Output: {question_answering_tool.run({'question': 'What is the capital of France?', 'context': 'France is a country in Western Europe. Its capital is Paris.'})}""")
+st.write(f"""Question Answering Tool Output: {question_answering_tool.run({'question': 'What is the capital of France?', 'context': 'France is a country in Western Europe. Its capital is Paris.'})}""")
 