acecalisto3 committed on
Commit
3eef9d9
·
verified ·
1 Parent(s): 47e1864

Update app.py

Files changed (1)
  1. app.py +185 -196
app.py CHANGED
@@ -2,29 +2,187 @@ import streamlit as st
  from huggingface_hub import InferenceClient
  import os
  import pickle
- from langchain_community.chains import ConversationChain
- from langchain.memory import ConversationBufferMemory
- from langchain.tools import Tool
- from langchain.agents import AgentType
- from langchain.chains import LLMChain
- from langchain.prompts import PromptTemplate
- from langchain.chains.question_answering import load_qa_chain
- from langchain.document_loaders import TextLoader
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.embeddings import HuggingFaceEmbeddings  # Use Hugging Face Embeddings
- from langchain.vectorstores import FAISS
- from langchain.chains import RetrievalQA
- from langchain.chains.conversational_retrieval_qa import ConversationalRetrievalQAChain
- from langchain.chains.summarization import load_summarization_chain
- from langchain.chains.base import Chain
- from langchain.chains.llm import LLMChain
- from langchain.prompts import PromptTemplate
- from langchain.agents import initialize_agent, AgentType
- from langchain.tools import Tool
+ from langchain_community.memory import ConversationBufferMemory
+ from langchain_community.tools import Tool
+ from langchain_community.agents import initialize_agent, AgentType
+ from langchain_community.chains import LLMChain
+ from langchain_community.prompts import PromptTemplate
+ from langchain_community.chains.question_answering import load_qa_chain
+ from langchain_community.document_loaders import TextLoader
+ from langchain_community.text_splitter import CharacterTextSplitter
+ from langchain_community.embeddings import HuggingFaceEmbeddings  # Use Hugging Face Embeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.chains import RetrievalQA
+ from langchain_community.chains.conversational_retrieval_qa import ConversationalRetrievalQAChain
+ from langchain_community.chains.summarization import load_summarization_chain
  from langchain_community.llms import HuggingFaceHub
  from typing import List, Dict, Any, Optional

- st.title("CODEFUSSION ☄")
+ st.title("Triagi - Dev-Centric Agent Clusters ☄")
+
+ # --- Model Definitions ---
+ class Model:
+     def __init__(self, name, description, model_link):
+         self.name = name
+         self.description = description
+         self.model_link = model_link
+         self.inference_client = InferenceClient(model=model_link)
+
+     def generate_text(self, prompt, temperature=0.5, max_new_tokens=4096):
+         try:
+             output = self.inference_client.text_generation(
+                 prompt,
+                 temperature=temperature,
+                 max_new_tokens=max_new_tokens,
+                 stream=True
+             )
+             response = "".join(output)
+         except ValueError as e:
+             if "Input validation error" in str(e):
+                 return "Error: The input prompt is too long. Please try a shorter prompt."
+             else:
+                 return f"An error occurred: {e}"
+         return response
+
+ # --- Model Examples ---
+ class FrontendForgeModel(Model):
+     def __init__(self):
+         super().__init__("FrontendForge🚀", "The FrontendForge model is a Large Language Model (LLM) that's able to handle frontend development tasks such as UI design and user interaction logic.", "mistralai/Mistral-7B-Instruct-v0.2")
+
+ class BackendBuilderModel(Model):
+     def __init__(self):
+         super().__init__("BackendBuilder⭐", "The BackendBuilder model is a Large Language Model (LLM) that's specialized in backend development tasks including API creation, database management, and server-side logic.", "mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+ class IntegratorModel(Model):
+     def __init__(self):
+         super().__init__("Integrator🔄", "The Integrator model is a Large Language Model (LLM) that's best suited for integrating frontend and backend components, handling business logic, and ensuring seamless communication between different parts of the application.", "microsoft/Phi-3-mini-4k-instruct")
+
+ # --- Streamlit Interface ---
+ model_links = {
+     "FrontendForge🚀": "mistralai/Mistral-7B-Instruct-v0.2",
+     "BackendBuilder⭐": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     "Integrator🔄": "microsoft/Phi-3-mini-4k-instruct"
+ }
+
+ model_info = {
+     "FrontendForge🚀": {
+         'description': "The FrontendForge model is a Large Language Model (LLM) that's able to handle frontend development tasks such as UI design and user interaction logic.",
+         'logo': './11.jpg'
+     },
+     "BackendBuilder⭐": {
+         'description': "The BackendBuilder model is a Large Language Model (LLM) that's specialized in backend development tasks including API creation, database management, and server-side logic.",
+         'logo': './2.jpg'
+     },
+     "Integrator🔄": {
+         'description': "The Integrator model is a Large Language Model (LLM) that's best suited for integrating frontend and backend components, handling business logic, and ensuring seamless communication between different parts of the application.",
+         'logo': './3.jpg'
+     },
+ }
+
+ def format_prompt(message, conversation_history, custom_instructions=None):
+     prompt = ""
+     if custom_instructions:
+         prompt += "[INST] {} [/INST]\n".format(custom_instructions)
+
+     # Add conversation history to the prompt
+     prompt += "[CONV_HISTORY]\n"
+     for role, content in conversation_history:
+         prompt += "{}: {}\n".format(role.upper(), content)
+     prompt += "[/CONV_HISTORY]\n"
+
+     # Add the current message
+     prompt += "[INST] {} [/INST]\n".format(message)
+
+     # Add the response format
+     prompt += "[RESPONSE]\n"
+
+     return prompt
+
+ def reset_conversation():
+     '''
+     Resets Conversation
+     '''
+     st.session_state.conversation = []
+     st.session_state.messages = []
+     st.session_state.chat_state = "reset"
+
+ def load_conversation_history():
+     history_file = "conversation_history.pickle"
+     if os.path.exists(history_file):
+         with open(history_file, "rb") as f:
+             conversation_history = pickle.load(f)
+     else:
+         conversation_history = []
+     return conversation_history
+
+ def save_conversation_history(conversation_history):
+     history_file = "conversation_history.pickle"
+     with open(history_file, "wb") as f:
+         pickle.dump(conversation_history, f)
+
+ models = [key for key in model_links.keys()]
+ selected_model = st.sidebar.selectbox("Select Model", models)
+ temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
+ st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
+
+ st.sidebar.write(f"You're now chatting with **{selected_model}**")
+ st.sidebar.markdown(model_info[selected_model]['description'])
+ st.sidebar.image(model_info[selected_model]['logo'])
+
+ st.sidebar.markdown("*Generating the code might go slow if you are using low power resources*")
+
+ if "prev_option" not in st.session_state:
+     st.session_state.prev_option = selected_model
+
+ if st.session_state.prev_option != selected_model:
+     st.session_state.messages = []
+     st.session_state.prev_option = selected_model
+
+ if "chat_state" not in st.session_state:
+     st.session_state.chat_state = "normal"
+
+ # Load the conversation history from the file
+ if "messages" not in st.session_state:
+     st.session_state.messages = load_conversation_history()
+
+ repo_id = model_links[selected_model]
+ st.subheader(f'{selected_model}')
+
+ if st.session_state.chat_state == "normal":
+     for message in st.session_state.messages:
+         with st.chat_message(message["role"]):
+             st.markdown(message["content"])
+
+     if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
+         custom_instruction = "Act like a Human in conversation"
+         with st.chat_message("user"):
+             st.markdown(prompt)
+
+         st.session_state.messages.append({"role": "user", "content": prompt})
+         conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
+
+         formated_text = format_prompt(prompt, conversation_history, custom_instruction)
+
+         with st.chat_message("assistant"):
+             # Select the appropriate model based on the user's choice
+             if selected_model == "FrontendForge🚀":
+                 model = FrontendForgeModel()
+             elif selected_model == "BackendBuilder⭐":
+                 model = BackendBuilderModel()
+             elif selected_model == "Integrator🔄":
+                 model = IntegratorModel()
+             else:
+                 st.error("Invalid model selection.")
+                 st.stop()  # Stop the Streamlit app execution
+
+             response = model.generate_text(formated_text, temperature=temp_values)
+             st.markdown(response)
+             st.session_state.messages.append({"role": "assistant", "content": response})
+             save_conversation_history(st.session_state.messages)
+
+ elif st.session_state.chat_state == "reset":
+     st.session_state.chat_state = "normal"
+     st.experimental_rerun()

  # --- Agent Definitions ---
  class Agent:
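
A note on the import migration in the hunk above: in the langchain 0.1.x split, only third-party integrations moved to langchain_community, while core abstractions (memory, prompts, chains, agents, text splitters) remained importable from langchain. A minimal sketch of the same imports under that layout (assumption: the app targets the 0.1.x release line):

    # Core abstractions stay in `langchain` (0.1.x layout, assumed here):
    from langchain.memory import ConversationBufferMemory
    from langchain.prompts import PromptTemplate
    from langchain.chains import LLMChain, RetrievalQA
    from langchain.agents import initialize_agent, AgentType
    from langchain.text_splitter import CharacterTextSplitter
    # Integrations live in `langchain_community`:
    from langchain_community.llms import HuggingFaceHub
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS
    from langchain_community.document_loaders import TextLoader
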
@@ -48,13 +206,11 @@ class Agent:
          return action

      def observe(self, observation):
-         # Placeholder for observation processing
-         # This should be implemented based on the agent's capabilities and the nature of the observation
-         pass
+         # Process observation based on the agent's capabilities and the nature of the observation
+         self.memory.append(observation)

      def learn(self, data):
-         # Placeholder for learning logic
-         # This should be implemented based on the agent's capabilities and the type of data
+         # Implement learning logic based on the agent's capabilities and the type of data
          pass

      def __str__(self):
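
The new observe() above appends to self.memory, which this diff never initializes. A minimal sketch of the constructor it implies; the memory attribute name is taken from observe(), the parameters mirror the Agent(...) calls in the agent pool below, and the real __init__ is not shown in this diff:

    class Agent:
        def __init__(self, name, role, tools, knowledge_base=""):
            self.name = name
            self.role = role
            self.tools = tools
            self.knowledge_base = knowledge_base
            self.memory = []  # backing store consumed by observe()
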
@@ -67,9 +223,7 @@ class Tool:
          self.description = description

      def run(self, arguments):
-         # Placeholder for tool execution logic
-         # This should be implemented based on the specific tool's functionality
-         # and the provided arguments
+         # Implement tool execution logic based on the specific tool's functionality and the provided arguments
          return {"output": "Tool Output"}

  # --- Tool Examples ---
@@ -274,8 +428,8 @@ class QuestionAnsweringTool(Tool):
  # --- Agent Pool ---
  agent_pool = {
      "IdeaIntake": Agent("IdeaIntake", "Idea Intake", [DataRetrievalTool(), CodeGenerationTool(), TextGenerationTool(), QuestionAnsweringTool()], knowledge_base=""),
-     "CodeBuilder": Agent("CodeBuilder", "Code Builder", [CodeGenerationTool(), CodeDebuggingTool(), CodeOptimizationTool(), CodeExecutionTool(), CodeSummarizationTool, CodeTranslationTool, CodeDocumentationTool], knowledge_base=""),
-     "ImageCreator": Agent("ImageCreator", "Image Creator", [ImageGenerationTool(), ImageEditingTool(), ImageAnalysisTool], knowledge_base=""),
+     "CodeBuilder": Agent("CodeBuilder", "Code Builder", [CodeGenerationTool(), CodeDebuggingTool(), CodeOptimizationTool(), CodeExecutionTool(), CodeSummarizationTool(), CodeTranslationTool(), CodeDocumentationTool()], knowledge_base=""),
+     "ImageCreator": Agent("ImageCreator", "Image Creator", [ImageGenerationTool(), ImageEditingTool(), ImageAnalysisTool()], knowledge_base=""),
  }

  # --- Workflow Definitions ---
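
The CodeBuilder and ImageCreator fix above adds the missing parentheses: without them the tool list holds class objects rather than instances, and a later tool.run(arguments) call raises a TypeError because run is an instance method. An illustrative sketch with a hypothetical Tool subclass (EchoTool is not part of the app):

    class EchoTool(Tool):
        def __init__(self):
            super().__init__("Echo", "Returns its arguments")

    tools = [EchoTool]    # class object: tools[0].run({...}) binds the dict to `self` and fails
    tools = [EchoTool()]  # instance: tools[0].run({...}) behaves as Agent expects
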
@@ -318,170 +472,6 @@ class DevSandboxWorkflow(Workflow):
      def __init__(self):
          super().__init__("Dev Sandbox", [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]], "Experiment with code", "A workflow for experimenting with code.")

- # --- Model Definitions ---
- class Model:
-     def __init__(self, name, description, model_link):
-         self.name = name
-         self.description = description
-         self.model_link = model_link
-         self.inference_client = InferenceClient(model=model_link)
-
-     def generate_text(self, prompt, temperature=0.5, max_new_tokens=4096):
-         try:
-             output = self.inference_client.text_generation(
-                 prompt,
-                 temperature=temperature,
-                 max_new_tokens=max_new_tokens,
-                 stream=True
-             )
-             response = "".join(output)
-         except ValueError as e:
-             if "Input validation error" in str(e):
-                 return "Error: The input prompt is too long. Please try a shorter prompt."
-             else:
-                 return f"An error occurred: {e}"
-         return response
-
- # --- Model Examples ---
- class LegacyLiftModel(Model):
-     def __init__(self):
-         super().__init__("LegacyLift🚀", "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.", "mistralai/Mistral-7B-Instruct-v0.2")
-
- class ModernMigrateModel(Model):
-     def __init__(self):
-         super().__init__("ModernMigrate⭐", "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference.", "mistralai/Mixtral-8x7B-Instruct-v0.1")
-
- class RetroRecodeModel(Model):
-     def __init__(self):
-         super().__init__("RetroRecode🔄", "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.", "microsoft/Phi-3-mini-4k-instruct")
-
- # --- Streamlit Interface ---
- model_links = {
-     "LegacyLift🚀": "mistralai/Mistral-7B-Instruct-v0.2",
-     "ModernMigrate⭐": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-     "RetroRecode🔄": "microsoft/Phi-3-mini-4k-instruct"
- }
-
- model_info = {
-     "LegacyLift🚀": {
-         'description': "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.",
-         'logo': './11.jpg'
-     },
-     "ModernMigrate⭐": {
-         'description': "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference.",
-         'logo': './2.jpg'
-     },
-     "RetroRecode🔄": {
-         'description': "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.",
-         'logo': './3.jpg'
-     },
- }
-
- def format_prompt(message, conversation_history, custom_instructions=None):
-     prompt = ""
-     if custom_instructions:
-         prompt += f"[INST] {custom_instructions} [/INST]\n"
-
-     # Add conversation history to the prompt
-     prompt += "[CONV_HISTORY]\n"
-     for role, content in conversation_history:
-         prompt += f"{role.upper()}: {content}\n"
-     prompt += "[/CONV_HISTORY]\n"
-
-     # Add the current message
-     prompt += f"[INST] {message} [/INST]\n"
-
-     # Add the response format
-     prompt += "[RESPONSE]\n"
-
-     return prompt
-
- def reset_conversation():
-     '''
-     Resets Conversation
-     '''
-     st.session_state.conversation = []
-     st.session_state.messages = []
-     st.session_state.chat_state = "reset"
-
- def load_conversation_history():
-     history_file = "conversation_history.pickle"
-     if os.path.exists(history_file):
-         with open(history_file, "rb") as f:
-             conversation_history = pickle.load(f)
-     else:
-         conversation_history = []
-     return conversation_history
-
- def save_conversation_history(conversation_history):
-     history_file = "conversation_history.pickle"
-     with open(history_file, "wb") as f:
-         pickle.dump(conversation_history, f)
-
- models = [key for key in model_links.keys()]
- selected_model = st.sidebar.selectbox("Select Model", models)
- temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
- st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
-
- st.sidebar.write(f"You're now chatting with **{selected_model}**")
- st.sidebar.markdown(model_info[selected_model]['description'])
- st.sidebar.image(model_info[selected_model]['logo'])
-
- st.sidebar.markdown("*Generating the code might go slow if you are using low power resources*")
-
- if "prev_option" not in st.session_state:
-     st.session_state.prev_option = selected_model
-
- if st.session_state.prev_option != selected_model:
-     st.session_state.messages = []
-     st.session_state.prev_option = selected_model
-
- if "chat_state" not in st.session_state:
-     st.session_state.chat_state = "normal"
-
- # Load the conversation history from the file
- if "messages" not in st.session_state:
-     st.session_state.messages = load_conversation_history()
-
- repo_id = model_links[selected_model]
- st.subheader(f'{selected_model}')
-
- if st.session_state.chat_state == "normal":
-     for message in st.session_state.messages:
-         with st.chat_message(message["role"]):
-             st.markdown(message["content"])
-
-     if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
-         custom_instruction = "Act like a Human in conversation"
-         with st.chat_message("user"):
-             st.markdown(prompt)
-
-         st.session_state.messages.append({"role": "user", "content": prompt})
-         conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
-
-         formated_text = format_prompt(prompt, conversation_history, custom_instruction)
-
-         with st.chat_message("assistant"):
-             # Select the appropriate model based on the user's choice
-             if selected_model == "LegacyLift🚀":
-                 model = LegacyLiftModel()
-             elif selected_model == "ModernMigrate⭐":
-                 model = ModernMigrateModel()
-             elif selected_model == "RetroRecode🔄":
-                 model = RetroRecodeModel()
-             else:
-                 st.error("Invalid model selection.")
-                 st.stop()  # Stop the Streamlit app execution
-
-             response = model.generate_text(formated_text, temperature=temp_values)
-             st.markdown(response)
-             st.session_state.messages.append({"role": "assistant", "content": response})
-             save_conversation_history(st.session_state.messages)
-
- elif st.session_state.chat_state == "reset":
-     st.session_state.chat_state = "normal"
-     st.experimental_rerun()
-
  # --- Agent-Based Workflow Execution ---
  def execute_workflow(workflow, prompt, context):
      # Execute the workflow
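
For reference, a quick trace of the prompt template this commit settles on, using format_prompt as defined in the new code above (expected output shown as comments):

    history = [("user", "Hi"), ("assistant", "Hello!")]
    print(format_prompt("Reverse a string in Python", history, "Act like a Human in conversation"))
    # [INST] Act like a Human in conversation [/INST]
    # [CONV_HISTORY]
    # USER: Hi
    # ASSISTANT: Hello!
    # [/CONV_HISTORY]
    # [INST] Reverse a string in Python [/INST]
    # [RESPONSE]
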
@@ -524,7 +514,6 @@ if st.button("Dev Sandbox"):
524
  context = execute_workflow(dev_sandbox_workflow, "Write a Python function to reverse a string.", context)
525
  st.write(f"Workflow Output: {context}")
526
 
527
-
528
  # --- Displaying Agent and Tool Information ---
529
  st.subheader("Agent Pool")
530
  for agent_name, agent in agent_pool.items():
@@ -587,4 +576,4 @@ image_analysis_tool = ImageAnalysisTool()
587
  st.write(f"""Image Analysis Tool Output: {image_analysis_tool.run({'image_url': 'https://example.com/image.jpg'})}""")
588
 
589
  question_answering_tool = QuestionAnsweringTool()
590
- st.write(f"""Question Answering Tool Output: {question_answering_tool.run({'question': 'What is the capital of France?', 'context': 'France is a country in Western Europe. Its capital is Paris.'})}""")
 