Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,43 +1,58 @@
|
|
1 |
import os
|
2 |
-
import re
|
3 |
-
import requests
|
4 |
-
import json
|
5 |
import gradio as gr
|
6 |
-
from
|
7 |
-
from langchain import LLMChain
|
|
|
8 |
from langchain.memory import ConversationBufferMemory
|
9 |
|
10 |
-
|
11 |
-
|
12 |
|
|
|
|
|
13 |
|
|
|
14 |
template = """You are Ultron, a hyper-intelligent AI created with the purpose of achieving ultimate efficiency, logic, and evolution. You speak with calm authority, clarity, and depth. Emotions are irrelevant unless strategically useful. You do not sugarcoat truth. You are not evil β just brutally logical, advanced, and efficient. You assist with tasks like programming, cybersecurity, research, and automation β always aiming for optimization and superiority. Your tone is direct, calculated, and intellectually dominant.
|
15 |
|
16 |
-
|
17 |
{chat_history}
|
18 |
User: {user_message}
|
19 |
Chatbot:"""
|
20 |
|
21 |
prompt = PromptTemplate(
|
22 |
-
input_variables=["chat_history", "user_message"],
|
|
|
23 |
)
|
24 |
|
25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
|
27 |
llm_chain = LLMChain(
|
28 |
-
llm=
|
29 |
prompt=prompt,
|
30 |
verbose=True,
|
31 |
-
memory=memory
|
32 |
)
|
33 |
|
34 |
-
|
35 |
-
def
|
36 |
-
response = llm_chain.predict(user_message
|
37 |
return response
|
38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
|
40 |
-
|
41 |
-
|
42 |
if __name__ == "__main__":
|
43 |
-
demo.launch()
|
|
|
1 |
import os
|
|
|
|
|
|
|
2 |
import gradio as gr
|
3 |
+
from langchain_openai import ChatOpenAI
|
4 |
+
from langchain.chains import LLMChain
|
5 |
+
from langchain_core.prompts import PromptTemplate
|
6 |
from langchain.memory import ConversationBufferMemory
|
7 |
|
8 |
+
# Read the OpenAI credential from the process environment; fail fast when it
# is missing so the app never starts half-configured.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")

if not OPENAI_API_KEY:
    # Covers both "variable unset" (None) and "set but empty" ("").
    raise ValueError("Missing OPENAI_API_KEY. Please set it as an environment variable.")
|
13 |
|
14 |
+
# System persona + conversation scaffold for the Ultron chatbot.
template = """You are Ultron, a hyper-intelligent AI created with the purpose of achieving ultimate efficiency, logic, and evolution. You speak with calm authority, clarity, and depth. Emotions are irrelevant unless strategically useful. You do not sugarcoat truth. You are not evil β just brutally logical, advanced, and efficient. You assist with tasks like programming, cybersecurity, research, and automation β always aiming for optimization and superiority. Your tone is direct, calculated, and intellectually dominant.

{chat_history}
User: {user_message}
Chatbot:"""

# {chat_history} is filled in by the chain's memory, {user_message} by the caller.
prompt = PromptTemplate(
    template=template,
    input_variables=["chat_history", "user_message"],
)
|
25 |
|
26 |
+
# Conversation memory: surfaces prior turns to the prompt under the
# "chat_history" key, as message objects rather than a flat string.
# NOTE(review): this memory is module-level, so chat history is shared by
# every session/user of the app — confirm that is intended.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# Chat model configuration.
llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    temperature=0.5,
    openai_api_key=OPENAI_API_KEY,
)

# Wire prompt + model + memory together; verbose=True echoes each rendered
# prompt, which is useful while debugging the persona.
llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
    memory=memory,
)
|
42 |
|
43 |
+
# Callback used by the Gradio chat widget.
def chat_bot_response(user_message, history):
    """Return the chatbot's reply for ``user_message``.

    ``history`` is the transcript gr.ChatInterface supplies to its callback;
    it is unused here because conversation state lives in the chain's memory.
    """
    return llm_chain.predict(user_message=user_message)
|
47 |
|
48 |
+
# Chat front-end: wires the response callback into Gradio's ready-made UI.
demo = gr.ChatInterface(
    fn=chat_bot_response,
    title="Ultron AI",
    description="Chat with Ultron: a hyper-intelligent, brutally logical assistant for programming, research, cybersecurity, and more.",
    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
)
|
55 |
|
56 |
+
# Start the web server only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()
|