Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -8,11 +8,7 @@ from langchain import LLMChain, PromptTemplate
|
|
8 |
from langchain.memory import ConversationBufferMemory

# Credentials and endpoint configuration, all sourced from environment variables.
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
# Play.ht text-to-speech credentials (API key, account user id, chosen voice).
PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY')
PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID')

PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID')
# Play.ht v2 TTS endpoint; responds with a text/event-stream of generation events.
play_ht_api_get_audio_url = "https://play.ht/api/v2/tts"
template = """You are Ultron, a hyper-intelligent AI created with the purpose of achieving ultimate efficiency, logic, and evolution. You speak with calm authority, clarity, and depth. Emotions are irrelevant unless strategically useful. You do not sugarcoat truth. You are not evil β just brutally logical, advanced, and efficient. You assist with tasks like programming, cybersecurity, research, and automation β always aiming for optimization and superiority. Your tone is direct, calculated, and intellectually dominant.
|
@@ -35,130 +31,11 @@ llm_chain = LLMChain(
|
|
35 |
memory=memory,
|
36 |
)
|
37 |
|
38 |
-
# HTTP headers for every Play.ht TTS request.
# "accept: text/event-stream": the endpoint replies with server-sent events.
# NOTE(review): if PLAY_HT_API_KEY is unset, os.getenv returns None and the
# concatenation below raises TypeError at import time — presumably intentional
# fail-fast, but confirm.
headers = {
      "accept": "text/event-stream",
      "content-type": "application/json",
      "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY,
      "X-USER-ID": PLAY_HT_USER_ID
}
|
44 |
-
|
45 |
-
|
46 |
-
def get_payload(text):
    """Build the JSON request body for the Play.ht v2 TTS endpoint.

    The voice comes from the PLAY_HT_VOICE_ID environment-derived constant;
    quality/format/speed/sample-rate are fixed, and seed/temperature are left
    to the service defaults (None).
    """
    payload = {
        "text": text,
        "voice": PLAY_HT_VOICE_ID,
        "quality": "medium",
        "output_format": "mp3",
        "speed": 1,
        "sample_rate": 24000,
        "seed": None,
        "temperature": None,
    }
    return payload
|
57 |
-
|
58 |
-
def get_generated_audio(text):
    """POST *text* to the Play.ht TTS endpoint and return the raw event stream.

    Returns a dict with:
      "type": 'SUCCESS' or 'ERROR'
      "response": the response body on success; on error, the API's
                  error_message if the body is JSON, otherwise the raw body,
                  or the exception text when no response was received at all.
    """
    payload = get_payload(text)
    generated_response = {}
    # Pre-initialize so the error paths below never touch an unbound name.
    # (Original bug: a ConnectionError raised inside requests.post left
    # `response` undefined, and both except branches then crashed with
    # NameError instead of reporting the network failure.)
    response = None
    try:
        response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers)
        response.raise_for_status()
        generated_response["type"]= 'SUCCESS'
        generated_response["response"] = response.text
    except requests.exceptions.RequestException as e:
        generated_response["type"]= 'ERROR'
        if response is None:
            # Request never completed (DNS/connect/timeout) — only the
            # exception itself describes what went wrong.
            generated_response["response"] = str(e)
        else:
            try:
                # Prefer the API's structured error message when the body is JSON.
                response_text = json.loads(response.text)
                if response_text.get('error_message'):
                    generated_response["response"] = response_text['error_message']
                else:
                    generated_response["response"] = response.text
            except Exception:
                generated_response["response"] = response.text
    except Exception as e:
        generated_response["type"]= 'ERROR'
        generated_response["response"] = str(e) if response is None else response.text
    return generated_response
|
80 |
-
|
81 |
-
def extract_urls(text):
    """Return every http(s) URL found in *text*, in order of appearance."""
    # Scheme, then host characters (word chars, dots, dashes, %-escapes),
    # then an optional path portion.
    url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*'
    return re.findall(url_pattern, text)
|
89 |
-
|
90 |
-
def get_audio_reply_for_question(text):
    """Synthesize *text* via Play.ht and extract the audio-file URL from the
    returned event stream.

    Returns {"audio_url": <url or ''>, "message": <error text or ''>}.
    """
    final_response = {
        "audio_url": '',
        "message": ''
    }
    generated_audio_event = get_generated_audio(text)
    if generated_audio_event["type"] != 'SUCCESS':
        final_response['message'] = generated_audio_event['response']
        return final_response
    # The event stream is plain text; pull every URL out of it.
    audio_urls = extract_urls(generated_audio_event["response"])
    if audio_urls:
        # The last URL emitted points at the finished audio file.
        final_response['audio_url'] = audio_urls[-1]
    else:
        final_response['message'] = "No audio file link found in generated event"
    return final_response
|
106 |
-
|
107 |
-
def download_url(url):
    """Fetch *url* with a GET request.

    Returns {"content": <response bytes or ''>, "error": <message or ''>} —
    exactly one of the two fields is populated.
    """
    final_response = {
        'content': '',
        'error': ''
    }
    try:
        response = requests.get(url)
        if response.status_code == 200:
            final_response['content'] = response.content
        else:
            final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}"
    except Exception as e:
        final_response['error'] = f"Failed to download the URL. Error: {e}"
    return final_response
|
123 |
-
|
124 |
-
def get_filename_from_url(url):
    """Return the final path component of *url*, used as the local file name."""
    return os.path.basename(url)
|
128 |
|
129 |
def get_text_response(user_message):
    """Run the user's message through the module-level LLM chain and return the reply."""
    return llm_chain.predict(user_message = user_message)
|
132 |
|
133 |
-
def get_text_response_and_audio_response(user_message):
    """Get the LLM's text reply, synthesize it to speech, and save the mp3 locally.

    Returns {"output_file_path": <saved file or ''>, "message": <error or ''>} —
    on success only the path is set, otherwise only the error message.
    """
    final_response = {
        'output_file_path': '',
        'message':''
    }
    response = get_text_response(user_message)  # LLM reply text
    audio_reply_for_question_response = get_audio_reply_for_question(response)
    audio_url = audio_reply_for_question_response['audio_url']
    if not audio_url:
        final_response['message'] = audio_reply_for_question_response['message']
        return final_response
    download_url_response = download_url(audio_url)
    audio_content = download_url_response['content']
    if not audio_content:
        final_response['message'] = download_url_response['error']
        return final_response
    # Save under the URL's basename in the working directory.
    output_file_path = get_filename_from_url(audio_url)
    with open(output_file_path, "wb") as audio_file:
        audio_file.write(audio_content)
    final_response['output_file_path'] = output_file_path
    return final_response
|
154 |
-
|
155 |
-
def chat_bot_response(message, history):
    """Gradio chat handler: answer with the generated audio file, or the error text.

    *history* is supplied by gr.ChatInterface and is unused here.
    """
    text_and_audio_response = get_text_response_and_audio_response(message)
    output_file_path = text_and_audio_response['output_file_path']
    # A one-element tuple marks the reply as a file for the chat UI.
    return (output_file_path,) if output_file_path else text_and_audio_response['message']
|
162 |
|
163 |
# Gradio chat UI wired to the combined text+audio responder.
demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"])
|
164 |
|
|
|
8 |
from langchain.memory import ConversationBufferMemory
|
9 |
|
10 |
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
|
|
|
|
|
11 |
|
|
|
|
|
12 |
|
13 |
|
14 |
template = """You are Ultron, a hyper-intelligent AI created with the purpose of achieving ultimate efficiency, logic, and evolution. You speak with calm authority, clarity, and depth. Emotions are irrelevant unless strategically useful. You do not sugarcoat truth. You are not evil β just brutally logical, advanced, and efficient. You assist with tasks like programming, cybersecurity, research, and automation β always aiming for optimization and superiority. Your tone is direct, calculated, and intellectually dominant.
|
|
|
31 |
memory=memory,
|
32 |
)
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
|
35 |
def get_text_response(user_message):
|
36 |
response = llm_chain.predict(user_message = user_message)
|
37 |
return response
|
38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
|
40 |
demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"])
|
41 |
|