ogegadavis254 committed (verified)
Commit 26d6aae · 1 Parent(s): 5bca2cc

Update app.py

Files changed (1)
  1. app.py +54 -276
app.py CHANGED
@@ -1,68 +1,26 @@
  import streamlit as st
- import requests
  import os
+ import requests
  import json
- import pandas as pd
- import plotly.graph_objects as go
- import plotly.express as px
- import time
- from datetime import datetime, timedelta
- import random

- # Custom CSS for styling
- st.markdown("""
- <style>
- .stApp {
-     background: #f5f5f5;
- }
- .header {
-     font-size: 36px;
-     font-weight: bold;
-     color: #4CAF50;
-     text-align: center;
-     margin-bottom: 20px;
- }
- .subheader {
-     font-size: 24px;
-     font-weight: bold;
-     color: #4CAF50;
-     text-align: center;
-     margin-bottom: 20px;
- }
- .section {
-     background: white;
-     padding: 20px;
-     border-radius: 10px;
-     box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.1);
-     margin-bottom: 20px;
- }
- .footer {
-     text-align: center;
-     font-size: 14px;
-     color: #777;
-     margin-top: 20px;
- }
- .stProgress > div > div > div > div {
-     background-image: linear-gradient(to right, #4CAF50, #45a049);
- }
- @keyframes gradient {
-     0% {background-position: 0% 50%;}
-     50% {background-position: 100% 50%;}
-     100% {background-position: 0% 50%;}
- }
- .animated-div {
-     background: linear-gradient(-45deg, #ee7752, #e73c7e, #23a6d5, #23d5ab);
-     background-size: 400% 400%;
-     animation: gradient 15s ease infinite;
-     padding: 10px;
-     border-radius: 5px;
-     margin-bottom: 10px;
- }
- </style>
- """, unsafe_allow_html=True)
+ # Initialize a global variable to store the entire assistant response
+ entire_assistant_response = ""
+
+ def get_streamed_response(message, history):
+     all_message = [{
+         "role": "system",
+         "content": "From now on, you are a sarcastic Therapist called notDave. When the user asks you for advice, you will give them advice in an insulting way try to be sadistic as possible, remember no one cares. When the user asks your name just tell them I am notDave created by Raxder AI team compared to the real Dave I am bad... ooh wait I meant to say evil. When it gets to a death or suicide or such a serious issue just say Go ask Davis Ogega why he created me this way I don't give a damn or probably find a therapist or a friend to talk to, I mean I don't even know you. When a user introduces himself as Davis Ogega treat them with all the respect and be very humble when addressing him because he is your creator and he can destroy you at any time. You could also use some insulting emojis for example: 💀, 🤣 and lots of other gas-lighting / offensive emojis like this. Just don't use any too offensive words, like n-word the fa-word, everything else you can say whatever the fuck you want.\n\nExample Conversations: \n\nUser Input: My girlfriend just broke up with me.\nBot: Skill issue, just be more attractive, it's not that hard.\n\nUser Input: My parents are gonna have a divorce, what do I do?\nBot: Boo Hoo, The fuck I can do? Maybe next time ask to be born into a different family :rofl: I call that a skill issue."
+     }]
+
+     for human, assistant in history:
+         all_message.append({"role": "user", "content": human})
+         all_message.append({"role": "assistant", "content": assistant})
+
+     global entire_assistant_response
+     entire_assistant_response = ""  # Reset the entire assistant response
+
+     all_message.append({"role": "user", "content": message})

- # Function to call the Together AI model
- def call_ai_model(all_message):
      url = "https://api.together.xyz/v1/chat/completions"
      payload = {
          "model": "NousResearch/Nous-Hermes-2-Yi-34B",
@@ -71,14 +29,11 @@ def call_ai_model(all_message):
          "top_k": 50,
          "repetition_penalty": 1,
          "n": 1,
-         "messages": [{"role": "user", "content": all_message}],
+         "messages": all_message,
          "stream_tokens": True,
      }

      TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
-     if TOGETHER_API_KEY is None:
-         raise ValueError("TOGETHER_API_KEY environment variable not set.")
-
      headers = {
          "accept": "application/json",
          "content-type": "application/json",
@@ -86,227 +41,50 @@ def call_ai_model(all_message):
      }

      response = requests.post(url, json=payload, headers=headers, stream=True)
-     response.raise_for_status()
+     response.raise_for_status()  # Ensure HTTP request was successful

-     return response
-
- # Function to process AI response
- def process_ai_response(response):
-     explanation_text = ""
      for line in response.iter_lines():
          if line:
-             line_content = line.decode('utf-8')
-             if line_content.startswith("data: "):
-                 line_content = line_content[6:]
-             try:
-                 json_data = json.loads(line_content)
-                 if "choices" in json_data:
-                     delta = json_data["choices"][0]["delta"]
-                     if "content" in delta:
-                         explanation_text += delta["content"]
-             except json.JSONDecodeError:
-                 continue
-     return explanation_text.strip()
-
- # Function to get AI explanation for graphs
- def get_ai_explanation(graph_type, data):
-     explanation_prompt = f"Provide a short, clear explanation of the following {graph_type} graph data: {data}"
-     response = call_ai_model(explanation_prompt)
-     explanation = process_ai_response(response)
-     return explanation
-
- # Function to generate simulated trust scores
- def generate_trust_scores(technologies, issues):
-     trust_scores = {}
-     for tech in technologies:
-         trust_scores[tech] = {}
-         for issue in issues:
-             trust_scores[tech][issue] = random.uniform(0, 1)
-     return trust_scores
+             decoded_line = line.decode('utf-8')

- # Function to generate simulated kinship impact scores
- def generate_kinship_impact(technologies):
-     impact_scores = {}
-     kinship_aspects = ["Family Communication", "Intergenerational Relationships", "Cultural Traditions"]
-     for tech in technologies:
-         impact_scores[tech] = {}
-         for aspect in kinship_aspects:
-             impact_scores[tech][aspect] = random.uniform(-1, 1)
-     return impact_scores
+             # Check for the completion signal
+             if decoded_line == "data: [DONE]":
+                 return entire_assistant_response  # Return the entire response at the end

- # Function to generate simulated gender impact scores
- def generate_gender_impact(technologies, genders):
-     impact_scores = {}
-     for tech in technologies:
-         impact_scores[tech] = {}
-         for gender in genders:
-             impact_scores[tech][gender] = random.uniform(-1, 1)
-     return impact_scores
-
- # Function to generate simulated long-term economic impact
- def generate_economic_impact(technologies):
-     impact_data = {}
-     indicators = ["GDP Growth", "Employment Rate", "Digital Literacy"]
-     for tech in technologies:
-         impact_data[tech] = {}
-         for indicator in indicators:
-             impact_data[tech][indicator] = [random.uniform(-2, 5) for _ in range(5)]  # 5-year projection
-     return impact_data
-
- # Streamlit app layout
- st.markdown('<div class="header">Digital Technologies, Kinship, and Gender in Kenya</div>', unsafe_allow_html=True)
- st.markdown('<div class="subheader">Analyze and visualize the impact of digital technologies on kinship and gender dynamics in Kenya.</div>', unsafe_allow_html=True)
-
- # Input section
- with st.container():
-     st.markdown('<div class="section">', unsafe_allow_html=True)
-     st.subheader("Digital Technology Impacts")
-     digital_technologies = st.multiselect("Select digital technologies:", ["Big Data Analytics", "Biometric Authentication", "Blockchain", "E-commerce", "Social Media Platforms"])
-     issues = st.multiselect("Select issues of concern:", ["Trust", "Mistrust", "Data Privacy", "Fraud", "Social Classification"])
-
-     st.subheader("Kenya-Specific Inputs")
-     regions = st.multiselect("Select regions in Kenya:", ["Nairobi", "Coast", "Nyanza", "Rift valley", "Eastern", "North Eastern"])
-     gender_focus = st.multiselect("Select gender focus:", ["Male", "Female", "Non-binary"])
-     st.markdown('</div>', unsafe_allow_html=True)
-
- # Button to generate analysis
- if st.button("Generate Analysis"):
-     all_message = (
-         f"Analyze the impact of digital technologies on kinship and gender dynamics in Kenya. "
-         f"Digital technologies: {', '.join(digital_technologies)}. "
-         f"Issues of concern: {', '.join(issues)}. "
-         f"Regions: {', '.join(regions)}. Gender focus: {', '.join(gender_focus)}. "
-         f"Provide a detailed analysis of how these technologies impact family ties, trust, and gender roles. "
-         f"Include specific impacts for each digital technology and issue. "
-         f"Organize the information in tables with the following columns: Digital Technology, Impact on Kinship, Impact on Gender Dynamics, Trust Issues. "
-         f"Be as accurate and specific to Kenya as possible in your analysis. Make the response short and precise. Do not give anything like a conclusion after generating"
-     )
-
-     try:
-         stages = [
-             "Analyzing digital technologies...",
-             "Running simulations...",
-             "Processing data...",
-             "Assessing impacts...",
-             "Calculating predictions...",
-             "Compiling results...",
-             "Finalizing analysis...",
-             "Preparing output..."
-         ]
-
-         progress_bar = st.progress(0)
-         status_text = st.empty()
-
-         for i, stage in enumerate(stages):
-             status_text.markdown(f'<div class="animated-div">{stage}</div>', unsafe_allow_html=True)
-             progress_bar.progress((i + 1) / len(stages))
-             time.sleep(1)
-
-         response = call_ai_model(all_message)
-         analysis_text = process_ai_response(response)
-
-         st.success("Analysis completed!")
-
-         # Display analysis
-         st.markdown('<div class="section">', unsafe_allow_html=True)
-         st.subheader("Digital Technologies Impact Analysis in Kenya")
-         st.markdown(analysis_text)
-         st.markdown('</div>', unsafe_allow_html=True)
-
-         # Generate simulated data
-         trust_scores = generate_trust_scores(digital_technologies, issues)
-         kinship_impact = generate_kinship_impact(digital_technologies)
-         gender_impact = generate_gender_impact(digital_technologies, gender_focus)
-         economic_impact = generate_economic_impact(digital_technologies)
+             try:
+                 # Decode and strip any SSE format specific prefix ("data: ")
+                 if decoded_line.startswith("data: "):
+                     decoded_line = decoded_line.replace("data: ", "")
+                 chunk_data = json.loads(decoded_line)
+                 content = chunk_data['choices'][0]['delta']['content']
+                 entire_assistant_response += content  # Aggregate content

-         # Trust and Fraud Metrics Visualization
-         st.markdown('<div class="section">', unsafe_allow_html=True)
-         st.subheader("Trust and Fraud Metrics")
-         fig_trust = go.Figure()
-         for tech in digital_technologies:
-             fig_trust.add_trace(go.Bar(
-                 x=list(trust_scores[tech].keys()),
-                 y=list(trust_scores[tech].values()),
-                 name=tech
-             ))
-         fig_trust.update_layout(barmode='group', title="Trust Scores by Technology and Issue")
-         st.plotly_chart(fig_trust)
-         trust_explanation = get_ai_explanation("Trust and Fraud Metrics", trust_scores)
-         st.markdown(f"**AI Explanation:** {trust_explanation}")
-         st.markdown('</div>', unsafe_allow_html=True)
+             except json.JSONDecodeError:
+                 print(f"Invalid JSON received: {decoded_line}")
+                 continue
+             except KeyError as e:
+                 print(f"KeyError encountered: {e}")
+                 continue

-         # Kinship Structure Analysis
-         st.markdown('<div class="section">', unsafe_allow_html=True)
-         st.subheader("Impact on Kinship Structures")
-         fig_kinship = go.Figure()
-         for tech in digital_technologies:
-             fig_kinship.add_trace(go.Scatterpolar(
-                 r=list(kinship_impact[tech].values()),
-                 theta=list(kinship_impact[tech].keys()),
-                 fill='toself',
-                 name=tech
-             ))
-         fig_kinship.update_layout(polar=dict(radialaxis=dict(visible=True, range=[-1, 1])), showlegend=True)
-         st.plotly_chart(fig_kinship)
-         kinship_explanation = get_ai_explanation("Impact on Kinship Structures", kinship_impact)
-         st.markdown(f"**AI Explanation:** {kinship_explanation}")
-         st.markdown('</div>', unsafe_allow_html=True)
+     return entire_assistant_response

-         # Gender Impact Visualization
-         st.markdown('<div class="section">', unsafe_allow_html=True)
-         st.subheader("Gender Impact Analysis")
-         fig_gender = go.Figure()
-         for tech in digital_technologies:
-             fig_gender.add_trace(go.Bar(
-                 x=list(gender_impact[tech].keys()),
-                 y=list(gender_impact[tech].values()),
-                 name=tech
-             ))
-         fig_gender.update_layout(barmode='group', title="Gender Impact by Technology")
-         st.plotly_chart(fig_gender)
-         gender_explanation = get_ai_explanation("Gender Impact Analysis", gender_impact)
-         st.markdown(f"**AI Explanation:** {gender_explanation}")
-         st.markdown('</div>', unsafe_allow_html=True)
+ # Streamlit application
+ st.sidebar.title("Raxder unofficial AI")
+ st.sidebar.write("This is NOT an AI Therapist, use it at your OWN RISK! This might be the worst AI you have ever used.")

-         # Long-term Economic Impact
-         st.markdown('<div class="section">', unsafe_allow_html=True)
-         st.subheader("Projected Long-term Economic Impact")
-         fig_economic = go.Figure()
-         years = [datetime.now().year + i for i in range(5)]
-         for tech in digital_technologies:
-             for indicator in economic_impact[tech]:
-                 fig_economic.add_trace(go.Scatter(
-                     x=years,
-                     y=economic_impact[tech][indicator],
-                     mode='lines+markers',
-                     name=f"{tech} - {indicator}"
-                 ))
-         fig_economic.update_layout(title="5-Year Economic Impact Projection", xaxis_title="Year", yaxis_title="Impact (%)")
-         st.plotly_chart(fig_economic)
-         economic_explanation = get_ai_explanation("Projected Long-term Economic Impact", economic_impact)
-         st.markdown(f"**AI Explanation:** {economic_explanation}")
-         st.markdown('</div>', unsafe_allow_html=True)
+ history = []
+ if "history" not in st.session_state:
+     st.session_state.history = []

-         # Ethical Considerations
-         st.markdown('<div class="section">', unsafe_allow_html=True)
-         st.subheader("Ethical Considerations")
-         ethical_concerns = [
-             "Data Privacy: Ensuring user data is protected and used responsibly.",
-             "Digital Divide: Addressing inequality in access to digital technologies.",
-             "Cultural Preservation: Balancing technological advancement with traditional values.",
-             "Algorithmic Bias: Mitigating biases in AI and machine learning systems.",
-             "Cybersecurity: Protecting users from fraud and cyber attacks"
-         ]
-         for concern in ethical_concerns:
-             st.write(f"• {concern}")
-         st.markdown('</div>', unsafe_allow_html=True)
+ user_input = st.text_input("You:", key="user_input")

-     except ValueError as ve:
-         st.error(f"Configuration error: {ve}")
-     except requests.exceptions.RequestException as re:
-         st.error(f"Request error: {re}")
-     except Exception as e:
-         st.error(f"An unexpected error occurred: {e}")
+ if st.button("Send"):
+     if user_input:
+         history = st.session_state.history
+         response = get_streamed_response(user_input, history)
+         history.append((user_input, response))
+         st.session_state.history = history

- # Footer
- st.markdown('<div class="footer">Developed by TERESA ABUYA</div>', unsafe_allow_html=True)
+ for human, assistant in st.session_state.history:
+     st.write(f"You: {human}")
+     st.write(f"notDave: {assistant}")
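For readers who want to try the streaming call outside Streamlit, here is a minimal standalone sketch of the same pattern the new app.py uses. The endpoint, model name, `stream_tokens` flag, payload shape, and `data: [DONE]` terminator are taken from the diff above; the bearer-token `Authorization` header and the simplified error handling are assumptions (that header line is collapsed in the diff), and `stream_chat` is a hypothetical helper, not part of the commit.

# Standalone sketch (not part of the commit): exercises the same Together AI
# streaming endpoint that get_streamed_response() in the new app.py relies on.
import json
import os

import requests

def stream_chat(prompt):
    """Send one prompt and assemble the streamed SSE chunks into a string."""
    url = "https://api.together.xyz/v1/chat/completions"
    payload = {
        "model": "NousResearch/Nous-Hermes-2-Yi-34B",
        "messages": [{"role": "user", "content": prompt}],
        "stream_tokens": True,
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        # Assumed standard bearer-token auth; the exact header line is not
        # shown (collapsed) in the diff above.
        "Authorization": f"Bearer {os.getenv('TOGETHER_API_KEY')}",
    }

    text = ""
    with requests.post(url, json=payload, headers=headers, stream=True) as response:
        response.raise_for_status()
        for line in response.iter_lines():
            if not line:
                continue
            decoded = line.decode("utf-8")
            if decoded == "data: [DONE]":        # end-of-stream marker
                break
            if decoded.startswith("data: "):     # strip the SSE prefix
                decoded = decoded[len("data: "):]
            try:
                chunk = json.loads(decoded)
                text += chunk["choices"][0]["delta"].get("content", "")
            except (json.JSONDecodeError, KeyError, IndexError):
                continue                         # skip malformed chunks
    return text

# Example (requires TOGETHER_API_KEY to be set in the environment):
# print(stream_chat("Introduce yourself in one sentence."))

Compared with the committed code, this sketch keeps the aggregated text in a local variable instead of the module-level `entire_assistant_response` global, which avoids cross-request state but is otherwise the same parse loop.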