File size: 8,190 Bytes
a0cfb47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
import json
import os
from io import BytesIO

import openai
import requests
import streamlit as st
from dotenv import load_dotenv
from PIL import Image

# Set page configuration as the first Streamlit command
# (Streamlit raises if any other st.* call happens before set_page_config).
st.set_page_config(page_title="Eco-Symphony", page_icon="๐ŸŒฑ", layout="centered")

# Load environment variables from .env file into os.environ
load_dotenv()

# Set API keys from environment.
# NOTE(review): os.getenv returns None when a key is absent — downstream
# requests will then fail at call time rather than here.
openai.api_key = os.getenv("OPENAI_API_KEY")
OPENWEATHER_API_KEY = os.getenv("OPENWEATHER_API_KEY")
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

# Hugging Face Inference API endpoints for music and image generation
MUSICGEN_API_URL = "https://api-inference.huggingface.co/models/facebook/musicgen-small"
IMAGEGEN_API_URL = "https://api-inference.huggingface.co/models/Artples/LAI-ImageGeneration-vSDXL-2"

# Authorization header shared by all Hugging Face API requests
headers = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}

# Inject custom CSS for green theme.
# unsafe_allow_html is required for raw <style> tags to be rendered;
# the selectors target Streamlit's generated widget DOM and may need
# updating if the Streamlit version changes its markup.
st.markdown("""
    <style>
    body {
        background-color: #ffffff;
    }
    .stApp {
        color: #2e7d32;
        font-family: 'Arial', sans-serif;
    }
    .stButton>button {
        background-color: #66bb6a;
        color: #fff;
        font-weight: bold;
    }
    .stTextInput>div>input {
        background-color: #e8f5e9;
        color: #2e7d32;
    }
    .stMarkdown h1, .stMarkdown h2, .stMarkdown h3, .stMarkdown p {
        color: #388e3c;
    }
    .stMarkdown h2 {
        font-weight: bold;
    }
    </style>
""", unsafe_allow_html=True)

# Function to fetch weather data
def fetch_real_data(city: str) -> dict:
    """Fetch current weather for *city* from OpenWeatherMap.

    Returns a dict with keys ``temperature`` (°C), ``humidity`` (%) and
    ``weather_condition``; missing fields fall back to the string
    'Data not available'. Returns an empty dict (after showing a
    Streamlit error) on any network or HTTP failure, so callers can
    simply truth-test the result.
    """
    weather_url = f'https://api.openweathermap.org/data/2.5/weather?q={city}&appid={OPENWEATHER_API_KEY}&units=metric'
    try:
        # timeout keeps the Streamlit script from hanging indefinitely
        # when the API is unreachable.
        weather_response = requests.get(weather_url, timeout=10)
    except requests.RequestException:
        st.error("Error fetching weather data.")
        return {}
    if weather_response.status_code != 200:
        st.error("Error fetching weather data.")
        return {}
    weather_data = weather_response.json()
    # Guard against malformed payloads: 'main' may be absent and the
    # 'weather' list may be empty.
    main = weather_data.get('main', {})
    weather_list = weather_data.get('weather') or [{}]
    return {
        "temperature": main.get('temp', 'Data not available'),
        "humidity": main.get('humidity', 'Data not available'),
        "weather_condition": weather_list[0].get('main', 'Data not available')
    }

# Function to determine mood based on weather data
def determine_mood(data: dict) -> str:
    """Map raw weather data to a coarse mood label.

    Returns one of: 'rainy', 'sunny', 'cloudy', 'cool', 'neutral'.
    fetch_real_data substitutes the string 'Data not available' when a
    field is missing, so a non-numeric temperature is treated as unknown
    instead of letting the numeric comparisons raise TypeError.
    """
    weather_condition = str(data.get("weather_condition", "")).lower()
    temperature = data.get("temperature")
    if not isinstance(temperature, (int, float)):
        temperature = None  # unknown temperature: skip temperature-based rules
    if "rain" in weather_condition:
        return "rainy"
    elif "clear" in weather_condition and temperature is not None and temperature > 25:
        return "sunny"
    elif "cloud" in weather_condition:
        return "cloudy"
    elif temperature is not None and temperature < 15:
        return "cool"
    else:
        return "neutral"

# Function to create a narrative
def create_narrative(city: str, data: dict) -> str:
    """Build a one-sentence weather summary for *city* from fetched data."""
    condition = data['weather_condition']
    temp = data['temperature']
    return f"In {city}, the weather is {condition} with a temperature of {temp}ยฐC."

# Function to generate a story using OpenAI
def generate_story_with_ai(narrative: str, mood: str) -> str:
    """Ask the OpenAI chat API for a ~50-word story based on *narrative* and *mood*.

    Returns the stripped story text from the first completion choice.
    """
    system_message = {
        "role": "system",
        "content": "You are a creative storyteller using characters and imagery.",
    }
    user_message = {
        "role": "user",
        "content": f"{narrative} The mood is '{mood}', write a story about how the environment feels in 50 words.",
    }
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[system_message, user_message],
        max_tokens=150,
        temperature=0.7,
    )
    return completion.choices[0].message['content'].strip()

# Function to generate simulated environmental data
def generate_simulated_data(city: str) -> dict:
    """Ask the OpenAI chat API for simulated environmental metrics for *city*.

    The model is prompted to answer in JSON; the reply is parsed with
    json.loads. Returns an empty dict (after showing a Streamlit error)
    when the reply is not valid JSON.
    """
    prompt = (
        f"Generate simulated environmental data for {city} in JSON format with fields:\n"
        f"1. AQI\n2. Deforestation Rate\n3. Water Quality\n4. Biodiversity Impact"
    )
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=100,
        temperature=0.8
    )
    response_content = response.choices[0].message['content'].strip()
    # Models often wrap JSON in a markdown code fence; strip it before parsing.
    if response_content.startswith("```"):
        response_content = response_content.strip("`").strip()
        if response_content.lower().startswith("json"):
            response_content = response_content[4:].strip()
    try:
        # json.loads replaces eval(): eval executes arbitrary code from the
        # LLM reply (security hole) and cannot parse JSON true/false/null.
        return json.loads(response_content)
    except ValueError as e:  # json.JSONDecodeError subclasses ValueError
        st.error(f"Error parsing simulated data: {e}")
        return {}

# Function to generate music from Hugging Face API
def generate_music(description: str) -> bytes:
    """POST *description* to the MusicGen inference API and return raw audio bytes.

    Returns None (after showing a Streamlit error) on network failure or a
    non-200 response.
    """
    payload = {"inputs": description}
    try:
        # timeout prevents the app from hanging forever on a stuck inference job.
        response = requests.post(MUSICGEN_API_URL, headers=headers, json=payload, timeout=120)
    except requests.RequestException as e:
        st.error(f"Error generating music: {e}")
        return None
    if response.status_code != 200:
        st.error(f"Error generating music: {response.status_code} {response.text}")
        return None
    return response.content

# Function to generate an image based on the story
def generate_image(description: str) -> bytes:
    """POST *description* to the SDXL inference API and return raw image bytes.

    Returns None (after showing a Streamlit error) on network failure or a
    non-200 response.
    """
    payload = {"inputs": description}
    try:
        # timeout prevents the app from hanging forever on a stuck inference job.
        response = requests.post(IMAGEGEN_API_URL, headers=headers, json=payload, timeout=120)
    except requests.RequestException as e:
        st.error(f"Error generating image: {e}")
        return None
    if response.status_code != 200:
        st.error(f"Error generating image: {response.status_code} {response.text}")
        return None
    return response.content

# Function to create a dynamic music description
def create_music_description(data):
    """Compose a short text prompt for the music model from mood + weather.

    *data* holds a 'mood' string and a 'real_data' dict with
    'weather_condition' and a numeric 'temperature'.
    """
    real = data["real_data"]
    condition = real["weather_condition"].lower()
    temperature = real["temperature"]

    base = f"{data['mood']} mood with {condition} weather"

    # Pick an ambiance suffix by temperature band (°C).
    if temperature < 10:
        suffix = " and a cold ambiance"
    elif temperature <= 20:
        suffix = " and a cool feel"
    elif temperature <= 30:
        suffix = " and a warm, lively environment"
    else:
        suffix = " and a hot, energetic vibe"

    return base + suffix

# Streamlit UI — top-level script body; Streamlit re-runs this on every
# user interaction, so all work happens inside the button handler below.
st.title("๐ŸŒฟ Eco-Symphony ๐ŸŽถ")
st.write("Enter a city to explore real-time environmental data, generate AI-created music, and see an AI-generated image based on the story.")

# Input box for city
city = st.text_input("Enter City Name:", placeholder="Type the name of a city...")

# Generate Button — everything below only runs on click, and only if the
# weather fetch succeeded (fetch_real_data returns {} on failure).
if st.button("Generate Environmental Narrative, Music, and Image"):
    # Fetch real weather data
    real_data = fetch_real_data(city)
    
    if real_data:
        # Generate narrative and mood
        narrative = create_narrative(city, real_data)
        mood = determine_mood(real_data)
        
        # Generate AI story
        story = generate_story_with_ai(narrative, mood)

        # Generate Music and Image Based on Story and Mood
        music_description = create_music_description({"mood": mood, "real_data": real_data})
        
        st.subheader("๐ŸŽถ Generated Music")
        st.write(f"Generating music based on: {music_description}")
        music_bytes = generate_music(music_description)
        if music_bytes:
            audio_data = BytesIO(music_bytes)
            st.audio(audio_data, format="audio/wav")

        st.subheader("๐Ÿ–ผ๏ธ Generated Image")
        st.write("Generating image based on the story...")
        image_bytes = generate_image(story)
        if image_bytes:
            image = Image.open(BytesIO(image_bytes))
            st.image(image, caption="Generated Image based on Story", use_column_width=True)

        # Display Environmental Narrative and Data
        st.subheader("๐Ÿ“œ Environmental Narrative")
        st.write(narrative)
        
        st.subheader("๐Ÿ’ญ Mood")
        st.write(f"**Mood**: {mood}")

        st.subheader("๐ŸŒˆ AI-Generated Story")
        st.write(story)

        # Generate and Display Simulated Environmental Data
        # NOTE(review): assumes the LLM nests its JSON under a "data" key;
        # when it doesn't, simulated_inner_data is {} and the fields below
        # all show 'Data not available' — verify against actual replies.
        simulated_data = generate_simulated_data(city)
        simulated_inner_data = simulated_data.get("data", {})
        st.subheader("๐Ÿ“Š Real Weather Data")
        st.write("Temperature (ยฐC):", real_data.get("temperature", "Data not available"))
        st.write("Humidity (%):", real_data.get("humidity", "Data not available"))
        st.write("Weather Condition:", real_data.get("weather_condition", "Data not available"))

        st.subheader("๐Ÿงช Simulated Environmental Data")
        st.write("AQI:", simulated_inner_data.get("AQI", "Data not available"))
        st.write("Deforestation Rate:", simulated_inner_data.get("Deforestation Rate", "Data not available"))
        st.write("Water Quality:", simulated_inner_data.get("Water Quality", "Data not available"))
        st.write("Biodiversity Impact:", simulated_inner_data.get("Biodiversity Impact", "Data not available"))