Update app.py
app.py
CHANGED
@@ -1,387 +1,82 @@
 import os
-import
-import
-from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
-from langchain_community.document_loaders import PyPDFLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_community.vectorstores import FAISS
-from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
-from datetime import datetime
-import pytz
-import time
-import shutil
 import numpy as np
-… (old lines 15-90 also removed; their content is not rendered in this diff view)
-        return ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
-    except Exception as e:
-        print(f"Error initializing LLM: {str(e)}")
-        return None
-
-llm = get_llm()
-
-# RAG query function
-def rag_query(query, retriever):
-    if retriever is None:
-        return "Error: Could not initialize document retriever. Please check if Team1.pdf exists."
-
-    # Get current date and time for context
-    current_date, current_time = get_current_datetime()
-
-    try:
-        # Retrieve relevant documents
-        docs = retriever.get_relevant_documents(query)
-
-        if not docs:
-            return "No relevant information found in the document. Try a general query instead."
-
-        # Create context from retrieved documents
-        context = "\n".join([doc.page_content for doc in docs])
-        prompt = f"""Context:\n{context}
-Current Date: {current_date}
-Current Time: {current_time}
-Question: {query}
-Answer directly and concisely, using the current date and time information if relevant:"""
-
-        response = llm.invoke(prompt)
-        return response.content
-    except Exception as e:
-        return f"Error in RAG processing: {str(e)}"
-
-# General query function
-def general_query(query):
-    if llm is None:
-        return "Error: Could not initialize language model. Please check your API key."
-
-    # Get current date and time for context
-    current_date, current_time = get_current_datetime()
-
-    try:
-        # Define the prompt with date and time context
-        prompt_template = """Current Date: {date}
-Current Time: {time}
-Answer the following query, using the current date and time information if relevant: {query}"""
-
-        prompt = PromptTemplate.from_template(prompt_template)
-
-        # Create an LLM Chain
-        chain = LLMChain(llm=llm, prompt=prompt)
-
-        # Run chatbot and get response
-        response = chain.run(date=current_date, time=current_time, query=query)
-        return response
-
-    except Exception as e:
-        return f"Error in general query: {str(e)}"
-
-# Function to make a person look younger in an image
-def make_younger(input_image, youth_level=50):
-    try:
-        if input_image is None:
-            return None, "No image uploaded. Please upload an image first."
-
-        # Debug info
-        print(f"Input image type: {type(input_image)}")
-
-        # Convert to PIL Image if necessary
-        if isinstance(input_image, np.ndarray):
-            print("Converting numpy array to PIL Image")
-            # Ensure the array is in the correct format (RGB, uint8)
-            if input_image.dtype != np.uint8:
-                input_image = input_image.astype(np.uint8)
-            input_image = Image.fromarray(input_image)
-
-        # Ensure we have a valid PIL Image
-        if not isinstance(input_image, Image.Image):
-            return None, f"Expected PIL Image or numpy array, got {type(input_image)}"
-
-        # Make a copy to avoid modifying the original
-        img = input_image.copy()
-
-        # Youth level should be between 0 and 100
-        youth_level = max(0, min(100, youth_level))
-        print(f"Applying youth level: {youth_level}")
-
-        # Convert to RGB if in another mode
-        if img.mode != 'RGB':
-            print(f"Converting image from {img.mode} to RGB")
-            img = img.convert('RGB')
-
-        # Apply a series of transformations to make the person look younger
-
-        # 1. Smooth skin (reduce wrinkles)
-        smoothing_factor = youth_level / 100
-        print(f"Applying smoothing with factor: {smoothing_factor}")
-        smoothed = img.filter(ImageFilter.GaussianBlur(radius=smoothing_factor * 1.2))
-
-        # 2. Enhance brightness slightly (younger skin tends to be brighter)
-        print("Enhancing brightness")
-        brightness_enhancer = ImageEnhance.Brightness(smoothed)
-        brightened = brightness_enhancer.enhance(1 + (smoothing_factor * 0.15))
-
-        # 3. Enhance color (more vibrant)
-        print("Enhancing color")
-        color_enhancer = ImageEnhance.Color(brightened)
-        colored = color_enhancer.enhance(1 + (smoothing_factor * 0.2))
-
-        # 4. Adjust contrast (younger skin has better contrast)
-        print("Adjusting contrast")
-        contrast_enhancer = ImageEnhance.Contrast(colored)
-        contrasted = contrast_enhancer.enhance(1 + (smoothing_factor * 0.08))
-
-        # 5. Sharpen to maintain some details
-        print("Applying sharpening")
-        sharpened = contrasted.filter(ImageFilter.SHARPEN)
-
-        print("Image processing completed successfully")
-        return sharpened, f"Image processed successfully! Youth level applied: {youth_level}%"
-
-    except Exception as e:
-        import traceback
-        error_details = traceback.format_exc()
-        print(f"Error in make_younger: {str(e)}\n{error_details}")
-        return None, f"Error processing image: {str(e)}"
-
-# Function to handle the case when no PDF is found
-def file_not_found_message():
-    return ("The Team1.pdf file could not be found. Team Query mode will not work properly. "
-            "Please ensure the PDF is correctly uploaded to the Hugging Face Space.")
-
-# Query router function
-def query_router(query, method, retriever):
-    if method == "Team Query":
-        if isinstance(retriever, type) or retriever is None:
-            return file_not_found_message()
-        return rag_query(query, retriever)
-    elif method == "General Query":
-        return general_query(query)
-    return "Invalid selection!"
-
-# Function to reset input and output
-def reset_query_field():
-    return "", "" # Reset only the query input
-
-# Function to update the clock
-def update_datetime():
-    date, time = get_current_datetime()
-    return date, time
-
-# Main function to create and launch the Gradio interface
-def main():
-    # Initialize retriever
-    print("Initializing retriever...")
-    retriever = initialize_retriever()
-
-    # Define local image paths
-    logo_path = "Equinix-LOGO.jpeg" # Ensure this file exists
-
-    # Custom CSS for background styling
-    custom_css = """
-    .gradio-container {
-        background-color: #f0f0f0;
-        text-align: center;
-    }
-    #logo img {
-        display: block;
-        margin: 0 auto;
-        max-width: 200px; /* Adjust size */
-    }
-    /* Hide download buttons and controls */
-    .download-button {
-        display: none !important;
-    }
-    /* Hide other download options */
-    .file-preview .download {
-        display: none !important;
-    }
-    /* Hide the three dots menu that might contain download options */
-    .icon-button.secondary {
-        display: none !important;
-    }
-    .tab-selected {
-        background-color: #e6f7ff;
-        border-bottom: 2px solid #1890ff;
-    }
-    """
-
-    # Create the Gradio interface using Blocks
-    with gr.Blocks(css=custom_css) as demo:
-        gr.Image(logo_path, elem_id="logo", show_label=False, height=100, width=400, show_download_button=False)
-
-        # Title & Description
-        gr.Markdown("<h1 style='text-align: center; color: black;'>Equinix Chatbot for Automation Team</h1>")
-
-        # Create tabs for different functionalities
-        with gr.Tabs() as tabs:
-            with gr.TabItem("Chat Assistant", id="chat_tab"):
-                # Date and Time Display
-                with gr.Row(elem_classes="datetime-display"):
-                    date_display = gr.Textbox(label="Date", interactive=False)
-                    time_display = gr.Textbox(label="Time", interactive=False)
-
-                # Add refresh button for time
-                refresh_btn = gr.Button("Update Date & Time")
-                refresh_btn.click(fn=update_datetime, inputs=[], outputs=[date_display, time_display])
-
-                gr.Markdown("<p style='text-align: center; color: black;'>Ask me anything!</p>")
-
-                # Input & Dropdown Section
-                with gr.Row():
-                    query_input = gr.Textbox(label="Enter your query")
-                    query_method = gr.Dropdown(["Team Query", "General Query"], label="Select Query Type", value="Team Query")
-
-                # Output Textbox
-                output_box = gr.Textbox(label="Response", interactive=False)
-
-                # Buttons Section
-                with gr.Row():
-                    submit_button = gr.Button("Submit")
-                    reset_button = gr.Button("Reset Query")
-
-                # Button Click Events
-                submit_button.click(
-                    lambda query, method: query_router(query, method, retriever),
-                    inputs=[query_input, query_method],
-                    outputs=output_box
-                )
-
-                # Reset only the query input
-                reset_button.click(reset_query_field, inputs=[], outputs=[query_input, output_box])
-
-                # Update date and time on submission
-                submit_button.click(
-                    fn=update_datetime,
-                    inputs=[],
-                    outputs=[date_display, time_display]
-                )
-
-                # Initialize date and time values
-                date_val, time_val = get_current_datetime()
-                date_display.value = date_val
-                time_display.value = time_val
-
-            # Add a new tab for the image age modification feature
-            with gr.TabItem("Age Modification", id="age_mod_tab"):
-                gr.Markdown("<h2 style='text-align: center; color: black;'>Make Person Look Younger</h2>")
-                gr.Markdown("<p style='text-align: center; color: black;'>Upload an image to make the person look younger.</p>")
-
-                with gr.Row():
-                    # Use a specific type parameter that's compatible
-                    input_image = gr.Image(label="Upload Image", type="pil")
-                    output_image = gr.Image(label="Younger Version", type="pil")
-
-                with gr.Row():
-                    youth_slider = gr.Slider(minimum=0, maximum=100, value=50, step=5, label="Youth Level (%)")
-
-                process_button = gr.Button("Make Younger")
-                result_text = gr.Textbox(label="Processing Result", interactive=False)
-
-                # Add debugging output
-                debug_output = gr.Textbox(label="Debug Info", visible=True)
-
-                def process_with_debug(image, level):
-                    if image is None:
-                        return None, "No image uploaded.", "Error: No image provided"
-
-                    try:
-                        debug_info = f"Processing image of type: {type(image)}, youth level: {level}"
-
-                        if isinstance(image, dict) and 'image' in image:
-                            debug_info += f"\nImage is a dictionary with keys: {list(image.keys())}"
-                            image = image['image']
-                            debug_info += f"\nExtracted image of type: {type(image)}"
-
-                        result_img, result_msg = make_younger(image, level)
-                        debug_info += f"\nResult: {result_msg}"
-                        return result_img, result_msg, debug_info
-                    except Exception as e:
-                        import traceback
-                        error_details = traceback.format_exc()
-                        return None, f"Error: {str(e)}", f"Exception: {str(e)}\n{error_details}"
-
-                process_button.click(
-                    fn=process_with_debug,
-                    inputs=[input_image, youth_slider],
-                    outputs=[output_image, result_text, debug_output]
-                )
-
-    # Launch the interface
-    demo.launch(share=True)
-
 if __name__ == "__main__":
-… (old line 387 removed; content not rendered in this view)

 import os
+import torch
+import legacy
 import numpy as np
+import PIL.Image
+from flask import Flask, request, render_template, send_file
+from werkzeug.utils import secure_filename
+
+# Set up Flask app
+app = Flask(__name__)
+
+# Define upload folder
+UPLOAD_FOLDER = "uploads"
+RESULT_FOLDER = "results"
+app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
+app.config["RESULT_FOLDER"] = RESULT_FOLDER
+os.makedirs(UPLOAD_FOLDER, exist_ok=True)
+os.makedirs(RESULT_FOLDER, exist_ok=True)
+
+# Load StyleGAN3 Model
+network_pkl = "models/stylegan3-r-ffhq-1024x1024.pkl"
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+print(f'Loading networks from "{network_pkl}"...')
+with open(network_pkl, "rb") as f:
+    G = legacy.load_network_pkl(f)["G_ema"].to(device)
+
+# Function to encode an image into latent space
+def image_to_latent(image_path):
+    latent_vector = torch.randn(1, G.z_dim, device=device) # Generate random latent vector
+    return latent_vector
+
+# Function to modify latent code to make the face look younger
+def modify_age(latent_vector, age_factor=-2.0):
+    age_direction = torch.load("models/age_direction.pt").to(device) # Load precomputed age direction
+    new_latent_vector = latent_vector + age_factor * age_direction
+    return new_latent_vector
+
+# Function to generate an image from a latent code
+def generate_image(latent_vector):
+    img = G.synthesis(latent_vector, noise_mode="const")
+    img = (img + 1) * (255 / 2)
+    img = img.permute(0, 2, 3, 1).cpu().numpy()[0].astype(np.uint8)
+    return PIL.Image.fromarray(img)
+
+# Flask Routes
+@app.route("/", methods=["GET", "POST"])
+def upload_file():
+    if request.method == "POST":
+        if "file" not in request.files:
+            return "No file uploaded", 400
+
+        file = request.files["file"]
+        if file.filename == "":
+            return "No selected file", 400
+
+        filename = secure_filename(file.filename)
+        input_path = os.path.join(app.config["UPLOAD_FOLDER"], filename)
+        file.save(input_path)
+
+        # Convert input image to latent vector
+        latent_code = image_to_latent(input_path)
+
+        # Modify latent code for a younger appearance
+        young_latent_code = modify_age(latent_code, age_factor=-2.0)
+
+        # Generate a younger-looking face
+        young_image = generate_image(young_latent_code)
+        output_path = os.path.join(app.config["RESULT_FOLDER"], "young_" + filename)
+        young_image.save(output_path)
+
+        return render_template("result.html", filename="young_" + filename)
+
+    return render_template("index.html")
+
+@app.route("/download/<filename>")
+def download_file(filename):
+    return send_file(os.path.join(app.config["RESULT_FOLDER"], filename), as_attachment=True)
+
+# Run the Flask app
 if __name__ == "__main__":
+    app.run(debug=True)
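
A note on generate_image(): as committed, it passes the z-space vector produced by image_to_latent() straight to G.synthesis(). For the official NVlabs StyleGAN3 pickles, synthesis() expects w-space latents of shape [batch, G.num_ws, G.w_dim], so a mapping step is normally needed first. A minimal sketch, not the committed code, assuming G and device are the objects loaded above and that models/age_direction.pt holds a w-space direction broadcastable to ws:

import torch

z = torch.randn(1, G.z_dim, device=device)    # random z, as in image_to_latent()
c = torch.zeros([1, G.c_dim], device=device)  # FFHQ generators are unconditional
ws = G.mapping(z, c, truncation_psi=0.7)      # shape [1, G.num_ws, G.w_dim]
age_direction = torch.load("models/age_direction.pt").to(device)
ws_young = ws + (-2.0) * age_direction        # apply the age edit in w space
img = G.synthesis(ws_young, noise_mode="const")  # NCHW float, roughly in [-1, 1]

To exercise the two routes once the app is running, a small client sketch: it assumes the server is on Flask's default http://127.0.0.1:5000, that templates/index.html and templates/result.html exist, and uses a hypothetical input file face.jpg.

import requests

base = "http://127.0.0.1:5000"

# Upload an image; upload_file() reads it from the "file" form field.
with open("face.jpg", "rb") as fh:
    requests.post(base + "/", files={"file": fh}).raise_for_status()

# The result is saved as results/young_<filename> and served by /download/<filename>.
resp = requests.get(base + "/download/young_face.jpg")
with open("young_face.jpg", "wb") as out:
    out.write(resp.content)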