Update app.py
app.py CHANGED
@@ -12,51 +12,38 @@ from huggingface_hub import login
 
 # --- 1. Configuration and Authentication ---
 
-# <<< START: CODE UPDATE >>>
-# Load configuration from Hugging Face Secrets instead of hardcoding.
 GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
 GCP_LOCATION = os.environ.get("GCP_LOCATION")
-# <<< END: CODE UPDATE >>>
 
 # --- Authentication and Sanity Checks Block ---
 
-# Part A: Hugging Face Hub Authentication
 hf_token = os.environ.get("HF_TOKEN")
 if hf_token:
     print("Hugging Face token found. Logging in.")
     login(token=hf_token)
 else:
-    print("WARNING: Hugging Face token ('HF_TOKEN') not found.
+    print("WARNING: Hugging Face token ('HF_TOKEN') not found.")
 
-# Part B: Google Cloud Authentication
 creds_json_str = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS_JSON")
 
-# Check if all necessary secrets are loaded
 if not all([GCP_PROJECT_ID, GCP_LOCATION, creds_json_str]):
-    error_message = f"FATAL: The following required secrets are missing: {', '.join(missing_secrets)}. Please set them in the Space settings."
+    missing_secrets = [s for s, v in {
+        "GCP_PROJECT_ID": GCP_PROJECT_ID,
+        "GCP_LOCATION": GCP_LOCATION,
+        "GOOGLE_APPLICATION_CREDENTIALS_JSON": creds_json_str
+    }.items() if not v]
+    error_message = f"FATAL: Missing required secrets: {', '.join(missing_secrets)}."
     print(error_message)
-    # Define a dummy function to show a clear error in the UI.
     def generate_video(prompt):
         raise gr.Error(error_message)
 else:
-    print("All required secrets (GCP Project, Location, Credentials, HF Token) are loaded.")
-    # Construct API URLs now that we have the project and location
+    print("All required secrets are loaded.")
     MODEL_ID = "veo-3.0-generate-preview"
     API_ENDPOINT = f"{GCP_LOCATION}-aiplatform.googleapis.com"
     PREDICT_URL = f"https://{API_ENDPOINT}/v1/projects/{GCP_PROJECT_ID}/locations/{GCP_LOCATION}/publishers/google/models/{MODEL_ID}:predictLongRunning"
     FETCH_URL = f"https://{API_ENDPOINT}/v1/projects/{GCP_PROJECT_ID}/locations/{GCP_LOCATION}/publishers/google/models/{MODEL_ID}:fetchPredictOperation"
 
-    with open("gcp_creds.json", "w") as f:
-        f.write(creds_json_str)
+    with open("gcp_creds.json", "w") as f: f.write(creds_json_str)
     SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
     credentials, _ = google.auth.load_credentials_from_file("gcp_creds.json", scopes=SCOPES)
 
@@ -69,51 +56,48 @@ else:
 def generate_video(prompt: str):
     if not prompt:
         raise gr.Error("Prompt cannot be empty.")
-    yield "Status: Authenticating and submitting job...", None
+    yield "Status: Submitting job...", None
     try:
-            "Authorization": f"Bearer {access_token}",
-            "Content-Type": "application/json",
-        }
-        payload = {
-            "instances": [{"prompt": prompt}],
-            "parameters": {
-                "aspectRatio": "16:9", "sampleCount": 1, "durationSeconds": 8,
-                "personGeneration": "allow_all", "addWatermark": True,
-                "includeRaiReason": True, "generateAudio": True,
-            }
-        }
+        headers = {"Authorization": f"Bearer {get_access_token()}", "Content-Type": "application/json"}
+        payload = {"instances": [{"prompt": prompt}], "parameters": {"aspectRatio": "16:9", "sampleCount": 1, "durationSeconds": 8, "personGeneration": "allow_all", "addWatermark": True, "includeRaiReason": True, "generateAudio": True}}
         response = requests.post(PREDICT_URL, headers=headers, json=payload)
-        response.raise_for_status()
+        response.raise_for_status()
         operation_name = response.json()["name"]
         print(f"Successfully submitted job. Operation Name: {operation_name}")
-        MAX_POLL_ATTEMPTS = 60
+        MAX_POLL_ATTEMPTS = 60
         for i in range(MAX_POLL_ATTEMPTS):
-            yield f"Status: Polling
-            headers["Authorization"] = f"Bearer {access_token}"
+            yield f"Status: Polling (Attempt {i+1}/{MAX_POLL_ATTEMPTS})...", None
+            headers["Authorization"] = f"Bearer {get_access_token()}"
             fetch_payload = {"operationName": operation_name}
             poll_response = requests.post(FETCH_URL, headers=headers, json=fetch_payload)
             poll_response.raise_for_status()
             poll_result = poll_response.json()
             if poll_result.get("done"):
-                print("Job finished
+                print("Job finished.")
+                print(f"Full response payload: {json.dumps(poll_result, indent=2)}") # For debugging
                 response_data = poll_result.get("response", {})
                 if "videos" in response_data and response_data["videos"]:
                     video_base64 = response_data["videos"][0]["bytesBase64Encoded"]
                     video_bytes = base64.b64decode(video_base64)
-                    f.write(video_bytes)
-                    yield "Status: Done! Video generated.", temp_video_path
+                    with open("generated_video.mp4", "wb") as f: f.write(video_bytes)
+                    yield "Status: Done!", "generated_video.mp4"
                     return
                 else:
+                    # <<< START: IMPROVED ERROR HANDLING >>>
+                    error_message = "Video generation failed."
+                    # Check for a specific error message in the operation response
+                    if "error" in poll_result:
+                        error_details = poll_result["error"].get("message", "No details provided.")
+                        error_message += f"\nAPI Error: {error_details}"
+                    # Check for a specific RAI reason
+                    elif "raiResult" in response_data:
+                        rai_reason = response_data.get("raiMediaFilteredReason", "Unknown reason.")
+                        error_message += f"\nReason: Content was blocked by safety filters ({rai_reason})."
+                    else:
+                        error_message += "\nReason: The API did not return a video or a specific error."
+
+                    raise gr.Error(error_message)
+                    # <<< END: IMPROVED ERROR HANDLING >>>
             time.sleep(10)
         raise gr.Error("Operation timed out.")
     except Exception as e:
@@ -131,14 +115,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         with gr.Column(scale=1):
             status_output = gr.Markdown("Status: Ready")
             video_output = gr.Video(label="Generated Video", interactive=False)
-    gr.Examples(
-        inputs=prompt_input,
-    )
-    submit_button.click(
-        fn=generate_video,
-        inputs=prompt_input,
-        outputs=[status_output, video_output]
-    )
+    gr.Examples(["A high-speed drone shot flying through a futuristic city with flying vehicles."], inputs=prompt_input)
+    submit_button.click(fn=generate_video, inputs=prompt_input, outputs=[status_output, video_output])
 
 demo.launch()
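Note: the updated generate_video() builds its Authorization header from a get_access_token() helper that is defined outside the hunks shown above. A minimal sketch of what such a helper can look like with google-auth, reusing the module-level credentials loaded from gcp_creds.json (an assumption for illustration, not the Space's actual implementation):

import google.auth.transport.requests

def get_access_token() -> str:
    # Hypothetical sketch: refresh the service-account credentials loaded
    # above and return a short-lived OAuth2 bearer token for the Vertex AI
    # REST endpoints used by PREDICT_URL and FETCH_URL.
    auth_request = google.auth.transport.requests.Request()
    credentials.refresh(auth_request)
    return credentials.token

Because generate_video is a generator that yields (status, video) pairs, the click handler streams intermediate status text to status_output before the final video path reaches video_output. A quick local check, assuming the same secrets are set in the environment, could iterate it directly:

for status, video_path in generate_video("A timelapse of clouds rolling over a mountain range"):
    print(status, video_path)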