Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
# app.py
|
2 |
|
3 |
import os
|
4 |
import gradio as gr
|
@@ -10,13 +10,11 @@ import google.auth
|
|
10 |
import google.auth.transport.requests
|
11 |
from huggingface_hub import login
|
12 |
|
13 |
-
# --- 1. Configuration and Authentication ---
|
14 |
-
|
15 |
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
|
16 |
GCP_LOCATION = os.environ.get("GCP_LOCATION")
|
17 |
|
18 |
-
# --- Authentication and Sanity Checks Block ---
|
19 |
-
|
20 |
hf_token = os.environ.get("HF_TOKEN")
|
21 |
if hf_token:
|
22 |
print("Hugging Face token found. Logging in.")
|
@@ -27,92 +25,101 @@ else:
|
|
27 |
creds_json_str = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS_JSON")
|
28 |
|
29 |
if not all([GCP_PROJECT_ID, GCP_LOCATION, creds_json_str]):
|
30 |
-
missing_secrets = [s for s, v in {
|
31 |
-
"GCP_PROJECT_ID": GCP_PROJECT_ID,
|
32 |
-
"GCP_LOCATION": GCP_LOCATION,
|
33 |
-
"GOOGLE_APPLICATION_CREDENTIALS_JSON": creds_json_str
|
34 |
-
}.items() if not v]
|
35 |
error_message = f"FATAL: Missing required secrets: {', '.join(missing_secrets)}."
|
36 |
print(error_message)
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
45 |
|
46 |
-
|
47 |
-
|
48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
headers = {"Authorization": f"Bearer {get_access_token()}", "Content-Type": "application/json"}
|
62 |
-
payload = {"instances": [{"prompt": prompt}], "parameters": {"aspectRatio": "16:9", "sampleCount": 1, "durationSeconds": 8, "personGeneration": "allow_all", "addWatermark": True, "includeRaiReason": True, "generateAudio": True}}
|
63 |
-
response = requests.post(PREDICT_URL, headers=headers, json=payload)
|
64 |
-
response.raise_for_status()
|
65 |
-
operation_name = response.json()["name"]
|
66 |
-
print(f"Successfully submitted job. Operation Name: {operation_name}")
|
67 |
-
MAX_POLL_ATTEMPTS = 60
|
68 |
-
for i in range(MAX_POLL_ATTEMPTS):
|
69 |
-
yield f"Status: Polling (Attempt {i+1}/{MAX_POLL_ATTEMPTS})...", None
|
70 |
-
headers["Authorization"] = f"Bearer {get_access_token()}"
|
71 |
-
fetch_payload = {"operationName": operation_name}
|
72 |
-
poll_response = requests.post(FETCH_URL, headers=headers, json=fetch_payload)
|
73 |
-
poll_response.raise_for_status()
|
74 |
-
poll_result = poll_response.json()
|
75 |
-
if poll_result.get("done"):
|
76 |
-
print("Job finished.")
|
77 |
-
# <<< CHANGE: The debugging line below is now commented out for cleaner logs. >>>
|
78 |
-
# print(f"Full response payload: {json.dumps(poll_result, indent=2)}")
|
79 |
-
|
80 |
-
response_data = poll_result.get("response", {})
|
81 |
-
if "videos" in response_data and response_data["videos"]:
|
82 |
-
video_base64 = response_data["videos"][0]["bytesBase64Encoded"]
|
83 |
-
video_bytes = base64.b64decode(video_base64)
|
84 |
-
with open("generated_video.mp4", "wb") as f: f.write(video_bytes)
|
85 |
-
yield "Status: Done!", "generated_video.mp4"
|
86 |
-
return
|
87 |
-
else:
|
88 |
-
error_message = "Video generation failed."
|
89 |
-
if "error" in poll_result:
|
90 |
-
error_details = poll_result["error"].get("message", "No details provided.")
|
91 |
-
error_message += f"\nAPI Error: {error_details}"
|
92 |
-
elif "raiResult" in response_data:
|
93 |
-
rai_reason = response_data.get("raiMediaFilteredReason", "Unknown reason.")
|
94 |
-
error_message += f"\nReason: Content was blocked by safety filters ({rai_reason})."
|
95 |
-
else:
|
96 |
-
error_message += "\nReason: The API did not return a video or a specific error."
|
97 |
-
raise gr.Error(error_message)
|
98 |
-
time.sleep(10)
|
99 |
-
raise gr.Error("Operation timed out.")
|
100 |
-
except Exception as e:
|
101 |
-
print(f"An error occurred: {e}")
|
102 |
-
raise gr.Error(str(e))
|
103 |
|
104 |
-
#
|
105 |
-
|
106 |
-
gr.
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
with gr.Column(scale=1):
|
113 |
-
status_output = gr.Markdown("Status: Ready")
|
114 |
-
video_output = gr.Video(label="Generated Video", interactive=False)
|
115 |
-
gr.Examples(["A high-speed drone shot flying through a futuristic city with flying vehicles."], inputs=prompt_input)
|
116 |
-
submit_button.click(fn=generate_video, inputs=prompt_input, outputs=[status_output, video_output])
|
117 |
|
|
|
118 |
demo.launch()
|
|
|
1 |
+
# app.py (API-Only Version)
|
2 |
|
3 |
import os
|
4 |
import gradio as gr
|
|
|
10 |
import google.auth.transport.requests
|
11 |
from huggingface_hub import login
|
12 |
|
13 |
+
# --- 1. Configuration and Authentication (Unchanged) ---
|
|
|
14 |
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
|
15 |
GCP_LOCATION = os.environ.get("GCP_LOCATION")
|
16 |
|
17 |
+
# --- Authentication and Sanity Checks Block (Unchanged) ---
|
|
|
18 |
hf_token = os.environ.get("HF_TOKEN")
|
19 |
if hf_token:
|
20 |
print("Hugging Face token found. Logging in.")
|
|
|
25 |
creds_json_str = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS_JSON")

# Fail fast at startup if any required secret is absent, naming exactly
# which ones are missing so the deployment logs are actionable.
if not all([GCP_PROJECT_ID, GCP_LOCATION, creds_json_str]):
    missing_secrets = [s for s, v in {"GCP_PROJECT_ID": GCP_PROJECT_ID, "GCP_LOCATION": GCP_LOCATION, "GOOGLE_APPLICATION_CREDENTIALS_JSON": creds_json_str}.items() if not v]
    error_message = f"FATAL: Missing required secrets: {', '.join(missing_secrets)}."
    print(error_message)
    # This initial error will still be raised if the app can't start
    raise RuntimeError(error_message)

print("All required secrets are loaded. Initializing API service.")
# Vertex AI long-running prediction endpoints for the Veo preview model.
# PREDICT_URL submits a job; FETCH_URL polls its operation status.
MODEL_ID = "veo-3.0-generate-preview"
API_ENDPOINT = f"{GCP_LOCATION}-aiplatform.googleapis.com"
PREDICT_URL = f"https://{API_ENDPOINT}/v1/projects/{GCP_PROJECT_ID}/locations/{GCP_LOCATION}/publishers/google/models/{MODEL_ID}:predictLongRunning"
FETCH_URL = f"https://{API_ENDPOINT}/v1/projects/{GCP_PROJECT_ID}/locations/{GCP_LOCATION}/publishers/google/models/{MODEL_ID}:fetchPredictOperation"

# Materialize the service-account JSON to disk so google.auth can load it
# via its file-based loader.
with open("gcp_creds.json", "w") as f: f.write(creds_json_str)
SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
credentials, _ = google.auth.load_credentials_from_file("gcp_creds.json", scopes=SCOPES)
|
43 |
+
|
44 |
+
def get_access_token():
    """Return a fresh OAuth2 bearer token for the service account.

    The module-level ``credentials`` object is refreshed on every call,
    so the returned token is always current.
    """
    refresh_request = google.auth.transport.requests.Request()
    credentials.refresh(refresh_request)
    return credentials.token
|
48 |
|
49 |
+
# --- 2. Core Video Generation Logic (Refactored for API) ---
# The function returns a final JSON-serializable dict instead of yielding updates.
def generate_video_api(prompt: str):
    """Submit a Veo video-generation job to Vertex AI and poll until done.

    Args:
        prompt: Text description of the video to generate.

    Returns:
        dict: ``{"status": "success", "video_base64": <str>}`` on success, or
        ``{"status": "error", "message": <str>}`` on any failure (empty
        prompt, API error, safety filtering, timeout).
    """
    if not prompt:
        return {"status": "error", "message": "Prompt cannot be empty."}

    try:
        headers = {"Authorization": f"Bearer {get_access_token()}", "Content-Type": "application/json"}
        payload = {"instances": [{"prompt": prompt}], "parameters": {"aspectRatio": "16:9", "sampleCount": 1, "durationSeconds": 8, "personGeneration": "allow_all", "addWatermark": True, "includeRaiReason": True, "generateAudio": True}}

        # Submit job. An explicit timeout prevents the request from hanging
        # the endpoint forever on network stalls (a Timeout is caught by the
        # generic handler below and reported as an error dict).
        response = requests.post(PREDICT_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        operation_name = response.json()["name"]
        print(f"Successfully submitted job. Operation Name: {operation_name}")

        # Poll for result: up to ~10 minutes (60 attempts x 10 s).
        MAX_POLL_ATTEMPTS = 60
        for i in range(MAX_POLL_ATTEMPTS):
            print(f"Polling (Attempt {i+1}/{MAX_POLL_ATTEMPTS})...")
            time.sleep(10)  # Wait before polling

            # Tokens can expire during a long poll loop; refresh each attempt.
            headers["Authorization"] = f"Bearer {get_access_token()}"
            fetch_payload = {"operationName": operation_name}
            poll_response = requests.post(FETCH_URL, headers=headers, json=fetch_payload, timeout=60)
            poll_response.raise_for_status()
            poll_result = poll_response.json()

            if poll_result.get("done"):
                print("Job finished.")
                response_data = poll_result.get("response", {})

                # Case 1: Success, video is present
                if "videos" in response_data and response_data["videos"]:
                    video_base64 = response_data["videos"][0]["bytesBase64Encoded"]
                    return {"status": "success", "video_base64": video_base64}

                # Case 2: Failure — surface the most specific reason available.
                error_message = "Video generation failed."
                if "error" in poll_result:
                    error_details = poll_result["error"].get("message", "No details provided.")
                    error_message += f" API Error: {error_details}"
                elif "raiResult" in response_data:
                    rai_reason = response_data.get("raiMediaFilteredReason", "Unknown reason.")
                    error_message += f" Content was blocked by safety filters ({rai_reason})."
                else:
                    error_message += " The API did not return a video or a specific error."

                return {"status": "error", "message": error_message}

        return {"status": "error", "message": "Operation timed out."}

    except requests.exceptions.HTTPError as e:
        print(f"HTTP Error: {e.response.text}")
        return {"status": "error", "message": f"API Error: {e.response.status_code}. Details: {e.response.text}"}
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        return {"status": "error", "message": f"An unexpected error occurred: {str(e)}"}
107 |
|
108 |
+
# --- 3. Gradio API Definition (No UI) ---
# We define the components that make up the API contract, but they are not made visible.
with gr.Blocks() as demo:
    # Define the inputs and outputs for the API
    prompt_input = gr.Textbox(label="prompt", visible=False)
    output_json = gr.JSON(label="result", visible=False)

    # Create the API endpoint named "predict"
    # This will be available at /run/predict
    # NOTE(review): nesting gr.Interface inside gr.Blocks while reusing
    # components created in the outer Blocks is unusual Gradio usage —
    # confirm the endpoint is registered as intended for the installed
    # Gradio version (a hidden Button.click(..., api_name="predict") is
    # the more common API-only pattern).
    gr.Interface(
        fn=generate_video_api,
        inputs=prompt_input,
        outputs=output_json,
        api_name="predict"
    )

# The launch() call is still needed to start the web server that listens for API calls.
demo.launch()
|