import os
import time

import gradio as gr
import google.generativeai as genai

# Configure the Gemini API key (read it from the environment instead of hard-coding a secret)
api_key = os.environ.get("GOOGLE_API_KEY")
genai.configure(api_key=api_key)
def describe_video(video_file):
    try:
        # Upload the video to the Gemini Files API.
        print("Uploading file...")
        uploaded_video = genai.upload_file(path=video_file)
        print(f"Completed upload: {uploaded_video.uri}")

        # Poll until the uploaded video has been processed.
        while uploaded_video.state.name == "PROCESSING":
            print("Waiting for video to be processed.")
            time.sleep(10)
            uploaded_video = genai.get_file(uploaded_video.name)

        if uploaded_video.state.name == "FAILED":
            raise ValueError(uploaded_video.state.name)
        print(f"Video processing complete: {uploaded_video.uri}")

        prompt = "Describe this video."

        # Use the Gemini 1.5 Flash model.
        model = genai.GenerativeModel(model_name="models/gemini-1.5-flash-latest")

        # Make the LLM request.
        print("Making LLM inference request...")
        response = model.generate_content(
            [prompt, uploaded_video], request_options={"timeout": 600}
        )
        print(response.text)

        # Clean up the uploaded file once the description has been generated.
        genai.delete_file(uploaded_video.name)
        print(f"Deleted file {uploaded_video.uri}")

        return response.text
    except Exception as e:
        return f"An error occurred: {e}"

# Create the Gradio interface
iface = gr.Interface(
    fn=describe_video,
    inputs=gr.Video(),
    outputs=gr.Textbox(),
    title="Video Description with Gemini",
    description="Upload a video to get a description using Google Gemini",
)

# Launch the interface
iface.launch()
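
# Usage sketch (assumptions: this script is saved as app.py and the API key is exported beforehand):
#   export GOOGLE_API_KEY="your-key-here"
#   python app.py
# Gradio then serves the interface locally, by default at http://127.0.0.1:7860.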