File size: 1,627 Bytes
f74e492 378ed8f 05588a0 378ed8f 7fe77ba 05588a0 378ed8f 05588a0 0f57b67 05588a0 378ed8f 05588a0 378ed8f 05588a0 378ed8f 05588a0 378ed8f 7fe77ba f74e492 05588a0 378ed8f 05588a0 378ed8f 05588a0 378ed8f 05588a0 378ed8f 05588a0 378ed8f 05588a0 378ed8f 05588a0 7fe77ba 378ed8f 7fe77ba 378ed8f f74e492 378ed8f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
import os
import time

import gradio as gr
import google.generativeai as genai
# API key: prefer the GOOGLE_API_KEY environment variable; fall back to the
# previously hard-coded key so existing deployments keep working.
# SECURITY: a real API key is committed in this file — rotate it and delete
# the fallback literal. Never ship secrets in source control.
api_key = os.environ.get("GOOGLE_API_KEY", "AIzaSyDsrgHAnNWDJqWWzq3oFAbUy5W40cUT0dY")
genai.configure(api_key=api_key)
def describe_video(pp, video_file):
    """Upload a video to the Gemini Files API and answer a prompt about it.

    Args:
        pp: The user's question/prompt about the video (from the Textbox).
        video_file: Local filesystem path to the video (from gr.Video).

    Returns:
        The model's text answer, or an ``"An error occurred: ..."`` string
        on failure (Gradio displays whichever string is returned).
    """
    uploaded_video = None
    try:
        print("Uploading file...")
        uploaded_video = genai.upload_file(path=video_file)
        print(f"Completed upload: {uploaded_video.uri}")

        # The Files API processes videos asynchronously; poll until it
        # leaves the PROCESSING state.
        while uploaded_video.state.name == "PROCESSING":
            print("Waiting for video to be processed.")
            time.sleep(10)
            uploaded_video = genai.get_file(uploaded_video.name)

        if uploaded_video.state.name == "FAILED":
            raise ValueError(f"Video processing failed: {uploaded_video.state.name}")
        print("Video processing complete: " + uploaded_video.uri)

        model = genai.GenerativeModel(model_name="models/gemini-1.5-flash-latest")
        print("Making LLM inference request...")
        # Long videos can take a while to analyze; allow up to 10 minutes.
        response = model.generate_content(
            [pp, uploaded_video], request_options={"timeout": 600}
        )
        print(response.text)
        return response.text
    except Exception as e:  # UI boundary: surface the error to the user as text
        return f"An error occurred: {e}"
    finally:
        # Always delete the remote file, even on error — the original only
        # deleted it on the success path and leaked uploads on failure.
        if uploaded_video is not None:
            try:
                genai.delete_file(uploaded_video.name)
                print(f"Deleted file {uploaded_video.uri}")
            except Exception:
                pass  # best-effort cleanup; don't mask the primary result
# Wire up the Gradio UI: a prompt textbox plus a video upload feed
# describe_video; the model's answer comes back in a textbox.
prompt_box = gr.Textbox()
video_input = gr.Video()
answer_box = gr.Textbox()

iface = gr.Interface(
    describe_video,
    inputs=[prompt_box, video_input],
    outputs=answer_box,
    title="y Video.",
    description="Pose des questions sur la vidéo et obtient une réponse.",
)

# Start the web server.
iface.launch()