Docfile committed on
Commit
378ed8f
·
verified ·
1 Parent(s): eaaa834

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +124 -60
app.py CHANGED
@@ -1,63 +1,127 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
- """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
- """
45
- demo = gr.ChatInterface(
46
- respond,
47
- additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
- gr.Slider(
52
- minimum=0.1,
53
- maximum=1.0,
54
- value=0.95,
55
- step=0.05,
56
- label="Top-p (nucleus sampling)",
57
- ),
58
- ],
59
- )
 
 
 
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
- if __name__ == "__main__":
63
- demo.launch()
 
1
import gradio as gr
import google.generativeai as genai
import cv2
import os
import shutil

# Configure the Gemini API key. Prefer the environment variable so the real
# key is not committed to source control; the literal is only a placeholder
# and preserves the original behavior when the variable is unset.
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", "YOUR_API_KEY"))

# Directory where extracted video frames are written (cleared on every run).
FRAME_EXTRACTION_DIRECTORY = "/content/frames"
# Marker separating the video name from the MM:SS timestamp in frame filenames.
FRAME_PREFIX = "_frame"
14
# Reset the frame output directory so each run starts from an empty folder.
def create_frame_output_dir(output_dir):
    """Ensure *output_dir* exists and is empty, deleting any previous contents."""
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)
21
+
22
# Sample the video at one frame per second into FRAME_EXTRACTION_DIRECTORY.
def extract_frame_from_video(video_file_path):
    """Extract one frame per second of *video_file_path* as JPEG files.

    Frames are named "<video>_frameMM:SS.jpg" so File.get_timestamp can
    recover the timestamp later. Returns the number of frames written.
    Raises ValueError if the video cannot be opened or reports a
    non-positive FPS (the original code would hit ZeroDivisionError here).
    """
    create_frame_output_dir(FRAME_EXTRACTION_DIRECTORY)
    vidcap = cv2.VideoCapture(video_file_path)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    if not vidcap.isOpened() or fps <= 0:
        vidcap.release()
        raise ValueError(f"Could not read video (fps={fps}): {video_file_path}")
    # Dots replaced so the timestamp is the only '.'-delimited suffix.
    output_file_prefix = os.path.basename(video_file_path).replace('.', '_')
    frame_count = 0  # frames written so far == whole seconds sampled
    count = 0        # raw frames read from the stream
    while vidcap.isOpened():
        success, frame = vidcap.read()
        if not success:
            break
        # Write one frame each time another full second of video has elapsed.
        if int(count / fps) == frame_count:
            mins = frame_count // 60  # renamed: `min` shadowed the builtin
            sec = frame_count % 60
            time_string = f"{mins:02d}:{sec:02d}"
            image_name = f"{output_file_prefix}{FRAME_PREFIX}{time_string}.jpg"
            output_filename = os.path.join(FRAME_EXTRACTION_DIRECTORY, image_name)
            cv2.imwrite(output_filename, frame)
            frame_count += 1
        count += 1
    vidcap.release()
    return frame_count
46
+
47
# Lightweight record pairing a frame image on disk with its Gemini upload.
class File:
    def __init__(self, file_path: str, display_name: str = None):
        """Wrap *file_path*; *display_name* defaults to the file's basename.

        The original only assigned ``display_name`` when one was passed,
        leaving the attribute missing otherwise — now it is always set.
        """
        self.file_path = file_path
        self.display_name = display_name if display_name else os.path.basename(file_path)
        self.timestamp = self.get_timestamp(file_path)

    def set_file_response(self, response):
        # Store the Gemini upload response for later reference and deletion.
        self.response = response

    def get_timestamp(self, filename):
        """Return the MM:SS string encoded after FRAME_PREFIX, or None."""
        parts = filename.split(FRAME_PREFIX)
        if len(parts) != 2:
            # Filename does not follow the "<video>_frameMM:SS.jpg" scheme.
            return None
        return parts[1].split('.')[0]
63
 
64
# Upload every frame to Gemini, recording each upload response on its File.
def upload_files(files_to_upload):
    """Return the same File objects, each annotated with its upload response."""
    uploaded_files = []
    for frame_file in files_to_upload:
        upload_response = genai.upload_file(path=frame_file.file_path)
        frame_file.set_file_response(upload_response)
        uploaded_files.append(frame_file)
    return uploaded_files
72
+
73
# Ask Gemini to describe the video from the uploaded, timestamped frames.
def generate_description(uploaded_files):
    """Build a prompt interleaving timestamps and frame uploads; return model text."""
    prompt = "Describe this video."
    model = genai.GenerativeModel(model_name="models/gemini-1.5-pro-latest")
    request = [prompt]
    for uploaded in uploaded_files:
        # Pair each frame's timestamp with its uploaded file reference.
        request.extend((uploaded.timestamp, uploaded.response))
    # Long timeout: the request carries one uploaded frame per video second.
    response = model.generate_content(request, request_options={"timeout": 600})
    return response.text
83
+
84
# Remove the uploaded frames from Gemini once the description is generated.
def delete_files(uploaded_files):
    """Delete every previously uploaded file by its server-side name."""
    for uploaded in uploaded_files:
        genai.delete_file(uploaded.response.name)
88
+
89
# Gradio callback: extract frames, upload them, describe, then clean up.
def process_video(video_file):
    """Return a Gemini-generated description for the uploaded video.

    Failures are reported as an error string rather than raised, so the
    Gradio UI always receives text.
    """
    try:
        # gr.Video(type="filepath") passes a plain path string; older Gradio
        # versions passed a tempfile-like object exposing .name. The original
        # unconditionally read .name, which raises AttributeError on a str.
        video_path = video_file if isinstance(video_file, str) else video_file.name

        # Extract one frame per second into FRAME_EXTRACTION_DIRECTORY.
        extract_frame_from_video(video_path)

        # Sort so frames are sent in chronological (MM:SS filename) order.
        frame_names = sorted(os.listdir(FRAME_EXTRACTION_DIRECTORY))
        files_to_upload = [
            File(file_path=os.path.join(FRAME_EXTRACTION_DIRECTORY, name))
            for name in frame_names
        ]

        # Upload frames, generate the description, then free server storage.
        uploaded_files = upload_files(files_to_upload)
        description = generate_description(uploaded_files)
        delete_files(uploaded_files)

        return f"Video processed successfully! Description:\n\n{description}"
    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return f"An error occurred: {str(e)}"
116
+
117
# Create the Gradio interface.
iface = gr.Interface(
    fn=process_video,
    # NOTE(review): Gradio 4.x removed the `type` kwarg from gr.Video
    # (filepath delivery is the default) — confirm against the pinned
    # gradio version before upgrading.
    inputs=gr.Video(type="filepath"),
    outputs=gr.Textbox(),
    title="Video Description with Gemini",
    description="Upload a video to get a description using Google Gemini",
)

# Launch only when executed as a script, matching the guard the previous
# version of this app.py used; importing the module no longer blocks.
if __name__ == "__main__":
    iface.launch()