Docfile committed on
Commit
1222a26
·
verified ·
1 Parent(s): 119f8e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -34
app.py CHANGED
@@ -1,39 +1,39 @@
1
  import gradio as gr
 
 
 
2
 
 
 
 
3
 
4
- import os
5
- token=os.environ.get("TOKEN")
6
- os.environ["GOOGLE_API_KEY"] = token
7
-
8
- safe = [
9
- {
10
- "category": "HARM_CATEGORY_HARASSMENT",
11
- "threshold": "BLOCK_NONE",
12
- },
13
- {
14
- "category": "HARM_CATEGORY_HATE_SPEECH",
15
- "threshold": "BLOCK_NONE",
16
- },
17
- {
18
- "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
19
- "threshold": "BLOCK_NONE",
20
- },
21
- {
22
- "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
23
- "threshold": "BLOCK_NONE",
24
- },
25
- ]
26
- from llama_index.llms.gemini import Gemini
27
- from llama_index.multi_modal_llms.gemini import GeminiMultiModal
28
-
29
- from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
30
 
 
 
 
 
 
 
 
31
 
32
- # Chargez l'image
 
33
 
34
- gemini_pro = GeminiMultiModal(model_name="models/gemini-pro-vision")
35
 
36
- llm = Gemini(model="models/gemini-1.5-pro")
 
 
 
 
 
37
 
38
  e =""
39
  # Fonction pour générer le contenu
@@ -41,16 +41,41 @@ async def generate_content(pro,image):
41
  global e
42
 
43
  if not image:
44
- response = await llm.acomplete(pro,safety_settings=safe)
45
  print(response)
46
  e = response.text
47
  print(e)
48
 
49
  else:
50
- #response = model.generate_content([pro, image])
51
- response_acomplete = await llm.acomplete(prompt=pro, image_documents=image,safety_settings=safe)
52
- print(response_acomplete)
53
- e = response_acomplete
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  return e
55
 
56
 
 
1
  import gradio as gr
2
+ from PIL import Image
3
+ import google.generativeai as genai
4
+ import time
5
 
6
+ # SECURITY: the original line hardcoded a live Google API key directly in the
+ # script — a leaked credential that must be revoked. Load it from an
+ # environment variable or secrets store instead.
7
+ API_KEY = 'REDACTED-REVOKED-KEY'  # e.g. os.environ["GOOGLE_API_KEY"]
8
+ genai.configure(api_key=API_KEY)
9
 
10
+ # Generation configuration
11
+ generation_config = {
12
+ "temperature": 1,
13
+ "top_p": 0.95,
14
+ "top_k": 64,
15
+ "max_output_tokens": 8192,
16
+ "response_mime_type": "text/plain",
17
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
+ # Safety settings
20
+ safety_settings = [
21
+ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
22
+ {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
23
+ {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
24
+ {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
25
+ ]
26
 
27
+ # Model name
28
+ MODEL_NAME = "gemini-1.5-pro-latest"
29
 
 
30
 
31
+ # Create the model
32
+ model = genai.GenerativeModel(
33
+ model_name=MODEL_NAME,
34
+ safety_settings=safety_settings,
35
+ generation_config=generation_config,
36
+ )
37
 
38
  e =""
39
  # Fonction pour générer le contenu
 
41
  global e
42
 
43
  if not image:
44
+ response = model.generate_content(pro)
45
  print(response)
46
  e = response.text
47
  print(e)
48
 
49
  else:
50
+ print("Uploading file...")
51
+ uploaded_video = genai.upload_file(path=image)
52
+ print(f"Completed upload: {uploaded_video.uri}")
53
+
54
+
55
+ while uploaded_video.state.name == "PROCESSING":
56
+ print("Waiting for video to be processed.")
57
+ time.sleep(2)
58
+ uploaded_video = genai.get_file(uploaded_video.name)
59
+
60
+ if uploaded_video.state.name == "FAILED":
61
+ raise ValueError(uploaded_video.state.name)
62
+
63
+ print(f"Video processing complete: " + uploaded_video.uri)
64
+
65
+ print("Making LLM inference request...")
66
+ response = model.generate_content(
67
+ [pro, uploaded_video], request_options={"timeout": 600}
68
+ )
69
+
70
+ genai.delete_file(uploaded_video.name)
71
+ print(f"Deleted file {uploaded_video.uri}")
72
+
73
+
74
+
75
+
76
+
77
+
78
+ e = response.text
79
  return e
80
 
81