import gradio as gr
from PIL import Image
import google.generativeai as genai
import time
import pathlib
import mimetypes
# Configure the API key directly in the script
API_KEY = 'AIzaSyDnnYRJ49VUm_2FiKhNubv85g6KCDjcNSc'
genai.configure(api_key=API_KEY)
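# Note: on a hosted Space the key would more commonly be read from a secret /
# environment variable rather than hard-coded, e.g. (variable name assumed):
#   import os
#   genai.configure(api_key=os.environ["GOOGLE_API_KEY"])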
# Generation configuration
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}
# Safety settings
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]
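# BLOCK_NONE disables blocking for all four built-in safety categories.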
# Model name
MODEL_NAME = "gemini-1.5-pro-latest"
# Create the model
model = genai.GenerativeModel(
    model_name=MODEL_NAME,
    safety_settings=safety_settings,
    generation_config=generation_config,
)
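# Optional sanity check (a minimal sketch; uncomment to verify the key and model):
# print(model.generate_content("Hello").text)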
e =""
# Fonction pour générer le contenu
async def generate_content(pro,image):
global e
if not image:
response = model.generate_content(pro)
print(response)
e = response.text
print(e)
else:
'''
print(f"Uploading file...")
uploaded_video = genai.upload_file(path=image)
print(f"Completed upload: {uploaded_video.uri}")
while uploaded_video.state.name == "PROCESSING":
print("Waiting for video to be processed.")
time.sleep(2)
uploaded_video = genai.get_file(uploaded_video.name)
if uploaded_video.state.name == "FAILED":
raise ValueError(uploaded_video.state.name)
print(f"Video processing complete: " + uploaded_video.uri)
print("Making LLM inference request...") '''
image_input = {
'mime_type': 'image/jpeg',
'data': pathlib.Path(image).read_bytes()
}
response = model.generate_content(
[prompt, image_input], request_options={"timeout": 600}
)
#genai.delete_file(uploaded_video.name)
#print(f"Deleted file {uploaded_video.uri}")
e = response
return e
# Initial Markdown shown before the first request (e is still empty here)
markdown = r"""
{}
""".format(e)
# Gradio interface
iface = gr.Interface(
    fn=generate_content,
    inputs=[gr.Textbox(), gr.Image(type='filepath')],
    outputs=gr.Markdown(markdown, latex_delimiters=[{"left": "$$", "right": "$$", "display": True}]),
)
iface.launch()
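# Minimal direct-call sketch (bypassing Gradio), assuming an illustrative local
# file "example.jpg" exists next to the script:
#   print(generate_content("Describe this image.", "example.jpg"))
#   print(generate_content("Explain the Pythagorean theorem.", None))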