import base64
import os
from io import BytesIO

import gradio as gr
from llama_index.llms.gemini import Gemini
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
from llama_index.core.schema import ImageDocument

# The Space's TOKEN secret is forwarded as GOOGLE_API_KEY, which the
# llama-index Gemini integrations read for authentication.
token = os.environ.get("TOKEN")
os.environ["GOOGLE_API_KEY"] = token
# Disable content filtering for all four harm categories.
safe = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]
# Instantiate the models: a multi-modal model for prompts that include an
# image, and a text-only model for everything else.
gemini_pro = GeminiMultiModal(model_name="models/gemini-pro-vision")
llm = Gemini(model="models/gemini-1.5-pro")
e =""
# Fonction pour générer le contenu
async def generate_content(pro,image):
global e
if not image:
response = await llm.acomplete(pro,safety_settings=safe)
print(response)
e = response.text
print(e)
else:
#response = model.generate_content([pro, image])
response_acomplete = await llm.acomplete(prompt=pro, image_documents=image,safety_settings=safe)
print(response_acomplete)
e = response_acomplete
return e
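
# A minimal smoke test (hypothetical prompt), runnable without the UI:
#
#   import asyncio
#   print(asyncio.run(generate_content("State Euler's identity in LaTeX.", None)))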
markdown = r"""
e
""".format(e)
# Interface Gradio
iface = gr.Interface(fn=generate_content, inputs=[gr.Textbox(),gr.Image(type='pil')], outputs= gr.Markdown(markdown, latex_delimiters=[{ "left":"$$", "right":"$$", "display": True }]))
iface.launch() |
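
# When running locally rather than on a Space, Gradio can expose a public
# tunnel instead:
#   iface.launch(share=True)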