app.py (CHANGED)
@@ -52,7 +52,7 @@ from transformers import pipeline
 
 pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")
 
-@spaces.GPU(
+@spaces.GPU()
 def get_llm_idea(user_prompt):
     agent_maker_sys = f"""
 You are an AI whose job is to help users create their own chatbot whose personality will reflect the character and scene atmosphere from an image described by users.
@@ -86,6 +86,25 @@ Here's another example to help you, but only provide one on the end: If a user t
 
 
 def infer(image_in):
+    """
+    Generate a system prompt idea for a language model based on the content of an input image.
+
+    This function performs two steps:
+    1. It uses a vision-language model (Kosmos-2) to generate a descriptive caption of the input image.
+    2. It then uses a text generation pipeline (Zephyr-7B) to create a chatbot configuration from that caption,
+       including a title, system prompt, and example user message.
+
+    Args:
+        image_in (str): The filepath to an image representing a character, scene, or setting.
+
+    Returns:
+        Tuple[str, str]:
+            - The generated caption describing the image.
+            - A suggested LLM system prompt structure including:
+                - A chatbot title
+                - A system message defining the bot’s personality
+                - An example user input message
+    """
     gr.Info("Getting image description...")
     """
     if cap_type == "Fictional" :
@@ -185,4 +204,4 @@ with gr.Blocks(css=css) as demo:
         ]
     )
 
-demo.queue().launch(show_api=False, show_error=True)
+demo.queue().launch(show_api=False, show_error=True, ssr_mode=False, mcp_server=True)
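The first hunk fixes the ZeroGPU decorator on get_llm_idea. On ZeroGPU Spaces, spaces.GPU marks the functions that actually need accelerator hardware, and a GPU is attached only while a decorated function runs. Below is a minimal sketch of the pattern, reusing the pipeline definition from app.py; the generation arguments and the function body are illustrative, not the Space's real get_llm_idea logic.

import spaces
import torch
from transformers import pipeline

# Same pipeline as in app.py; loaded once at startup, before any GPU is attached.
pipe = pipeline(
    "text-generation",
    model="HuggingFaceH4/zephyr-7b-beta",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

@spaces.GPU()  # ZeroGPU: a GPU is allocated only for the duration of this call
def get_llm_idea(user_prompt):
    # Illustrative generation call; the real function first builds a longer system prompt.
    outputs = pipe(user_prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
    return outputs[0]["generated_text"]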
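The second hunk adds a docstring to infer describing its two steps: caption the image with Kosmos-2, then turn the caption into a chatbot idea with the Zephyr-7B pipeline. The function body is not part of this diff, so the sketch below is an assumption about how those steps are wired together; get_caption is a hypothetical placeholder for the Kosmos-2 call, and get_llm_idea is the Zephyr-7B function from app.py shown in the first hunk.

import gradio as gr

def get_caption(image_in: str) -> str:
    # Hypothetical placeholder for the Kosmos-2 captioning step; the real
    # implementation is not shown in this diff.
    raise NotImplementedError

def infer(image_in):
    gr.Info("Getting image description...")
    caption = get_caption(image_in)

    gr.Info("Generating a chatbot idea from the caption...")
    llm_idea = get_llm_idea(caption)  # the @spaces.GPU() Zephyr-7B step from app.py

    # Matches the Tuple[str, str] return described in the new docstring.
    return caption, llm_idea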
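The last hunk turns on Gradio's MCP server at launch and disables server-side rendering, which is what makes the Space report as MCP ready. With mcp_server=True, Gradio exposes the app's functions as MCP tools and derives the tool descriptions from their docstrings, which fits the docstring added to infer in the same change. Below is a minimal, self-contained sketch assuming Gradio 5.x with MCP support installed; the Blocks layout is a placeholder, since app.py's real UI is outside this diff.

import gradio as gr

def infer(image_in: str) -> str:
    """Illustrative tool: return a caption-based chatbot idea for the given image filepath."""
    # Placeholder body; in app.py this step runs Kosmos-2 and Zephyr-7B.
    return f"Chatbot idea for {image_in}"

with gr.Blocks() as demo:
    image_in = gr.Image(type="filepath", label="Image input")
    result = gr.Textbox(label="Suggested system prompt")
    gr.Button("Generate").click(infer, inputs=image_in, outputs=result)

# Same launch call as the new line 207: no public API page, errors surfaced in the UI,
# server-side rendering disabled, and the app exposed as an MCP server.
demo.queue().launch(show_api=False, show_error=True, ssr_mode=False, mcp_server=True)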