Nymbo committed
Commit cd28068 · verified · 1 Parent(s): e51b520

Update app.py

Files changed (1)
  1. app.py +22 -17
app.py CHANGED
@@ -630,7 +630,7 @@ CSS_STYLES = """
 }
 /* Default: add subtitle under titles */
 .gradio-container h1::after {
-    content: "Fetch Webpage | Search DuckDuckGo | Code Interpreter | Kokoro TTS | Flux Krea (Serverless)";
+    content: "Fetch Webpage | Search DuckDuckGo | Code Interpreter | Kokoro TTS | Image Generation";
     display: block;
     font-size: 1rem;
     font-weight: 500;
@@ -665,14 +665,15 @@ kokoro_interface = gr.Interface(
 )
 
 # ==========================
-# Flux Krea (Serverless)
+# Image Generation (Serverless)
 # ==========================
 
 HF_API_TOKEN = os.getenv("HF_READ_TOKEN")
 
 
-def Flux_Krea_Generate( # <-- MCP tool #5 (Flux Krea - Serverless)
+def Image_Generation( # <-- MCP tool #5 (Image Generation)
     prompt: Annotated[str, "Text description of the image to generate."],
+    model_id: Annotated[str, "Hugging Face model id in the form 'creator/model-name' (e.g., black-forest-labs/FLUX.1-Krea-dev)."] = "black-forest-labs/FLUX.1-Krea-dev",
     negative_prompt: Annotated[str, "What should NOT appear in the image." ] = (
         "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, "
         "missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, "
@@ -686,8 +687,9 @@ def Flux_Krea_Generate( # <-- MCP tool #5 (Flux Krea - Serverless)
     height: Annotated[int, "Output height in pixels (64–1216, multiple of 32 recommended)."] = 1024,
 ) -> Image.Image:
     """
-    Generate a single image from a text prompt using the black-forest-labs/FLUX.1-Krea-dev
-    model via Hugging Face Inference (serverless). Returns a PIL image.
+    Generate a single image from a text prompt using a Hugging Face model via
+    serverless Inference. Returns a PIL image. By default, the model is
+    black-forest-labs/FLUX.1-Krea-dev.
 
     Notes (MCP):
     - Per the latest Gradio MCP docs, images returned from tools are handled by the server and
@@ -696,6 +698,8 @@ def Flux_Krea_Generate( # <-- MCP tool #5 (Flux Krea - Serverless)
 
     Args:
         prompt (str): Text description of the image to generate.
+        model_id (str): The Hugging Face model id (creator/model-name). Defaults to
+            "black-forest-labs/FLUX.1-Krea-dev".
         negative_prompt (str): What should NOT appear in the image.
         steps (int): Number of denoising steps (1–100). Higher can improve quality.
         cfg_scale (float): Guidance scale (1–20). Higher = follow the prompt more closely.
@@ -726,7 +730,7 @@ def Flux_Krea_Generate( # <-- MCP tool #5 (Flux Krea - Serverless)
         image = client.text_to_image(
             prompt=enhanced_prompt,
             negative_prompt=negative_prompt,
-            model="black-forest-labs/FLUX.1-Krea-dev",
+            model=model_id,
             width=width,
             height=height,
             num_inference_steps=steps,
@@ -741,7 +745,7 @@ def Flux_Krea_Generate( # <-- MCP tool #5 (Flux Krea - Serverless)
     # If we reach here, all providers failed
     msg = str(last_error) if last_error else "Unknown error"
     if "404" in msg:
-        raise gr.Error("Model not found. Ensure 'FLUX.1-Krea-dev' is accessible with your HF token.")
+        raise gr.Error(f"Model not found or unavailable: {model_id}. Check the id and your HF token access.")
     if "503" in msg:
         raise gr.Error("The model is warming up. Please try again shortly.")
     if "401" in msg or "403" in msg:
@@ -749,10 +753,11 @@ def Flux_Krea_Generate( # <-- MCP tool #5 (Flux Krea - Serverless)
     raise gr.Error(f"Image generation failed: {msg}")
 
 
-flux_krea_interface = gr.Interface(
-    fn=Flux_Krea_Generate,
+image_generation_interface = gr.Interface(
+    fn=Image_Generation,
     inputs=[
         gr.Textbox(label="Prompt", placeholder="Enter a prompt", lines=2),
+        gr.Textbox(label="Model", value="black-forest-labs/FLUX.1-Krea-dev", placeholder="creator/model-name"),
         gr.Textbox(
             label="Negative Prompt",
             value=(
@@ -772,28 +777,28 @@ flux_krea_interface = gr.Interface(
         gr.Slider(minimum=64, maximum=1216, value=1024, step=32, label="Height"),
     ],
     outputs=gr.Image(label="Generated Image"),
-    title="Flux Krea (Serverless)",
+    title="Image Generation",
     description=(
-        "<div style=\"text-align:center\">Generate images with FLUX.1-Krea-dev via Hugging Face Inference. "
-        "No input image required. 'Strength' is omitted. Set HF_READ_TOKEN as needed.</div>"
+        "<div style=\"text-align:center\">Generate images via Hugging Face Inference. "
+        "Default model: black-forest-labs/FLUX.1-Krea-dev. Set HF_READ_TOKEN as needed.</div>"
     ),
     api_description=(
-        "Generate a single image from a text prompt using black-forest-labs/FLUX.1-Krea-dev on Hugging Face Inference. "
-        "Parameters: prompt (str), negative_prompt (str), steps (int, 1–100), cfg_scale (float, 1–20), "
+        "Generate a single image from a text prompt using a Hugging Face model (serverless Inference). "
+        "Parameters: prompt (str), model_id (str, creator/model-name), negative_prompt (str), steps (int, 1–100), cfg_scale (float, 1–20), "
         "sampler (str, label only), seed (int, -1=random), width/height (int, 64–1216). Returns a PIL.Image."
     ),
     allow_flagging="never",
 )
 
-# Build tabbed app including Flux Krea
+# Build tabbed app including Image Generation
 demo = gr.TabbedInterface(
-    interface_list=[fetch_interface, concise_interface, code_interface, kokoro_interface, flux_krea_interface],
+    interface_list=[fetch_interface, concise_interface, code_interface, kokoro_interface, image_generation_interface],
     tab_names=[
         "Fetch Webpage",
        "DuckDuckGo Search",
         "Python Code Executor",
         "Kokoro TTS",
-        "Flux Krea (Serverless)",
+        "Image Generation",
     ],
     title="Tools MCP",
     theme="Nymbo/Nymbo_Theme",