chansung committed · verified
Commit e3f7eb9 · 1 parent: 8d5dd22

Upload folder using huggingface_hub

Files changed (3):
  1. .claude/settings.local.json +3 -1
  2. auto_diffusers.log +0 -0
  3. gradio_app.py +340 -323
.claude/settings.local.json CHANGED
@@ -55,7 +55,9 @@
       "Bash(rm /Users/deep-diver/Developers/auto-diffusers/optimization_docs.txt)",
       "Bash(grep -n -B5 -A15 \"container\\|margin\\|padding\" /Users/deep-diver/Developers/auto-diffusers/gradio_app.py)",
       "Bash(grep -n \"\\\"\\\"\\\"\" /Users/deep-diver/Developers/auto-diffusers/gradio_app.py)",
-      "Bash(grep -n '\"\"\"' /Users/deep-diver/Developers/auto-diffusers/gradio_app.py)"
+      "Bash(grep -n '\"\"\"' /Users/deep-diver/Developers/auto-diffusers/gradio_app.py)",
+      "Bash(timeout 15s python gradio_app.py)",
+      "Bash(grep -n \"gpu_name_custom\" gradio_app.py)"
     ],
     "deny": []
   },
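The hunk above only grows the list of allowed Bash commands in `.claude/settings.local.json`. A minimal sketch for inspecting such a file is shown below; the wrapping key names around the list are not visible in this diff, so the code deliberately searches the whole document rather than assuming a specific layout.

```python
# Minimal sketch: list the Bash permission entries recorded in
# .claude/settings.local.json. Assumption: the entries shown in the hunk live
# in a list somewhere inside the JSON document; the exact wrapping keys are
# not shown in this diff, so we search recursively instead of hard-coding them.
import json

def collect_bash_rules(node):
    """Recursively gather strings that look like Bash(...) permission entries."""
    if isinstance(node, str):
        return [node] if node.startswith("Bash(") else []
    if isinstance(node, list):
        return [rule for item in node for rule in collect_bash_rules(item)]
    if isinstance(node, dict):
        return [rule for value in node.values() for rule in collect_bash_rules(value)]
    return []

with open(".claude/settings.local.json") as fh:
    settings = json.load(fh)

for rule in collect_bash_rules(settings):
    print(rule)
```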
auto_diffusers.log CHANGED
The diff for this file is too large to render. See raw diff
 
gradio_app.py CHANGED
@@ -114,10 +114,10 @@ class GradioAutodiffusers:
         try:
             # Create manual hardware specs
             # Parse dtype selection
-            if dtype_selection == "Auto (Let AI decide)":
+            if dtype_selection == "Auto":
                 user_dtype = None
             else:
-                user_dtype = dtype_selection
+                user_dtype = f"torch.{dtype_selection}"
 
             manual_specs = {
                 'platform': platform,
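The dropdown now offers bare names ("float16", "bfloat16", ...) instead of "torch.float16"-style strings, so the handler prefixes the selection with "torch." itself and keeps None for "Auto". A hedged, self-contained sketch of that mapping (the helper name is illustrative, not from the repo):

```python
# Sketch of the dtype handling in the hunk above: "Auto" maps to None
# (let the generator decide), any other choice is prefixed with "torch.".
def resolve_user_dtype(dtype_selection: str):
    if dtype_selection == "Auto":
        return None
    return f"torch.{dtype_selection}"

assert resolve_user_dtype("Auto") is None
assert resolve_user_dtype("bfloat16") == "torch.bfloat16"
```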
@@ -210,7 +210,7 @@ def create_gradio_interface():
     .main-container {
         max-width: 1400px;
         margin: 0 auto;
-        padding: 2rem;
+        padding: 1rem;
         /* Removed position: relative that can interfere with dropdown positioning */
     }
 
@@ -234,20 +234,22 @@ def create_gradio_interface():
     .glass-card {
         background: rgba(255, 255, 255, 0.25) !important;
         border: 1px solid rgba(255, 255, 255, 0.2) !important;
-        border-radius: 20px !important;
+        border-radius: 8px !important;
         box-shadow:
             0 8px 32px rgba(0, 0, 0, 0.1),
             inset 0 1px 0 rgba(255, 255, 255, 0.2) !important;
+        margin-bottom: 1rem !important;
         /* Removed backdrop-filter and transforms that break dropdown positioning */
     }
 
     .ultra-glass {
         background: rgba(255, 255, 255, 0.15) !important;
         border: 1px solid rgba(255, 255, 255, 0.3) !important;
-        border-radius: 24px !important;
+        border-radius: 10px !important;
         box-shadow:
             0 12px 40px rgba(0, 0, 0, 0.15),
             inset 0 1px 0 rgba(255, 255, 255, 0.3) !important;
+        margin-bottom: 1rem !important;
         /* Removed backdrop-filter that interferes with dropdown positioning */
     }
 
@@ -259,12 +261,15 @@ def create_gradio_interface():
         rgba(59, 130, 246, 0.9) 100%) !important;
         backdrop-filter: blur(20px) !important;
         border: 1px solid rgba(255, 255, 255, 0.2) !important;
-        border-radius: 24px !important;
+        border-radius: 10px !important;
         box-shadow:
             0 20px 60px rgba(124, 58, 237, 0.3),
             inset 0 1px 0 rgba(255, 255, 255, 0.2) !important;
         position: relative;
         overflow: hidden;
+        width: 100% !important;
+        max-width: 100% !important;
+        box-sizing: border-box !important;
     }
 
     .hero-header::before {
@@ -298,7 +303,7 @@ def create_gradio_interface():
         font-weight: 700 !important;
         font-size: 1.1rem !important;
         padding: 1rem 3rem !important;
-        border-radius: 16px !important;
+        border-radius: 8px !important;
         box-shadow:
             0 8px 32px rgba(102, 126, 234, 0.4),
             inset 0 1px 0 rgba(255, 255, 255, 0.2) !important;
@@ -495,135 +500,132 @@ def create_gradio_interface():
     }
 
 
-    /* Code Areas - Ultra Premium Styling */
+    /* Code Areas - Moderate Clean Styling */
     .code-container {
-        background: linear-gradient(145deg,
-            rgba(15, 23, 42, 0.98) 0%,
-            rgba(30, 41, 59, 0.95) 50%,
-            rgba(15, 23, 42, 0.98) 100%) !important;
-        backdrop-filter: blur(30px) !important;
-        border: 2px solid transparent !important;
-        background-clip: padding-box !important;
-        border-radius: 20px !important;
-        position: relative !important;
+        background: rgba(248, 250, 252, 0.95) !important;
+        border: 1px solid rgba(226, 232, 240, 0.8) !important;
+        border-radius: 6px !important;
+        box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1) !important;
         overflow: hidden !important;
-        box-shadow:
-            0 20px 60px rgba(0, 0, 0, 0.4),
-            0 8px 32px rgba(15, 23, 42, 0.3),
-            inset 0 1px 0 rgba(255, 255, 255, 0.1),
-            inset 0 -1px 0 rgba(71, 85, 105, 0.2) !important;
-    }
-
-    .code-container::before {
-        content: '';
-        position: absolute;
-        top: 0;
-        left: 0;
-        right: 0;
-        bottom: 0;
-        background: linear-gradient(45deg,
-            rgba(99, 102, 241, 0.1) 0%,
-            rgba(139, 92, 246, 0.1) 25%,
-            rgba(59, 130, 246, 0.1) 50%,
-            rgba(139, 92, 246, 0.1) 75%,
-            rgba(99, 102, 241, 0.1) 100%) !important;
-        border-radius: 20px !important;
-        z-index: -1 !important;
-        animation: code-shimmer 3s ease-in-out infinite !important;
-    }
-
-    @keyframes code-shimmer {
-        0%, 100% { opacity: 0.3; }
-        50% { opacity: 0.6; }
     }
 
     /* Code editor styling */
     .code-container .cm-editor {
-        background: transparent !important;
-        border-radius: 16px !important;
+        background: #ffffff !important;
+        border-radius: 4px !important;
         font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', 'Fira Code', monospace !important;
-        font-size: 13px !important;
-        line-height: 1.6 !important;
-    }
-
-    .code-container .cm-focused {
-        outline: none !important;
-        box-shadow: 0 0 0 2px rgba(99, 102, 241, 0.4) !important;
+        font-size: 14px !important;
+        line-height: 1.5 !important;
     }
 
+    /* Enable soft wrapping for code content */
     .code-container .cm-content {
+        white-space: pre-wrap !important;
         padding: 1.5rem !important;
-        color: #e2e8f0 !important;
+        color: #374151 !important;
+    }
+
+    .code-container .cm-focused {
+        outline: none !important;
+        box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.3) !important;
     }
 
     .code-container .cm-line {
         padding-left: 0.5rem !important;
+        white-space: pre-wrap !important;
+        word-wrap: break-word !important;
+        overflow-wrap: break-word !important;
     }
 
-    /* Syntax highlighting for Python */
-    .code-container .cm-keyword { color: #f472b6 !important; }
-    .code-container .cm-string { color: #34d399 !important; }
-    .code-container .cm-comment { color: #94a3b8 !important; font-style: italic !important; }
-    .code-container .cm-number { color: #fbbf24 !important; }
-    .code-container .cm-variable { color: #60a5fa !important; }
-    .code-container .cm-function { color: #a78bfa !important; }
-    .code-container .cm-operator { color: #fb7185 !important; }
+    /* Force wrapping ONLY - NO SCROLLING */
+    .code-container .cm-editor {
+        white-space: pre-wrap !important;
+        overflow-x: hidden !important;
+    }
+
+    .code-container .cm-scroller {
+        overflow-x: hidden !important;
+        width: 100% !important;
+    }
+
+    .code-container .cm-editor .cm-content {
+        white-space: pre-wrap !important;
+        word-break: break-all !important;
+        overflow-wrap: anywhere !important;
+        width: 100% !important;
+        max-width: 100% !important;
+    }
+
+    .code-container .cm-editor .cm-line {
+        white-space: pre-wrap !important;
+        word-break: break-all !important;
+        overflow-wrap: anywhere !important;
+        width: 100% !important;
+        max-width: 100% !important;
+        box-sizing: border-box !important;
+    }
+
+    /* Force the entire code container to have no horizontal overflow */
+    .code-container,
+    .code-container * {
+        overflow-x: hidden !important;
+        max-width: 100% !important;
+    }
+
+    /* Moderate syntax highlighting for Python */
+    .code-container .cm-keyword { color: #7c3aed !important; }
+    .code-container .cm-string { color: #059669 !important; }
+    .code-container .cm-comment { color: #6b7280 !important; font-style: italic !important; }
+    .code-container .cm-number { color: #dc2626 !important; }
+    .code-container .cm-variable { color: #1e40af !important; }
+    .code-container .cm-function { color: #7c2d12 !important; }
+    .code-container .cm-operator { color: #374151 !important; }
 
     /* Code header styling */
     .code-container label {
-        background: linear-gradient(90deg,
-            rgba(99, 102, 241, 0.9) 0%,
-            rgba(139, 92, 246, 0.9) 50%,
-            rgba(59, 130, 246, 0.9) 100%) !important;
-        color: white !important;
-        padding: 1rem 1.5rem !important;
-        border-radius: 16px 16px 0 0 !important;
+        background: rgba(99, 102, 241, 0.1) !important;
+        color: #374151 !important;
+        padding: 0.75rem 1.25rem !important;
+        border-radius: 4px 4px 0 0 !important;
         font-weight: 600 !important;
-        font-size: 1rem !important;
-        letter-spacing: 0.025em !important;
-        text-shadow: 0 2px 4px rgba(0, 0, 0, 0.3) !important;
+        font-size: 0.95rem !important;
         margin: 0 !important;
         border: none !important;
-        box-shadow: 0 4px 12px rgba(99, 102, 241, 0.2) !important;
+        border-bottom: 1px solid rgba(226, 232, 240, 0.8) !important;
     }
 
 
     /* Custom scrollbar for code area */
     .code-container .cm-scroller::-webkit-scrollbar {
-        width: 8px !important;
-        height: 8px !important;
+        width: 6px !important;
+        height: 6px !important;
     }
 
     .code-container .cm-scroller::-webkit-scrollbar-track {
-        background: rgba(15, 23, 42, 0.3) !important;
-        border-radius: 4px !important;
+        background: rgba(243, 244, 246, 0.8) !important;
+        border-radius: 3px !important;
    }
 
    .code-container .cm-scroller::-webkit-scrollbar-thumb {
-        background: linear-gradient(135deg,
-            rgba(99, 102, 241, 0.6) 0%,
-            rgba(139, 92, 246, 0.6) 100%) !important;
-        border-radius: 4px !important;
-        border: 1px solid rgba(255, 255, 255, 0.1) !important;
+        background: rgba(156, 163, 175, 0.8) !important;
+        border-radius: 3px !important;
    }
 
    .code-container .cm-scroller::-webkit-scrollbar-thumb:hover {
-        background: linear-gradient(135deg,
-            rgba(99, 102, 241, 0.8) 0%,
-            rgba(139, 92, 246, 0.8) 100%) !important;
+        background: rgba(107, 114, 128, 0.9) !important;
    }
 
    /* Line numbers styling */
    .code-container .cm-lineNumbers {
-        background: rgba(15, 23, 42, 0.3) !important;
-        color: rgba(148, 163, 184, 0.6) !important;
-        border-right: 1px solid rgba(71, 85, 105, 0.3) !important;
+        background: rgba(249, 250, 251, 0.8) !important;
+        color: rgba(156, 163, 175, 0.8) !important;
+        border-right: 1px solid rgba(229, 231, 235, 0.8) !important;
        padding-right: 0.5rem !important;
    }
 
    .code-container .cm-lineNumbers .cm-gutterElement {
-        color: rgba(148, 163, 184, 0.5) !important;
-        font-weight: 500 !important;
+        color: rgba(156, 163, 175, 0.7) !important;
+        font-weight: 400 !important;
    }
 
    /* Memory Analysis Cards */
703
  background: #ffffff !important;
704
  }
705
 
 
 
 
 
 
 
 
 
 
 
 
706
  /* Mobile Responsive Styles */
707
  @media (max-width: 768px) {
708
  .main-container {
@@ -794,100 +807,50 @@ def create_gradio_interface():
 
     with gr.Column(elem_classes="main-container"):
         # Ultra Premium Header
-        with gr.Row():
-            with gr.Column(scale=1):
-                gr.HTML("""
-                    <div class="hero-header floating" style="text-align: center; padding: 1.5rem 1rem; margin-bottom: 1.5rem; position: relative;">
-                        <div style="position: relative; z-index: 2;">
-                            <h1 style="color: white; font-size: 2.2rem; margin: 0; font-weight: 800; text-shadow: 0 4px 8px rgba(0,0,0,0.3); letter-spacing: -0.02em; background: linear-gradient(135deg, #ffffff 0%, #f8fafc 50%, #e2e8f0 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text;">
-                                Auto Diffusers Config
-                            </h1>
-                            <h2 style="color: rgba(255,255,255,0.95); font-size: 1.2rem; margin: 0.3rem 0 0.8rem 0; font-weight: 600; text-shadow: 0 2px 4px rgba(0,0,0,0.2);">
-                                Hardware-Optimized Code Generator
-                            </h2>
-                            <p style="color: rgba(255,255,255,0.9); font-size: 1rem; margin: 0; font-weight: 400; text-shadow: 0 2px 4px rgba(0,0,0,0.2); max-width: 500px; margin: 0 auto; line-height: 1.5;">
-                                Generate optimized diffusion model code for your hardware
-                            </p>
-                            <div style="margin-top: 1rem;">
-                                <span style="display: inline-block; background: rgba(255,255,255,0.2); padding: 0.5rem 1rem; border-radius: 20px; color: white; font-size: 0.9rem; backdrop-filter: blur(10px); border: 1px solid rgba(255,255,255,0.3);">
-                                    🤖 Powered by Google Gemini 2.5
-                                </span>
-                            </div>
-                        </div>
-                    </div>
-                """)
+        gr.HTML("""
+            <div class="hero-header floating" style="text-align: center; padding: 1.5rem 1rem; margin-bottom: 1rem; position: relative;">
+                <div style="position: relative; z-index: 2;">
+                    <h1 style="color: white; font-size: 2.2rem; margin: 0; font-weight: 800; text-shadow: 0 4px 8px rgba(0,0,0,0.3); letter-spacing: -0.02em; background: linear-gradient(135deg, #ffffff 0%, #f8fafc 50%, #e2e8f0 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text;">
+                        Auto Diffusers Config
+                    </h1>
+                    <h2 style="color: rgba(255,255,255,0.95); font-size: 1.2rem; margin: 0.3rem 0 0.8rem 0; font-weight: 600; text-shadow: 0 2px 4px rgba(0,0,0,0.2);">
+                        Hardware-Optimized Code Generator
+                    </h2>
+                    <p style="color: rgba(255,255,255,0.9); font-size: 1rem; margin: 0; font-weight: 400; text-shadow: 0 2px 4px rgba(0,0,0,0.2); max-width: 500px; margin: 0 auto; line-height: 1.5;">
+                        Generate optimized diffusion model code for your hardware
+                    </p>
+                    <div style="margin-top: 1rem;">
+                        <span style="display: inline-block; background: rgba(255,255,255,0.2); padding: 0.5rem 1rem; border-radius: 20px; color: white; font-size: 0.9rem; backdrop-filter: blur(10px); border: 1px solid rgba(255,255,255,0.3);">
+                            🤖 Powered by Google Gemini 2.5
+                        </span>
+                    </div>
+                </div>
+            </div>
+        """)
 
         # Main Content Area
 
         # Hardware Selection Section
         with gr.Group(elem_classes="glass-card"):
-            gr.HTML("""
-                <div class="section-header" style="text-align: center;">
-                    <h3 style="margin: 0 0 0.5rem 0; color: #1e293b; font-size: 1.5rem; font-weight: 700;">
-                        ⚙️ Hardware Specifications
-                    </h3>
-                    <p style="margin: 0; color: #64748b; font-size: 1rem; font-weight: 500;">
-                        Configure your system hardware for optimal code generation
-                    </p>
-                </div>
-            """)
-
-            with gr.Row():
-                with gr.Column(scale=1):
+            with gr.Accordion("⚙️ Hardware Specifications", open=False) as hardware_accordion:
+                gr.HTML("""
+                    <div class="section-header" style="text-align: center;">
+                        <p style="margin: 0; color: #64748b; font-size: 1rem; font-weight: 500;">
+                            Configure your system hardware for optimal code generation
+                        </p>
+                    </div>
+                """)
+
+                # Platform, VRAM, and RAM in a single row
+                with gr.Row():
                     platform = gr.Dropdown(
                         choices=["Linux", "Darwin", "Windows"],
                         label="🖥️ Platform",
                         value="Linux",
                         info="Your operating system"
                     )
-
-                    gpu_vendor = gr.Dropdown(
-                        choices=[
-                            "Custom (Manual Input)",
-                            "NVIDIA Consumer (GeForce RTX)",
-                            "NVIDIA Professional (RTX A-Series)",
-                            "NVIDIA Data Center",
-                            "Apple Silicon",
-                            "AMD",
-                            "Intel",
-                            "CPU Only"
-                        ],
-                        label="🎮 GPU Vendor/Category",
-                        value="Custom (Manual Input)",
-                        info="Select your GPU category"
-                    )
-
-                    gpu_series = gr.Dropdown(
-                        choices=[],
-                        label="📊 GPU Series",
-                        visible=False,
-                        interactive=True,
-                        info="Choose your GPU series"
-                    )
-
-                    gpu_model = gr.Dropdown(
-                        choices=[],
-                        label="🔧 GPU Model",
-                        visible=False,
-                        interactive=True,
-                        info="Select your specific GPU model"
-                    )
-
-                    gpu_name_custom = gr.Textbox(
-                        label="💾 Custom GPU Name",
-                        placeholder="e.g., RTX 4090, GTX 1080 Ti",
-                        visible=True,
-                        info="Enter your GPU name manually"
-                    )
-
-                    gpu_name = gr.Textbox(
-                        label="Selected GPU",
-                        visible=False
-                    )
-
-                with gr.Column(scale=1):
                     vram_gb = gr.Number(
-                        label="🎯 VRAM/Memory (GB)",
+                        label="🎯 VRAM (GB)",
                         value=8,
                         minimum=0,
                         maximum=200,
@@ -901,87 +864,116 @@ def create_gradio_interface():
                         info="Total system memory"
                     )
 
+                # GPU configuration on separate lines
+                gpu_vendor = gr.Dropdown(
+                    choices=[
+                        "Custom (Manual Input)",
+                        "NVIDIA Consumer (GeForce RTX)",
+                        "NVIDIA Professional (RTX A-Series)",
+                        "NVIDIA Data Center",
+                        "Apple Silicon",
+                        "AMD",
+                        "Intel",
+                        "CPU Only"
+                    ],
+                    label="🎮 GPU Vendor/Category",
+                    value="Custom (Manual Input)",
+                    info="Select your GPU category"
+                )
+
+                gpu_series = gr.Dropdown(
+                    choices=[],
+                    label="📊 GPU Series",
+                    visible=False,
+                    interactive=True,
+                    info="Choose your GPU series"
+                )
+
+                gpu_model = gr.Dropdown(
+                    choices=[],
+                    label="🔧 GPU Model",
+                    visible=False,
+                    interactive=True,
+                    info="Select your specific GPU model"
+                )
+
+                gpu_name = gr.Textbox(
+                    label="Selected GPU",
+                    visible=False
+                )
+
         # Model Configuration Section
         with gr.Group(elem_classes="glass-card"):
-            gr.HTML("""
-                <div class="section-header" style="text-align: center;">
-                    <h3 style="margin: 0 0 0.5rem 0; color: #1e293b; font-size: 1.5rem; font-weight: 700;">
-                        🤖 Model Configuration
-                    </h3>
-                    <p style="margin: 0; color: #64748b; font-size: 1rem; font-weight: 500;">
-                        Configure the AI model and generation parameters
-                    </p>
-                </div>
-            """)
-
-            with gr.Row():
-                with gr.Column(scale=1):
-                    model_name = gr.Textbox(
-                        label="🏷️ Model Name",
-                        value="black-forest-labs/FLUX.1-schnell",
-                        placeholder="e.g., black-forest-labs/FLUX.1-schnell",
-                        info="HuggingFace model identifier"
+            with gr.Accordion("🤖 Model Configuration", open=False) as model_accordion:
+                gr.HTML("""
+                    <div class="section-header" style="text-align: center;">
+                        <p style="margin: 0; color: #64748b; font-size: 1rem; font-weight: 500;">
+                            Configure the AI model and generation parameters
+                        </p>
+                    </div>
+                """)
+
+                # Model Name - Full width on its own row
+                model_name = gr.Textbox(
+                    label="🏷️ Model Name",
+                    value="black-forest-labs/FLUX.1-schnell",
+                    placeholder="e.g., black-forest-labs/FLUX.1-schnell",
+                    info="HuggingFace model identifier"
+                )
+
+                # Other parameters in 4-column layout
+                with gr.Row():
+                    width = gr.Number(
+                        label="📏 Width (px)",
+                        value=1360,
+                        minimum=256,
+                        maximum=2048,
+                        step=64,
+                        info="Image width"
+                    )
+                    height = gr.Number(
+                        label="📐 Height (px)",
+                        value=768,
+                        minimum=256,
+                        maximum=2048,
+                        step=64,
+                        info="Image height"
                     )
-
                     dtype_selection = gr.Dropdown(
-                        choices=["Auto (Let AI decide)", "torch.float32", "torch.float16", "torch.bfloat16"],
-                        label="⚡ Data Type (dtype)",
-                        value="Auto (Let AI decide)",
-                        info="Precision mode - Auto is recommended"
+                        choices=["Auto", "float32", "float16", "bfloat16"],
+                        label="⚡ dtype",
+                        value="Auto",
+                        info="Precision mode"
                     )
-
-                with gr.Column(scale=1):
-                    with gr.Row():
-                        width = gr.Number(
-                            label="📏 Width (px)",
-                            value=1360,
-                            minimum=256,
-                            maximum=2048,
-                            step=64,
-                            info="Image width"
-                        )
-                        height = gr.Number(
-                            label="📐 Height (px)",
-                            value=768,
-                            minimum=256,
-                            maximum=2048,
-                            step=64,
-                            info="Image height"
-                        )
-
                     inference_steps = gr.Number(
-                        label="🔄 Inference Steps",
+                        label="🔄 Inf. Steps",
                         value=4,
                         minimum=1,
                         maximum=50,
-                        info="Number of denoising steps (higher = better quality, slower)"
+                        info="Denoising steps"
                     )
 
-        # Memory Analysis Section
-        with gr.Group(elem_classes="ultra-glass"):
-            gr.HTML("""
-                <div class="section-header" style="text-align: center;">
-                    <h3 style="margin: 0 0 0.5rem 0; color: #1e293b; font-size: 1.5rem; font-weight: 700;">
-                        🧠 Memory Analysis
-                    </h3>
-                    <p style="margin: 0; color: #64748b; font-size: 1rem; font-weight: 500;">
-                        Real-time analysis of model memory requirements and optimization strategies
-                    </p>
-                </div>
-            """)
-
-            memory_analysis_output = gr.Markdown(
-                value="✨ Select a model and configure your hardware to see memory requirements and optimization recommendations.",
-                elem_classes="memory-card"
-            )
+                # Memory Analysis Subsection (inside Model Configuration)
+                gr.HTML("""
+                    <div style="margin: 1.5rem 0 0.5rem 0; padding-top: 1rem; border-top: 1px solid rgba(226, 232, 240, 0.6);">
+                        <h4 style="margin: 0 0 0.5rem 0; color: #374151; font-size: 1.1rem; font-weight: 600;">
+                            🧠 Memory Analysis
+                        </h4>
+                        <p style="margin: 0; color: #6b7280; font-size: 0.9rem;">
+                            Real-time analysis of model memory requirements and optimization strategies
+                        </p>
+                    </div>
+                """)
+
+                memory_analysis_output = gr.Markdown(
+                    value="✨ Select a model and configure your hardware to see memory requirements and optimization recommendations.",
+                    elem_classes="memory-card"
+                )
 
         # Generate Button
         with gr.Row():
             with gr.Column():
-                gr.HTML("""
-                    <div style="text-align: center; margin: 2rem 0;">
-                    </div>
-                """)
                 generate_btn = gr.Button(
                     "✨ Generate Optimized Code",
                     variant="primary",
@@ -991,85 +983,54 @@ def create_gradio_interface():
 
         # Generated Code Section
         with gr.Group(elem_classes="ultra-glass"):
-            gr.HTML("""
-                <div class="section-header" style="text-align: center; position: relative; overflow: hidden;">
-                    <div style="position: absolute; top: 0; left: 0; right: 0; bottom: 0; background: linear-gradient(45deg, rgba(99, 102, 241, 0.1), rgba(139, 92, 246, 0.1)); border-radius: 16px; z-index: -1;"></div>
-                    <h3 style="margin: 0 0 0.5rem 0; color: #1e293b; font-size: 1.5rem; font-weight: 700; text-shadow: 0 2px 4px rgba(0,0,0,0.1);">
-                        💻 Generated Code
-                    </h3>
-                    <p style="margin: 0; color: #64748b; font-size: 1rem; font-weight: 500;">
-                        ✨ Ultra-optimized Python code with hardware-specific acceleration
-                    </p>
-                    <div style="margin-top: 1rem; padding: 0.75rem 1.5rem; background: linear-gradient(90deg, rgba(34, 197, 94, 0.1), rgba(59, 130, 246, 0.1)); border-radius: 12px; border: 1px solid rgba(34, 197, 94, 0.2);">
-                        <span style="color: #059669; font-weight: 600; font-size: 0.9rem;">
-                            🚀 Ready-to-run • Memory optimized • Performance tuned
-                        </span>
-                    </div>
-                </div>
-            """)
-
-            # Code Summary
-            code_summary = gr.Markdown(
-                value="🎯 Generated code summary will appear here after generation.",
-                elem_classes="memory-card"
-            )
-
             # Code Output
             code_output = gr.Code(
-                label="🚀 Hardware-Optimized Diffusion Pipeline",
+                label="Generated Code",
                 language="python",
                 lines=20,
                 interactive=True,
                 show_label=True,
                 elem_classes="code-container",
-                value="# 🎨 Your optimized diffusion code will appear here after generation\n# Click 'Generate Optimized Code' to create hardware-specific Python code\n\nprint('✨ Ready to generate amazing AI art with optimized performance!')"
+                show_line_numbers=False,
+                value="# Your optimized diffusion code will appear here after generation\n# Click 'Generate Optimized Code' to create hardware-specific Python code\n\nprint('Ready to generate AI art with optimized performance!')"
            )
 
        def on_gpu_vendor_change(vendor):
            """Handle GPU vendor selection and update series dropdown."""
            if vendor == "Custom (Manual Input)":
-                return (gr.update(visible=True),
-                        gr.update(visible=False, choices=[]),
+                return (gr.update(visible=False, choices=[]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "CPU Only":
-                return (gr.update(visible=False),
-                        gr.update(visible=False, choices=[]),
+                return (gr.update(visible=False, choices=[]),
                        gr.update(visible=False, choices=[]),
                        "", 0)
            elif vendor == "NVIDIA Consumer (GeForce RTX)":
-                return (gr.update(visible=False),
-                        gr.update(visible=True, choices=["RTX 50 Series", "RTX 40 Series", "RTX 30 Series"]),
+                return (gr.update(visible=True, choices=["RTX 50 Series", "RTX 40 Series", "RTX 30 Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "NVIDIA Professional (RTX A-Series)":
-                return (gr.update(visible=False),
-                        gr.update(visible=True, choices=["RTX A6000 Series", "RTX A5000 Series", "RTX A4000 Series"]),
+                return (gr.update(visible=True, choices=["RTX A6000 Series", "RTX A5000 Series", "RTX A4000 Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "NVIDIA Data Center":
-                return (gr.update(visible=False),
-                        gr.update(visible=True, choices=["Blackwell (B-Series)", "Hopper (H-Series)", "Ada Lovelace (L-Series)", "Ampere (A-Series)", "Volta/Tesla"]),
+                return (gr.update(visible=True, choices=["Blackwell (B-Series)", "Hopper (H-Series)", "Ada Lovelace (L-Series)", "Ampere (A-Series)", "Volta/Tesla"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "Apple Silicon":
-                return (gr.update(visible=False),
-                        gr.update(visible=True, choices=["M4 Series", "M3 Series", "M2 Series", "M1 Series"]),
+                return (gr.update(visible=True, choices=["M4 Series", "M3 Series", "M2 Series", "M1 Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "AMD":
-                return (gr.update(visible=False),
-                        gr.update(visible=True, choices=["Radeon RX 7000", "Radeon RX 6000", "Instinct MI Series"]),
+                return (gr.update(visible=True, choices=["Radeon RX 7000", "Radeon RX 6000", "Instinct MI Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "Intel":
-                return (gr.update(visible=False),
-                        gr.update(visible=True, choices=["Arc A-Series"]),
+                return (gr.update(visible=True, choices=["Arc A-Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            else:
-                return (gr.update(visible=True),
-                        gr.update(visible=False, choices=[]),
+                return (gr.update(visible=False, choices=[]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
 
@@ -1146,10 +1107,10 @@ def create_gradio_interface():
             else:
                 return model, gr.update()
 
-        def get_final_gpu_name(vendor, series, model, custom_name):
-            """Get the final GPU name based on vendor selection or custom input."""
+        def get_final_gpu_name(vendor, series, model):
+            """Get the final GPU name based on vendor selection."""
             if vendor == "Custom (Manual Input)":
-                return custom_name
+                return "Custom GPU"
             elif vendor == "CPU Only":
                 return ""
             elif model and "(" in model and "GB" in model:
@@ -1157,7 +1118,44 @@ def create_gradio_interface():
             elif model:
                 return model
             else:
-                return custom_name
+                return vendor if vendor != "Custom (Manual Input)" else "Custom GPU"
+
+        def update_hardware_accordion_title(platform, gpu_vendor, gpu_model, vram_gb, ram_gb):
+            """Update hardware accordion title with current configuration."""
+            final_gpu = get_final_gpu_name(gpu_vendor, "", gpu_model)
+            if not final_gpu:
+                final_gpu = gpu_vendor if gpu_vendor != "Custom (Manual Input)" else "Custom GPU"
+
+            # Extract GPU name and VRAM for cleaner display
+            gpu_display = final_gpu
+            if gpu_model and "(" in gpu_model and "GB" in gpu_model:
+                # Extract clean GPU name with VRAM from model selection
+                gpu_display = gpu_model
+            elif final_gpu and vram_gb:
+                gpu_display = f"{final_gpu} ({vram_gb}GB)"
+
+            return f"⚙️ Hardware: {platform} | {gpu_display} | {ram_gb}GB RAM"
+
+        def update_model_accordion_title(model_name, dtype_selection, width, height, inference_steps, memory_analysis_text=""):
+            """Update model accordion title with current configuration including memory info."""
+            model_short = model_name.split("/")[-1] if "/" in model_name else model_name
+            dtype_short = dtype_selection
+
+            # Extract memory info for title
+            memory_info = ""
+            if memory_analysis_text and not memory_analysis_text.startswith("Select a model") and "Error" not in memory_analysis_text:
+                lines = memory_analysis_text.split('\n')
+                for line in lines:
+                    if "Memory Requirements:" in line or "estimated" in line.lower():
+                        if "GB" in line:
+                            import re
+                            gb_match = re.search(r'(\d+\.?\d*)\s*GB', line)
+                            if gb_match:
+                                memory_info = f" | {gb_match.group(1)}GB req"
+                            break
+
+            return f"🤖 Model: {model_short} | {dtype_short} | {width}×{height} | {inference_steps} steps{memory_info}"
+
 
         def update_memory_analysis(model_name, vram_gb):
             """Update memory analysis in real-time based on selections."""
@@ -1195,7 +1193,7 @@ def create_gradio_interface():
         gpu_vendor.change(
             on_gpu_vendor_change,
             inputs=[gpu_vendor],
-            outputs=[gpu_name_custom, gpu_series, gpu_model, gpu_name, vram_gb]
+            outputs=[gpu_series, gpu_model, gpu_name, vram_gb]
         ).then(
             update_memory_analysis,
             inputs=[model_name, vram_gb],
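The outputs list shrinks from five components to four because `on_gpu_vendor_change` no longer updates `gpu_name_custom`; in Gradio, a handler's return values are assigned to `outputs` positionally, so the two lists must stay the same length. A small, self-contained sketch of that contract with toy component names (illustrative only, not the app's components):

```python
# Toy sketch of the positional mapping between a handler's return values
# and its outputs list; component names here are illustrative only.
import gradio as gr

def pick(vendor):
    # One value per output component, in the same order as outputs=[...].
    return (gr.update(visible=vendor != "CPU Only", choices=[]),  # series dropdown
            gr.update(visible=False, choices=[]),                 # model dropdown
            "",                                                   # selected GPU textbox
            gr.update())                                          # VRAM number (unchanged)

with gr.Blocks() as demo:
    vendor = gr.Dropdown(["AMD", "Intel", "CPU Only"], label="Vendor")
    series = gr.Dropdown([], label="Series")
    model = gr.Dropdown([], label="Model")
    name = gr.Textbox(label="Selected GPU")
    vram = gr.Number(label="VRAM (GB)")
    vendor.change(pick, inputs=[vendor], outputs=[series, model, name, vram])
```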
@@ -1218,12 +1216,6 @@ def create_gradio_interface():
             outputs=memory_analysis_output
         )
 
-        # Update memory analysis when custom GPU name changes
-        gpu_name_custom.change(
-            update_memory_analysis,
-            inputs=[model_name, vram_gb],
-            outputs=memory_analysis_output
-        )
 
         # Update memory analysis when model name or VRAM changes
         model_name.change(
@@ -1244,6 +1236,53 @@ def create_gradio_interface():
             inputs=[model_name, vram_gb],
             outputs=memory_analysis_output
         )
+
+        # Create wrapper functions that return gr.update for accordion labels
+        def update_hardware_accordion(platform, gpu_vendor, gpu_model, vram_gb, ram_gb):
+            title = update_hardware_accordion_title(platform, gpu_vendor, gpu_model, vram_gb, ram_gb)
+            return gr.update(label=title)
+
+        def update_model_accordion(model_name, dtype_selection, width, height, inference_steps, memory_analysis_text=""):
+            title = update_model_accordion_title(model_name, dtype_selection, width, height, inference_steps, memory_analysis_text)
+            return gr.update(label=title)
+
+        # Load initial accordion titles on startup
+        interface.load(
+            update_hardware_accordion,
+            inputs=[platform, gpu_vendor, gpu_model, vram_gb, ram_gb],
+            outputs=hardware_accordion
+        )
+
+        interface.load(
+            update_model_accordion,
+            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
+            outputs=model_accordion
+        )
+
+        # Accordion title update event handlers
+
+        # Hardware accordion title updates
+        for component in [platform, gpu_vendor, gpu_model, vram_gb, ram_gb]:
+            component.change(
+                update_hardware_accordion,
+                inputs=[platform, gpu_vendor, gpu_model, vram_gb, ram_gb],
+                outputs=hardware_accordion
+            )
+
+        # Model accordion title updates (including memory analysis)
+        for component in [model_name, dtype_selection, width, height, inference_steps]:
+            component.change(
+                update_model_accordion,
+                inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
+                outputs=model_accordion
+            )
+
+        # Update model accordion when memory analysis changes
+        memory_analysis_output.change(
+            update_model_accordion,
+            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
+            outputs=model_accordion
+        )
 
         def create_code_summary(generated_code, model_name, final_gpu_name, vram_gb):
             """Create a concise summary of the generated code."""
@@ -1309,9 +1348,9 @@ def create_gradio_interface():
 
             return '\n'.join(filtered_lines)
 
-        def generate_with_combined_gpu_name(gpu_vendor, gpu_series, gpu_model, gpu_name_custom, vram_gb, ram_gb, platform, model_name, dtype_selection, width, height, inference_steps):
-            """Generate code with the correct GPU name from multi-level selection or custom input, including memory analysis."""
-            final_gpu_name = get_final_gpu_name(gpu_vendor, gpu_series, gpu_model, gpu_name_custom)
+        def generate_with_combined_gpu_name(gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform, model_name, dtype_selection, width, height, inference_steps):
+            """Generate code with the correct GPU name from multi-level selection, including memory analysis."""
+            final_gpu_name = get_final_gpu_name(gpu_vendor, gpu_series, gpu_model)
 
             # Constant prompt text
             prompt_text = "A cat holding a sign that says hello world"
@@ -1392,47 +1431,25 @@ def create_gradio_interface():
         code_collapsed = gr.State(value=False)
         full_code_storage = gr.State(value="")
 
-        def generate_and_store_code(gpu_vendor, gpu_series, gpu_model, gpu_name_custom, vram_gb, ram_gb, platform, model_name, dtype_selection, width, height, inference_steps):
-            """Generate code and return summary, code for display, and full code for storage."""
+        def generate_and_store_code(gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform, model_name, dtype_selection, width, height, inference_steps):
+            """Generate code and return code for display and full code for storage."""
             summary, full_code = generate_with_combined_gpu_name(
-                gpu_vendor, gpu_series, gpu_model, gpu_name_custom, vram_gb, ram_gb, platform,
+                gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform,
                 model_name, dtype_selection, width, height, inference_steps
             )
-            return summary, full_code, full_code, False  # summary, display_code, stored_code, reset_collapsed_state
+            return full_code, full_code, False  # display_code, stored_code, reset_collapsed_state
 
         generate_btn.click(
             generate_and_store_code,
             inputs=[
-                gpu_vendor, gpu_series, gpu_model, gpu_name_custom, vram_gb, ram_gb, platform,
+                gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform,
                 model_name, dtype_selection, width, height, inference_steps
             ],
-            outputs=[code_summary, code_output, full_code_storage, code_collapsed]
+            outputs=[code_output, full_code_storage, code_collapsed]
         )
 
 
-        # Ultra Premium Footer
-        gr.HTML("""
-            <div class="ultra-glass" style="text-align: center; padding: 3rem 2rem; margin-top: 4rem; position: relative; overflow: hidden;">
-                <div style="position: relative; z-index: 2;">
-                    <h4 style="color: #1e293b; font-size: 1.3rem; margin: 0 0 1rem 0; font-weight: 700;">
-                        ✨ Pro Tips & Insights
-                    </h4>
-                    <p style="color: #475569; font-size: 1rem; margin: 0 0 1.5rem 0; font-weight: 500; line-height: 1.6; max-width: 600px; margin: 0 auto;">
-                        🚀 The generated code includes hardware-specific optimizations for memory efficiency and peak performance<br>
-                        🎯 Fine-tuned for your exact GPU configuration and model requirements
-                    </p>
-                    <div style="margin-top: 2rem;">
-                        <span style="display: inline-block; background: rgba(124, 58, 237, 0.1); padding: 0.75rem 1.5rem; border-radius: 20px; color: #7c3aed; font-size: 0.9rem; backdrop-filter: blur(10px); border: 1px solid rgba(124, 58, 237, 0.2); margin: 0 0.5rem;">
-                            🤖 Powered by Google Gemini 2.5
-                        </span>
-                        <span style="display: inline-block; background: rgba(236, 72, 153, 0.1); padding: 0.75rem 1.5rem; border-radius: 20px; color: #ec4899; font-size: 0.9rem; backdrop-filter: blur(10px); border: 1px solid rgba(236, 72, 153, 0.2); margin: 0 0.5rem;">
-                            ❤️ Built for the Community
-                        </span>
-                    </div>
-                </div>
-            </div>
-        """)
 
     return interface
 
@@ -1442,7 +1459,7 @@ def main():
     interface = create_gradio_interface()
     interface.launch(
         server_name="0.0.0.0",
-        server_port=7860,
+        server_port=7861,
         share=True,
         show_error=True
     )