PSNbst committed on
Commit
e789b65
·
verified ·
1 Parent(s): 0b350f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -4,6 +4,7 @@ from PIL import Image, ImageChops, ImageFilter
4
  from transformers import CLIPProcessor, CLIPModel, BlipProcessor, BlipForConditionalGeneration
5
  import torch
6
  import matplotlib.pyplot as plt
 
7
 
8
  # 初始化模型
9
  clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
@@ -138,8 +139,8 @@ with gr.Blocks() as demo:
138
 
139
  api_key_input = gr.Textbox(label="API Key", placeholder="输入您的 API Key", type="password")
140
  api_type_input = gr.Radio(label="API 类型", choices=["GPT", "DeepSeek"], value="GPT")
141
- images_a_input = gr.File(label="上传文件夹A图片", file_types=[".png", ".jpg", ".jpeg"], file_count="multiple")
142
- images_b_input = gr.File(label="上传文件夹B图片", file_types=[".png", ".jpg", ".jpeg"], file_count="multiple")
143
  analyze_button = gr.Button("开始批量分析")
144
 
145
  with gr.Row():
 
4
  from transformers import CLIPProcessor, CLIPModel, BlipProcessor, BlipForConditionalGeneration
5
  import torch
6
  import matplotlib.pyplot as plt
7
+ import numpy as np # 修复未导入问题
8
 
9
  # 初始化模型
10
  clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 
139
 
140
  api_key_input = gr.Textbox(label="API Key", placeholder="输入您的 API Key", type="password")
141
  api_type_input = gr.Radio(label="API 类型", choices=["GPT", "DeepSeek"], value="GPT")
142
+ images_a_input = gr.File(label="上传文件夹A图片", file_types=[".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".gif", ".webp"], file_count="multiple")
143
+ images_b_input = gr.File(label="上传文件夹B图片", file_types=[".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".gif", ".webp"], file_count="multiple")
144
  analyze_button = gr.Button("开始批量分析")
145
 
146
  with gr.Row():