Update app.py
app.py CHANGED
@@ -68,5 +68,346 @@ model_cfg = "sam2_hiera_s.yaml"
 sam2_model = build_sam2(model_cfg, sam2_checkpoint, device="cuda")
 predictor = SAM2ImagePredictor(sam2_model)

-checkpoint_path = "
-predictor.model.load_state_dict(torch.load(checkpoint_path))
+checkpoint_path = "sam2_lr0.0001_wd0.01_900.torch"
+predictor.model.load_state_dict(torch.load(checkpoint_path))
+
+def display_thread(thread_id):
+    for message in client.beta.threads.messages.list(thread_id=thread_id):
+        display(message.content[0].text.value)
+
+def read_file(filepath, max_pages=None):
+    if filepath.endswith('.pdf'):
+        return read_pdf(filepath, max_pages)
+    elif filepath.endswith('.txt'):
+        return read_text_file(filepath)
+    elif filepath.endswith('.docx'):
+        return read_docx(filepath)
+    elif filepath.endswith('.xlsx'):
+        return read_xlsx(filepath)
+    elif filepath.endswith('.pptx'):
+        return read_pptx(filepath)
+    else:
+        raise ValueError("Unsupported file type")
+
+def read_pdf(filepath, max_pages=None):
+    reader = PdfReader(filepath)
+    pdf_text = ""
+    page_number = 0
+    for page in reader.pages:
+        page_number += 1
+        if max_pages and (page_number > max_pages):
+            break
+        page_text = page.extract_text()
+
+        # Check if page_text is None before proceeding
+        if page_text:
+            # Replace multiple newlines with a space to make it readable
+            page_text = re.sub(r'\n+', ' ', page_text)
+            pdf_text += page_text + f"\nPage Number: {page_number}\n"
+        else:
+            pdf_text += f"\n[No extractable text on Page {page_number}]\n"
+
+    return pdf_text
+
+calc_similarity = lambda x, y: 1 - spatial.distance.cosine(x.data[0].embedding, y.data[0].embedding)
+
+def pretty_print(df):
+    return display(HTML(df.to_html().replace("\\n", "<br>")))
+
+
+def read_directory(directory):
+    assert os.path.exists(directory)
+
+    res_dict = {}
+
+    for filename in os.listdir(directory):
+        if filename.endswith(('pdf', 'txt', 'docx', 'pptx')):
+            filepath = os.path.join(directory, filename)
+            text = read_file(filepath, 2)
+            res_dict[filename] = (filepath, text)
+
+    df = pd.DataFrame(res_dict).T
+    df = df.reset_index()
+    df.columns = ["Filename", "Filepath", "Text"]
+    return df
+
+# Initialize GPT tokenizer
+tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+tokenizer.model_max_length = int(1e30)
+
+def ask_chatbot(question, context, m):
+    max_context_tokens = 16385  # Adjust based on the maximum allowable context tokens
+    truncated_context = truncate_context(context, max_context_tokens)
+    response = client.chat.completions.create(
+        model=m,
+        messages=[
+            {"role": "system", "content": """You are an expert doctor who treats chronic wounds, and you know every single thing about wounds and how to treat them as well as preventing them from getting worse.
+The user will provide the following inputs: Name, Gender, Age, Pre-existing Medical Conditions, Wound Part of Body, Wound Classification, Colors of the Wounds (as percents out of 100).
+Please provide the medical advice in 2 concise paragraphs that must incorporate the following key features every time:
+
+1. **Wound Risk Score (1-100):** You will be given a PDF and you shall review it and use it to aid in your risk score generation. The wound risk score should be between 1-100! Of course, any color percentages **less than 3** shouldn't be taken into consideration when making the score.
+**Make sure to be specific!**
+2. **Medical Advice:** Give the patient bulleted directions on how to monitor and care for their wound. **Make sure to include if the person needs to go see a doctor as soon as possible.**"""},
+            {"role": "user", "content": truncated_context},
+            {"role": "user", "content": question}
+        ]
+    )
+    return response.choices[0].message.content
+
+def truncate_context(context, max_tokens):
+    tokens = tokenizer.encode(context)
+    if len(tokens) > max_tokens:
+        truncated_tokens = tokens[:max_tokens]
+        return tokenizer.decode(truncated_tokens)
+    return context
+
+
+file_content = read_file("Wound Healing Risk Assessment.pdf")
+
+api_key = "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"  # @param {"type":"string"}
+
+os.environ["OPENAI_API_KEY"] = api_key  # %env only works in notebooks; set the environment variable directly
+client = OpenAI()
+model = "gpt-4o-mini"
+
+assistant = client.beta.assistants.create(
+    name="Wound Treater",
+    instructions="""You are an expert doctor who treats chronic wounds, and you know every single thing about wounds and how to treat them as well as preventing them from getting worse.
+The user will provide the following inputs: Name, Gender, Age, Pre-existing Medical Conditions, Wound Part of Body, Wound Classification, Colors of the Wounds (as percents out of 100).
+Please provide the medical advice in 2 concise paragraphs that must incorporate the following key features every time:
+
+1. **Wound Risk Score (1-100):** Generate a wound risk score from 1-100, 1 being no risk and 100 being going to see a medical professional immediately! Of course, any color percentages **less than 3** shouldn't be taken into consideration when making the score.
+**Make sure to be specific and list the components of the wound risk score.**
+2. **Medical Advice:** Give the patient directions on how to monitor and care for their wound. **Make sure to include if the person needs to go see a doctor as soon as possible.**""",
+    model=model)
+
+def get_assistant_response(name="None", gender="None", age="None", conditions="None", bodyPart="None", typeWound="None", red="None", orange="None", yellow="None", magenta="None", white="None", gray="None", black="None"):
+    thread = client.beta.threads.create()
+
+    input_text = "Name: " + str(name) + ", Gender: " + str(gender) + ", Age: " + str(age) + ", Pre-Existing Medical Conditions: " + str(conditions) + ", Part of Body: " + str(bodyPart) + ", Type of Wound: " + str(typeWound) + ", Wound Colors (Red, Orange, Yellow, Magenta, White, Gray, Black): [" + str(red) + ", " + str(orange) + ", " + str(yellow) + ", " + str(magenta) + ", " + str(white) + ", " + str(gray) + ", " + str(black) + "]"
+
+    message = client.beta.threads.messages.create(
+        thread_id=thread.id,
+        role="user",
+        content=input_text)
+    run = client.beta.threads.runs.create(
+        thread_id=thread.id,
+        assistant_id=assistant.id,
+    )
+    sleep(15)  # fixed wait for the assistant run to finish before reading the reply
+
+    return input_text, client.beta.threads.messages.list(thread.id).data[0].content[0].text.value
+
+def get_response_with_context(name="None", gender="None", age="None", conditions="None", bodyPart="None", typeWound="None", red="None", orange="None", yellow="None", magenta="None", white="None", gray="None", black="None"):
+    input_text = "Name: " + str(name) + ", Gender: " + str(gender) + ", Age: " + str(age) + ", Pre-Existing Medical Conditions: " + str(conditions) + ", Part of Body: " + str(bodyPart) + ", Type of Wound: " + str(typeWound) + ", Wound Colors (Red, Orange, Yellow, Magenta, White, Gray, Black): [" + str(red) + ", " + str(orange) + ", " + str(yellow) + ", " + str(magenta) + ", " + str(white) + ", " + str(gray) + ", " + str(black) + "]"
+
+    response = ask_chatbot(input_text, file_content, model)
+
+    return input_text, response
+
+
+wounds = []
+learn = load_learner('model.pkl')
+
+def one_step_inference(image_path, threshold=0.5):
+    image = cv2.imread(image_path)
+    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+
+    predictor.set_image(image)  # compute SAM2 image embeddings
+    high_res_features = [feat[-1].unsqueeze(0) for feat in predictor._features["high_res_feats"]]
+
+    with torch.no_grad():
+        sparse_embeddings, dense_embeddings = predictor.model.sam_prompt_encoder(points=None, boxes=None, masks=None)
+        low_res_masks, _, _, _ = predictor.model.sam_mask_decoder(
+            image_embeddings=predictor._features["image_embed"][-1].unsqueeze(0),
+            image_pe=predictor.model.sam_prompt_encoder.get_dense_pe(),
+            sparse_prompt_embeddings=sparse_embeddings,
+            dense_prompt_embeddings=dense_embeddings,
+            multimask_output=False,
+            repeat_image=False,  # Fixed argument
+            high_res_features=high_res_features,)
+
+    mask = predictor._transforms.postprocess_masks(low_res_masks, predictor._orig_hw[-1])  # upscale to original image size
+    final_mask = (mask > threshold).cpu().detach().numpy()[0][0]
+    final_mask = final_mask.astype("uint8")
+
+    selected_pixels = cv2.bitwise_and(image_rgb, image_rgb, mask=final_mask)
+    selected_pixels = image_hsv[final_mask == 1]  # HSV values of the masked (wound) pixels
+    colors = classify_colors(selected_pixels)
+
+    return colors["Red"], colors["Orange"], colors["Yellow"], colors["Magenta"], colors["White"], colors["Gray"], colors["Black"]
+
+# Replace this function in your original code
+def classify_colors(hsv_pixels):
+    # Define color ranges in HSV
+    color_ranges = {
+        'Red': [(0, 50, 50), (10, 255, 255)],  # Red wraps around
+        'Red2': [(170, 50, 50), (179, 255, 255)],
+        'Orange': [(11, 50, 50), (25, 255, 255)],
+        'Yellow': [(26, 50, 50), (35, 255, 255)],
+        'Green': [(36, 50, 50), (85, 255, 255)],
+        'Cyan': [(86, 50, 50), (95, 255, 255)],
+        'Blue': [(96, 50, 50), (130, 255, 255)],
+        'Purple': [(131, 50, 50), (160, 255, 255)],
+        'Magenta': [(161, 50, 50), (169, 255, 255)],
+        'White': [(0, 0, 200), (179, 55, 255)],  # High brightness, low saturation
+        'Gray': [(0, 0, 50), (179, 50, 200)],  # Low saturation, varying brightness
+        'Black': [(0, 0, 0), (179, 50, 50)]  # Low brightness
+    }
+    # Flatten the HSV pixels to process as a single list
+    hsv_pixels = hsv_pixels.reshape(-1, 3)
+    # Initialize counts for each color
+    color_counts = {color: 0 for color in color_ranges}
+    # Total number of pixels
+    total_pixels = hsv_pixels.shape[0]
+    # Classify each pixel
+    for pixel in hsv_pixels:
+        h, s, v = pixel
+        for color, ranges in color_ranges.items():
+            if isinstance(ranges[0], tuple):  # Handles multiple ranges (e.g., red)
+                lower = ranges[0]
+                upper = ranges[1]
+                if (lower[0] <= h <= upper[0] or lower[0] > upper[0] and (h >= lower[0] or h <= upper[0])) \
+                        and lower[1] <= s <= upper[1] and lower[2] <= v <= upper[2]:
+                    color_counts[color] += 1
+                    break
+            else:
+                lower, upper = ranges
+                if lower[0] <= h <= upper[0] and lower[1] <= s <= upper[1] and lower[2] <= v <= upper[2]:
+                    color_counts[color] += 1
+                    break
+    # Calculate percentages
+    color_counts["Red"] += color_counts["Red2"]
+    del color_counts["Red2"]
+    if(total_pixels == 0):
+        total_pixels = 1
+    color_percentages = {color: (count / total_pixels) * 100 for color, count in color_counts.items()}
+    return color_percentages
+
+def predict_image(image_path):
+    img = PILImage.create(image_path)
+    pred, pred_idx, probs = learn.predict(img)
+
+    return pred
+
+def reveal_group():
+    return gr.update(visible=True)
+
+def hide_group():
+    return gr.update(visible=False)
+
+# Add new wound to the list
+def add_wound(image, partOfBody):
+    wounds.append({"image": image, "description": partOfBody})
+    return image, partOfBody
+
+def clear_inputs(image, partOfBody):
+    image=None
+    partOfBody=""
+    return image, partOfBody
+
+# Initialize Gradio app
+with gr.Blocks(theme=gr.themes.Glass()) as demo:
+    gr.Markdown("<center><h1>Welcome to WoundView!</h1></center>")
+
+    # Sign-up Group
+    with gr.Group() as sign_up:
+        gr.Markdown("<center><h2>New User</h2></center>")
+        name = gr.Textbox(label="Full Name", placeholder="Enter your name here...")
+        gender = gr.Radio(["Male", "Female"], label="Gender")
+        age = gr.Number(label="Age")
+        conditions = gr.CheckboxGroup(["Diabetes", "Peripheral Arterial Disease", "Venous Insufficiency", "Obesity", "Smoking"], label="Pre-Existing Medical Conditions")
+        gr.Markdown("<span style='color: red;'>Some fields were left empty. Please fill them out!</span>", visible=False)
+        sign_up_btn = gr.Button(value="Sign Up", variant="secondary")
+
+    # Home Group
+    with gr.Group(visible=False) as home:
+        gr.Markdown("<center><h2>Wounds</h2></center>")
+        with gr.Row(visible=False) as wound_display:
+            wound_image = gr.Image()
+            with gr.Column():
+                wound_title = gr.Markdown("<center><h2>Wound Description</h2></center>")
+                with gr.Row():
+                    gr.Markdown("<center>Part of Body:</center>")
+                    wound_desc = gr.Textbox(container=False)
+                with gr.Row():
+                    gr.Markdown("<center>Type of Wound:</center>")
+                    wound_classification = gr.Textbox(container=False)
+                gr.Markdown("<center><h4>Colors:</h4></center>")
+                with gr.Row():
+                    gr.Markdown("<center>Red:</center>")
+                    red_percent = gr.Textbox(container=False)
+                with gr.Row():
+                    gr.Markdown("<center>Orange:</center>")
+                    orange_percent = gr.Textbox(container=False)
+                with gr.Row():
+                    gr.Markdown("<center>Yellow:</center>")
+                    yellow_percent = gr.Textbox(container=False)
+                with gr.Row():
+                    gr.Markdown("<center>Magenta:</center>")
+                    magenta_percent = gr.Textbox(container=False)
+                with gr.Row():
+                    gr.Markdown("<center>White:</center>")
+                    white_percent = gr.Textbox(container=False)
+                with gr.Row():
+                    gr.Markdown("<center>Gray:</center>")
+                    gray_percent = gr.Textbox(container=False)
+                with gr.Row():
+                    gr.Markdown("<center>Black:</center>")
+                    black_percent = gr.Textbox(container=False)
+                ai_chat_btn = gr.Button(value="AI ChatBot")
+        add_new_btn = gr.Button(value="Add New")
+
+    # Add New Group
+    with gr.Group(visible=False) as add_new:
+        gr.Markdown("<center><h2>Add New Wound</h2></center>")
+        with gr.Row():
+            image = gr.Image(label="Picture of wound", type="filepath")
+            partOfBody = gr.Radio(["Head", "Arm", "Hand", "Back", "Stomach", "Leg", "Foot"], label="What part of the body is the wound on?")
+        with gr.Row():
+            confirm_add_new_btn = gr.Button(value="Confirm")
+            cancel_add_new_btn = gr.Button(value="Cancel")
+
+    with gr.Group(visible=False) as ai_chat:
+        gr.Markdown("<center><h2>AI Chat</h2></center>")
+        with gr.Column() as gpt:
+            gr.Markdown("<center><h3>Chat GPT</h3></center>")
+            chatGPTInput = gr.Textbox(container=False)
+            chatGPTOutput = gr.Textbox(container=False)
+        cancel_ai_chat_btn = gr.Button(value="Cancel")
+
+    # Button Click Events
+    sign_up_btn.click(hide_group, outputs=sign_up).then(reveal_group, outputs=home)
+
+    add_new_btn.click(hide_group, outputs=home).then(reveal_group, outputs=add_new
+    ).then(clear_inputs,
+           inputs=[image, partOfBody],
+           outputs=[image, partOfBody]
+    )
+
+    confirm_add_new_btn.click(add_wound,
+                              inputs=[image, partOfBody],
+                              outputs=[wound_image, wound_desc]
+    ).then(reveal_group, outputs=home
+    ).then(hide_group, outputs=add_new
+    ).then(reveal_group, outputs=wound_display
+    ).then(predict_image,
+           inputs=image,
+           outputs=wound_classification
+    ).then(one_step_inference,
+           inputs=image,
+           outputs=[red_percent, orange_percent, yellow_percent, magenta_percent, white_percent, gray_percent, black_percent]
+    )
+
+    cancel_add_new_btn.click(hide_group, outputs=add_new).then(reveal_group, outputs=home)
+
+    ai_chat_btn.click(hide_group, outputs=home).then(reveal_group, outputs=ai_chat
+    ).then(get_response_with_context,
+           inputs=[name, gender, age, conditions, partOfBody, wound_classification, red_percent, orange_percent, yellow_percent, magenta_percent, white_percent, gray_percent, black_percent],
+           outputs=[chatGPTInput, chatGPTOutput]
+    )
+
+    cancel_ai_chat_btn.click(hide_group, outputs=ai_chat).then(reveal_group, outputs=home)
+
+demo.launch(share=True)
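
Note: the new read_file() dispatcher also calls read_text_file(), read_docx(), read_xlsx() and read_pptx(), which are not part of this hunk and are presumably defined earlier in app.py. For reference only, a minimal sketch of what such helpers could look like, assuming python-docx, openpyxl and python-pptx; the function names match the calls above, but the bodies are illustrative, not the Space's actual implementation.

# Hypothetical helper sketches -- not part of this commit.
from docx import Document            # python-docx
from openpyxl import load_workbook   # openpyxl
from pptx import Presentation        # python-pptx

def read_text_file(filepath):
    # Plain-text files are returned verbatim.
    with open(filepath, "r", encoding="utf-8", errors="ignore") as f:
        return f.read()

def read_docx(filepath):
    # Concatenate the text of every paragraph in the document.
    doc = Document(filepath)
    return "\n".join(p.text for p in doc.paragraphs)

def read_xlsx(filepath):
    # Join all non-empty cells, one row per line.
    wb = load_workbook(filepath, read_only=True, data_only=True)
    rows = []
    for sheet in wb.worksheets:
        for row in sheet.iter_rows(values_only=True):
            rows.append(", ".join(str(cell) for cell in row if cell is not None))
    return "\n".join(rows)

def read_pptx(filepath):
    # Collect the text frames from every shape on every slide.
    prs = Presentation(filepath)
    texts = []
    for slide in prs.slides:
        for shape in slide.shapes:
            if shape.has_text_frame:
                texts.append(shape.text_frame.text)
    return "\n".join(texts)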