MahatirTusher committed on
Commit 3a8ed2e · verified · 1 Parent(s): 5092eb8

Update app.py

Files changed (1)
  1. app.py +50 -37
app.py CHANGED
@@ -3,6 +3,8 @@ import tensorflow as tf
 from tensorflow.keras.models import load_model
 import numpy as np
 from PIL import Image
+import requests
+import json
 
 # Load the model
 try:
@@ -10,59 +12,70 @@ try:
 except Exception as e:
     raise RuntimeError(f"Error loading model: {e}")
 
-# Define the input shape
+# OpenRouter.ai Configuration
+OPENROUTER_API_KEY = "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"
+OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
+MODEL_NAME = "mistralai/mistral-small-24b-instruct-2501:free"  # Mistral model via OpenRouter
+
 input_shape = (224, 224, 3)
 
 def preprocess_image(image, target_size):
+    # ... (keep your existing preprocessing code) ...
+
+def get_medical_guidelines(wound_type):
     """
-    Preprocess the input image for the model.
-    - Resize the image to the target size.
-    - Normalize pixel values to the range [0, 1].
+    Fetch medical guidelines using OpenRouter.ai's Mistral model.
     """
-    if image is None:
-        raise ValueError("No image provided")
-    image = image.convert("RGB")  # Ensure the image is in RGB mode
-    image = image.resize(target_size)
-    image_array = np.array(image)
-    image_array = image_array / 255.0  # Normalize the image
-    return image_array
+    headers = {
+        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
+        "Content-Type": "application/json",
+        "HTTP-Referer": "https://your-huggingface-space-url.com",  # Optional
+        "X-Title": "Wound Classifier"  # Optional
+    }
+
+    prompt = f"""
+    As a medical professional, provide detailed guidelines for treating a {wound_type} wound.
+    Include steps for first aid, precautions, and when to seek professional help.
+    """
+
+    data = {
+        "model": MODEL_NAME,
+        "messages": [{"role": "user", "content": prompt}],
+        "temperature": 0.7  # Adjust for creativity vs. precision
+    }
+
+    try:
+        response = requests.post(OPENROUTER_API_URL, headers=headers, json=data)
+        response.raise_for_status()
+        result = response.json()
+        return result["choices"][0]["message"]["content"]
+    except Exception as e:
+        return f"Error fetching guidelines: {str(e)}"
 
 def predict(image):
-    """
-    Predict the class probabilities for the input image.
-    - Preprocess the image.
-    - Predict using the loaded model.
-    - Return results as a dictionary with class labels and probabilities.
-    """
     try:
-        # Preprocess the image
-        input_data = preprocess_image(image, (input_shape[0], input_shape[1]))
-        input_data = np.expand_dims(input_data, axis=0)  # Add batch dimension
+        # ... (keep your existing preprocessing and prediction code) ...
 
-        # Load class labels
-        try:
-            with open('./classes.txt', 'r') as file:
-                class_labels = file.read().splitlines()
-        except FileNotFoundError:
-            raise RuntimeError("Class labels file 'classes.txt' not found.")
+        # After getting `predicted_class`:
+        guidelines = get_medical_guidelines(predicted_class)
 
-        if len(class_labels) != model.output_shape[-1]:
-            raise ValueError("Mismatch between model output and class labels.")
-
-        # Predict probabilities
-        predictions = model.predict(input_data)
-        results = {class_labels[i]: float(predictions[0][i]) for i in range(len(class_labels))}
-        return results
+        return {
+            "predictions": results,  # Your existing classification results
+            "treatment_guidelines": guidelines  # From OpenRouter.ai
+        }
+
     except Exception as e:
         return {"error": str(e)}
 
-# Create a Gradio interface
+# Update Gradio interface to show both outputs
 iface = gr.Interface(
     fn=predict,
     inputs=gr.Image(type="pil"),
-    outputs=gr.Label(num_top_classes=18),  # Adjust num_top_classes as needed
+    outputs=[
+        gr.Label(num_top_classes=18, label="Classification Results"),
+        gr.Textbox(label="Medical Guidelines", lines=5)
+    ],
     live=True
 )
 
-# Launch the Gradio interface
-iface.launch(server_name="0.0.0.0", server_port=7860)
+iface.launch(server_name="0.0.0.0", server_port=7860)
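
Note that in the committed file the body of preprocess_image and the first half of predict are only placeholder comments. Below is a minimal sketch of how those elided sections could be filled in, reusing the preprocessing and prediction code this commit removes; the np.argmax step that produces predicted_class, and the tuple return (one value per Gradio output component, since a plain dict would not map onto the Label/Textbox pair), are assumptions rather than part of the commit.

def preprocess_image(image, target_size):
    """Resize to target_size, convert to RGB, and scale pixel values to [0, 1]."""
    if image is None:
        raise ValueError("No image provided")
    image = image.convert("RGB")
    image = image.resize(target_size)
    return np.array(image) / 255.0

def predict(image):
    try:
        input_data = preprocess_image(image, (input_shape[0], input_shape[1]))
        input_data = np.expand_dims(input_data, axis=0)  # Add batch dimension

        with open('./classes.txt', 'r') as file:
            class_labels = file.read().splitlines()

        predictions = model.predict(input_data)
        results = {class_labels[i]: float(predictions[0][i]) for i in range(len(class_labels))}

        # Assumption: the top-scoring label is what the commit calls `predicted_class`
        predicted_class = class_labels[int(np.argmax(predictions[0]))]
        guidelines = get_medical_guidelines(predicted_class)

        # Two output components (Label, Textbox), so return one value per component
        return results, guidelines
    except Exception as e:
        return {"error": str(e)}, f"Error: {e}"

With this return shape, gr.Label renders the class probabilities and gr.Textbox shows the fetched guidelines.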
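
The indexing result["choices"][0]["message"]["content"] in get_medical_guidelines assumes the OpenAI-style response body returned by OpenRouter's chat/completions endpoint. An abridged, illustrative sketch of that shape (field values are examples; fields such as usage are omitted):

# Abridged OpenRouter chat-completions response; values are illustrative.
result = {
    "id": "gen-...",
    "model": "mistralai/mistral-small-24b-instruct-2501:free",
    "choices": [
        {
            "message": {
                "role": "assistant",
                "content": "1. Rinse the wound under clean running water...",
            },
            "finish_reason": "stop",
        }
    ],
}
print(result["choices"][0]["message"]["content"])  # the guideline text returned to predict()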