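"""Wound Classification & Treatment Advisor (Gradio app).

A Keras classifier (wound_classifier_model_googlenet.h5) predicts the wound type
from an uploaded image, then OpenRouter's chat completions API is queried for
treatment guidelines for the predicted class.
"""
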
import gradio as gr
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
from PIL import Image
import requests
import json
import os

# Load the model
try:
    model = load_model('wound_classifier_model_googlenet.h5')
    print("โœ… Model loaded successfully")
except Exception as e:
    raise RuntimeError(f"โŒ Model loading failed: {e}")

# OpenRouter.ai Configuration (read the key from the environment / Space secrets; never commit a live key)
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
MODEL_NAME = "mistralai/mistral-7b-instruct"  # Updated model name

input_shape = (224, 224, 3)  # image size the classifier expects: (height, width, channels)

def preprocess_image(image, target_size):
    """Preprocess the input image for the model."""
    try:
        if image is None:
            raise ValueError("No image provided")
        image = image.convert("RGB")
        image = image.resize(target_size)
        return np.array(image) / 255.0
    except Exception as e:
        print(f"โš ๏ธ Image preprocessing error: {e}")
        raise

def get_medical_guidelines(wound_type):
    """Fetch medical guidelines using OpenRouter.ai's API."""
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
        "HTTP-Referer": "https://huggingface.co/spaces/MahatirTusher/Wound_Treatment",
        "X-Title": "Wound Treatment Advisor"
    }
    
    prompt = f"""As a medical professional, provide detailed guidelines for treating a {wound_type} wound. 
    Include: 
    1. First aid steps
    2. Precautions
    3. When to seek professional help
    Output in markdown with clear sections."""
    
    data = {
        "model": MODEL_NAME,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.5
    }
    
    try:
        print(f"๐Ÿš€ Sending request to OpenRouter API for {wound_type}...")
        response = requests.post(OPENROUTER_API_URL, headers=headers, json=data, timeout=10)
        response.raise_for_status()
        
        response_json = response.json()
        print("๐Ÿ”ง Raw API response:", json.dumps(response_json, indent=2))
        
        if "choices" not in response_json:
            return "โš ๏ธ API response format unexpected. Please check logs."
            
        return response_json["choices"][0]["message"]["content"]
        
    except requests.exceptions.HTTPError as e:
        print(f"โŒ HTTP Error: {e.response.status_code} - {e.response.text}")
        return f"API Error: {e.response.status_code} - Check console for details"
    except Exception as e:
        print(f"โš ๏ธ General API error: {str(e)}")
        return f"Error: {str(e)}"

def predict(image):
    """Main prediction function."""
    try:
        # Preprocess image
        input_data = preprocess_image(image, (input_shape[0], input_shape[1]))
        input_data = np.expand_dims(input_data, axis=0)
        print("๐Ÿ–ผ๏ธ Image preprocessed successfully")

        # Load class labels (classes.txt: one label per line, in the model's output order)
        try:
            with open('classes.txt', 'r') as file:
                class_labels = file.read().splitlines()
            print("๐Ÿ“‹ Class labels loaded:", class_labels)
        except Exception as e:
            raise RuntimeError(f"Class labels loading failed: {e}")

        # Verify model compatibility
        if len(class_labels) != model.output_shape[-1]:
            raise ValueError(f"Model expects {model.output_shape[-1]} classes, found {len(class_labels)}")

        # Make prediction
        predictions = model.predict(input_data)
        print("๐Ÿ“Š Raw predictions:", predictions)
        
        results = {class_labels[i]: float(predictions[0][i]) 
                 for i in range(len(class_labels))}
        predicted_class = max(results, key=results.get)
        print(f"๐Ÿ† Predicted class: {predicted_class}")

        # Get medical guidelines
        guidelines = get_medical_guidelines(predicted_class)
        print("๐Ÿ“œ Guidelines generated successfully")

        return results, guidelines

    except Exception as e:
        print(f"๐Ÿ”ฅ Critical error in prediction: {str(e)}")
        return {"Error": str(e)}, ""

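# Sketch of calling predict() directly (e.g. from a quick local test), assuming the
# model weights and classes.txt are in the working directory; the image path below
# is hypothetical:
#   from PIL import Image
#   results, guidelines = predict(Image.open("example_wound.jpg"))
#   print(results)
#   print(guidelines)
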
# Gradio Interface
iface = gr.Interface(
    fn=predict, 
    inputs=gr.Image(type="pil", label="Upload Wound Image"), 
    outputs=[
        gr.Label(num_top_classes=3, label="Classification Results"),
        gr.Markdown(label="Medical Guidelines")
    ],
    live=False,
    title="Wound Classification & Treatment Advisor",
    description="Upload a wound image for AI-powered classification and treatment guidelines.",
    allow_flagging="never"
)

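# Bind on all interfaces so the app is reachable from outside the container;
# 7860 is Gradio's default port (and the one Hugging Face Spaces expects).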
iface.launch(server_name="0.0.0.0", server_port=7860)