nonzeroexit committed
Commit 63d3a19 · verified · 1 Parent(s): 44f5cf9

Update app.py

Files changed (1)
1. app.py +109 -90
app.py CHANGED
@@ -9,20 +9,17 @@ from transformers import BertTokenizer, BertModel
 from lime.lime_tabular import LimeTabularExplainer
 from math import expm1
 
-# Load AMP Classifier
-
+# Load AMP Classifier and Scaler
 model = joblib.load("RF.joblib")
 scaler = joblib.load("norm (4).joblib")
 
-# Load ProtBert
-
+# Load ProtBert
 tokenizer = BertTokenizer.from_pretrained("Rostlab/prot_bert", do_lower_case=False)
 protbert_model = BertModel.from_pretrained("Rostlab/prot_bert")
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 protbert_model = protbert_model.to(device).eval()
 
-# Full list of selected features
-
+# Define selected features
 selected_features = ["_SolventAccessibilityC3", "_SecondaryStrC1", "_SecondaryStrC3", "_ChargeC1", "_PolarityC1",
 "_NormalizedVDWVC1", "_HydrophobicityC3", "_SecondaryStrT23", "_PolarizabilityD1001", "_PolarizabilityD2001",
 "_PolarizabilityD3001", "_SolventAccessibilityD1001", "_SolventAccessibilityD2001", "_SolventAccessibilityD3001",
@@ -50,100 +47,122 @@ selected_features = ["_SolventAccessibilityC3", "_SecondaryStrC1", "_SecondarySt
 "GearyAuto_Mutability30", "APAAC1", "APAAC4", "APAAC5", "APAAC6", "APAAC8", "APAAC9", "APAAC12", "APAAC13",
 "APAAC15", "APAAC18", "APAAC19", "APAAC24"]
 
-# LIME Explainer Setup
-
+# Create dummy data for LIME initialization
 sample_data = np.random.rand(100, len(selected_features))
 explainer = LimeTabularExplainer(
-    training_data=sample_data,
-    feature_names=selected_features,
-    class_names=["AMP", "Non-AMP"],
-    mode="classification"
+    training_data=sample_data,
+    feature_names=selected_features,
+    class_names=["AMP", "Non-AMP"],
+    mode="classification"
 )
 
+# Feature extraction from sequence
 def extract_features(sequence):
-    sequence = ''.join([aa for aa in sequence.upper() if aa in "ACDEFGHIKLMNPQRSTVWY"])
-    if len(sequence) < 10:
-        return "Error: Sequence too short."
-    dipeptide_features = AAComposition.CalculateAADipeptideComposition(sequence)
-    filtered_dipeptide_features = {k: dipeptide_features[k] for k in list(dipeptide_features.keys())[:420]}
-    ctd_features = CTD.CalculateCTD(sequence)
-    auto_features = Autocorrelation.CalculateAutoTotal(sequence)
-    pseudo_features = PseudoAAC.GetAPseudoAAC(sequence, lamda=9)
-    all_features_dict = {}
-    all_features_dict.update(ctd_features)
-    all_features_dict.update(filtered_dipeptide_features)
-    all_features_dict.update(auto_features)
-    all_features_dict.update(pseudo_features)
-    feature_df_all = pd.DataFrame([all_features_dict])
-    normalized_array = scaler.transform(feature_df_all.values)
-    normalized_df = pd.DataFrame(normalized_array, columns=feature_df_all.columns)
-    if not set(selected_features).issubset(set(normalized_df.columns)):
-        return "Error: Some selected features are missing from computed features."
-    selected_df = normalized_df[selected_features].fillna(0)
-    return selected_df.values
+    sequence = ''.join([aa for aa in sequence.upper() if aa in "ACDEFGHIKLMNPQRSTVWY"])
+    if len(sequence) < 10:
+        return "Error: Sequence too short."
+
+    try:
+        dipeptide_features = AAComposition.CalculateAADipeptideComposition(sequence)
+        filtered_dipeptide_features = {k: dipeptide_features[k] for k in list(dipeptide_features.keys())[:420]}
+        ctd_features = CTD.CalculateCTD(sequence)
+        auto_features = Autocorrelation.CalculateAutoTotal(sequence)
+        pseudo_features = PseudoAAC.GetAPseudoAAC(sequence, lamda=9)
+
+        all_features_dict = {}
+        all_features_dict.update(ctd_features)
+        all_features_dict.update(filtered_dipeptide_features)
+        all_features_dict.update(auto_features)
+        all_features_dict.update(pseudo_features)
+
+        feature_df_all = pd.DataFrame([all_features_dict])
+        normalized_array = scaler.transform(feature_df_all.values)
+        normalized_df = pd.DataFrame(normalized_array, columns=feature_df_all.columns)
+
+        if not set(selected_features).issubset(normalized_df.columns):
+            return "Error: Some selected features are missing."
+
+        selected_df = normalized_df[selected_features].fillna(0)
+        return selected_df.values
+    except Exception as e:
+        return f"Error in feature extraction: {str(e)}"
 
+# MIC prediction for AMP sequences
 def predictmic(sequence):
-    sequence = ''.join([aa for aa in sequence.upper() if aa in "ACDEFGHIKLMNPQRSTVWY"])
-    if len(sequence) < 10:
-        return {"Error": "Sequence too short or invalid."}
-    seq_spaced = ' '.join(list(sequence))
-    tokens = tokenizer(seq_spaced, return_tensors="pt", padding='max_length', truncation=True, max_length=512)
-    tokens = {k: v.to(device) for k, v in tokens.items()}
-    with torch.no_grad():
-        outputs = protbert_model(**tokens)
-        embedding = outputs.last_hidden_state.mean(dim=1).squeeze().cpu().numpy().reshape(1, -1)
-    bacteria_config = {
-        "E.coli": {"model": "coli_xgboost_model.pkl", "scaler": "coli_scaler.pkl", "pca": None},
-        "S.aureus": {"model": "aur_xgboost_model.pkl", "scaler": "aur_scaler.pkl", "pca": None},
-        "P.aeruginosa": {"model": "arg_xgboost_model.pkl", "scaler": "arg_scaler.pkl", "pca": None},
-        "K.Pneumonia": {"model": "pne_mlp_model.pkl", "scaler": "pne_scaler.pkl", "pca": "pne_pca.pkl"}
-    }
-    mic_results = {}
-    for bacterium, cfg in bacteria_config.items():
-        try:
-            scaler = joblib.load(cfg["scaler"])
-            scaled = scaler.transform(embedding)
-            transformed = joblib.load(cfg["pca"]).transform(scaled) if cfg["pca"] else scaled
-            model = joblib.load(cfg["model"])
-            mic_log = model.predict(transformed)[0]
-            mic = round(expm1(mic_log), 3)
-            mic_results[bacterium] = mic
-        except Exception as e:
-            mic_results[bacterium] = f"Error: {str(e)}"
-    return mic_results
+    sequence = ''.join([aa for aa in sequence.upper() if aa in "ACDEFGHIKLMNPQRSTVWY"])
+    if len(sequence) < 10:
+        return {"Error": "Sequence too short or invalid."}
+
+    seq_spaced = ' '.join(list(sequence))
+    tokens = tokenizer(seq_spaced, return_tensors="pt", padding='max_length', truncation=True, max_length=512)
+    tokens = {k: v.to(device) for k, v in tokens.items()}
+
+    with torch.no_grad():
+        outputs = protbert_model(**tokens)
+        embedding = outputs.last_hidden_state.mean(dim=1).squeeze().cpu().numpy().reshape(1, -1)
+
+    bacteria_config = {
+        "E.coli": {"model": "coli_xgboost_model.pkl", "scaler": "coli_scaler.pkl", "pca": None},
+        "S.aureus": {"model": "aur_xgboost_model.pkl", "scaler": "aur_scaler.pkl", "pca": None},
+        "P.aeruginosa": {"model": "arg_xgboost_model.pkl", "scaler": "arg_scaler.pkl", "pca": None},
+        "K.Pneumonia": {"model": "pne_mlp_model.pkl", "scaler": "pne_scaler.pkl", "pca": "pne_pca.pkl"}
+    }
+
+    mic_results = {}
+    for bacterium, cfg in bacteria_config.items():
+        try:
+            scaler = joblib.load(cfg["scaler"])
+            scaled = scaler.transform(embedding)
+            transformed = joblib.load(cfg["pca"]).transform(scaled) if cfg["pca"] else scaled
+            model = joblib.load(cfg["model"])
+            mic_log = model.predict(transformed)[0]
+            mic = round(expm1(mic_log), 3)
+            mic_results[bacterium] = mic
+        except Exception as e:
+            mic_results[bacterium] = f"Error: {str(e)}"
+
+    return mic_results
 
+# Full prediction pipeline
 def full_prediction(sequence):
-    features = extract_features(sequence)
-    if isinstance(features, str):
-        return features
-    prediction = model.predict(features)[0]
-    probabilities = model.predict_proba(features)[0]
-    amp_result = "Antimicrobial Peptide (AMP)" if prediction == 0 else "Non-AMP"
-    confidence = round(probabilities[0 if prediction == 0 else 1] * 100, 2)
-    result = f"Prediction: {amp_result}\nConfidence: {confidence}%\n"
-    if prediction == 0:
-        mic_values = predictmic(sequence)
-        result += "\nPredicted MIC Values (\u00b5M):\n"
-        for org, mic in mic_values.items():
-            result += f"- {org}: {mic}\n"
-    else:
-        result += "\nMIC prediction skipped for Non-AMP sequences.\n"
-    explanation = explainer.explain_instance(
-        data_row=features[0],
-        predict_fn=model.predict_proba,
-        num_features=10
-    )
-    result += "\nTop Features Influencing Prediction:\n"
-    for feat, weight in explanation.as_list():
-        result += f"- {feat}: {round(weight, 4)}\n"
-    return result
+    features = extract_features(sequence)
+    if isinstance(features, str):
+        return features
+
+    prediction = model.predict(features)[0]
+    probabilities = model.predict_proba(features)[0]
+    amp_result = "Antimicrobial Peptide (AMP)" if prediction == 0 else "Non-AMP"
+    confidence = round(probabilities[prediction] * 100, 2)
+
+    result = f"Prediction: {amp_result}\nConfidence: {confidence}%\n"
+
+    if prediction == 0:
+        mic_values = predictmic(sequence)
+        result += "\nPredicted MIC Values (μM):\n"
+        for org, mic in mic_values.items():
+            result += f"- {org}: {mic}\n"
+    else:
+        result += "\nMIC prediction skipped for Non-AMP sequences.\n"
+
+    explanation = explainer.explain_instance(
+        data_row=features[0],
+        predict_fn=model.predict_proba,
+        num_features=10
+    )
+
+    result += "\nTop Features Influencing Prediction:\n"
+    for feat, weight in explanation.as_list():
+        result += f"- {feat}: {round(weight, 4)}\n"
+
+    return result
 
+# Gradio interface
 iface = gr.Interface(
-    fn=full_prediction,
-    inputs=gr.Textbox(label="Enter Protein Sequence"),
-    outputs=gr.Textbox(label="Results"),
-    title="AMP & MIC Predictor + LIME Explanation",
-    description="Paste an amino acid sequence (\u226510 characters). Get AMP classification, MIC predictions, and LIME interpretability insights."
+    fn=full_prediction,
+    inputs=gr.Textbox(label="Enter Protein Sequence"),
+    outputs=gr.Textbox(label="Results"),
+    title="AMP & MIC Predictor + LIME Explanation",
+    description="Paste an amino acid sequence (≥10 characters). Get AMP classification, MIC predictions, and LIME interpretability insights."
 )
 
-iface.launch(share=True)
+iface.launch(share=True)
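
The `mic = round(expm1(mic_log), 3)` line implies that the per-bacterium regressors predict log1p-transformed MIC values, since `expm1` is the exact inverse of `log1p`; the training-time transform is an assumption inferred from the code, but the round trip itself is exact:

# expm1 undoes log1p exactly, which is the inverse relationship predictmic
# relies on when decoding model output back to a MIC in µM.
from math import expm1, log1p

mic = 12.5                                 # illustrative MIC value in µM
assert abs(expm1(log1p(mic)) - mic) < 1e-9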
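One detail worth noting when reading the new `predictmic`: with `padding='max_length'`, `outputs.last_hidden_state.mean(dim=1)` averages over pad positions as well as real residues. If the MIC models were trained on embeddings pooled the same way, this is self-consistent; the snippet below is only a sketch of the masked-mean alternative, not what the commit implements, and it reuses the `tokens` and `outputs` variables already defined inside `predictmic`:

# Alternative pooling sketch (NOT what this commit implements): weight the
# hidden states by the attention mask so pad positions do not dilute the mean.
mask = tokens["attention_mask"].unsqueeze(-1)              # (1, 512, 1)
summed = (outputs.last_hidden_state * mask).sum(dim=1)     # sum over real tokens only
embedding = (summed / mask.sum(dim=1)).squeeze().cpu().numpy().reshape(1, -1)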
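A quick way to exercise the updated pipeline end to end is a small smoke test outside the Gradio UI. This is a hypothetical usage sketch, not part of the commit: it assumes the repo's model artifacts (RF.joblib, "norm (4).joblib", and the per-bacterium .pkl files) sit in the working directory, and the example peptide is arbitrary.

# smoke_test.py (hypothetical, not part of this commit).
# Note: app.py calls iface.launch(share=True) at module level, so importing it
# also starts the Gradio app; comment that line out, or guard it with
# `if __name__ == "__main__":`, before running a pure function test.
from app import full_prediction

demo_seq = "GIGKFLHSAKKFGKAFVGEIMNS"  # arbitrary 23-residue peptide, illustrative only
print(full_prediction(demo_seq))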