deepugaur committed on
Commit
86e1548
·
verified ·
1 Parent(s): f7cab5b

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -68
app.py DELETED
@@ -1,68 +0,0 @@
1
-
2
-
3
- import streamlit as st
4
- import numpy as np
5
- from tensorflow.keras.models import load_model
6
- from tensorflow.keras.preprocessing.sequence import pad_sequences
7
- from tensorflow.keras.preprocessing.text import Tokenizer
8
-
9
# Constants
MAX_LENGTH = 100  # padded/truncated token-sequence length fed to the model (see pad_sequences below)
TOKENIZER_PATH = "tokenizer.json"  # Assuming the tokenizer is saved separately.
13
# Load pre-trained model
@st.cache_resource
def load_trained_model():
    """Deserialize the saved Keras classifier from disk.

    Decorated with ``st.cache_resource`` so the (expensive) model load
    happens once per Streamlit session rather than on every rerun.

    Returns:
        The Keras model stored in ``deep_learning_model.h5``.
    """
    model_path = "deep_learning_model.h5"
    return load_model(model_path)
17
-
18
# Load tokenizer
@st.cache_resource
def load_tokenizer():
    """Reconstruct the fitted Keras tokenizer from TOKENIZER_PATH.

    Cached with ``st.cache_resource`` so the file is read once per session.

    Returns:
        A ``Tokenizer`` rebuilt via ``tokenizer_from_json``.

    Raises:
        FileNotFoundError: if TOKENIZER_PATH does not exist.
    """
    from tensorflow.keras.preprocessing.text import tokenizer_from_json
    # BUG FIX: tokenizer_from_json expects the raw JSON *string* produced
    # by Tokenizer.to_json(); the original json.load()-ed it into a dict
    # first, which Keras rejects (it calls json.loads internally). Pass
    # the file contents through unparsed, and read with an explicit
    # encoding so behavior doesn't depend on the platform default.
    with open(TOKENIZER_PATH, "r", encoding="utf-8") as f:
        tokenizer_json = f.read()
    return tokenizer_from_json(tokenizer_json)
26
-
27
# Preprocessing function
def preprocess_prompt(prompt, tokenizer, max_length):
    """Turn one raw prompt string into a model-ready padded sequence.

    Args:
        prompt: Raw user prompt text.
        tokenizer: Fitted Keras tokenizer (text -> integer ids).
        max_length: Target length for padding/truncation.

    Returns:
        Array of shape (1, max_length) — presumably int ids; the exact
        dtype comes from pad_sequences' defaults.
    """
    encoded = tokenizer.texts_to_sequences([prompt])
    return pad_sequences(encoded, maxlen=max_length)
32
-
33
# Predict function
def detect_prompt(prompt, model, tokenizer, max_length):
    """Classify a prompt and report the winning class's confidence.

    Args:
        prompt: Raw user prompt text.
        model: Keras binary classifier; ``predict`` yields a score in [0, 1].
        tokenizer: Fitted Keras tokenizer.
        max_length: Sequence length used for preprocessing.

    Returns:
        Tuple ``(label, confidence_pct)`` where label is "Malicious" when
        the score is >= 0.5, otherwise "Valid", and confidence_pct is the
        winning class's probability scaled to a percentage.
    """
    features = preprocess_prompt(prompt, tokenizer, max_length)
    score = model.predict(features)[0][0]
    if score >= 0.5:
        return "Malicious", score * 100
    return "Valid", (1 - score) * 100
40
-
41
import os
import subprocess
import sys

# Streamlit App
st.title("Prompt Injection Detection App")
st.write("Detect and prevent prompt injection attacks using a deep learning model.")


def _train_model():
    """Run train_model.py and surface the outcome in the UI.

    Uses subprocess.run with an argv list (shell=False) instead of the
    original os.system shell string: no shell-injection surface, and the
    exit status is actually checked instead of being discarded.
    """
    result = subprocess.run([sys.executable, "train_model.py"], check=False)
    if result.returncode == 0:
        st.success("Model training complete. Saved as deep_learning_model.h5")
    else:
        st.error("Model training failed; check the server logs for details.")


# BUG FIX: the first-run bootstrap must happen *before* load_trained_model();
# the original performed this existence check at the bottom of the script,
# after load_model() had already crashed on a fresh deployment with no .h5.
if not os.path.exists("deep_learning_model.h5"):
    st.info("Training the model for the first time...")
    _train_model()

# Load model and tokenizer (both cached across reruns by st.cache_resource)
model = load_trained_model()
tokenizer = load_tokenizer()

# Input Section
user_input = st.text_area("Enter a prompt to test:", "")
if st.button("Detect"):
    if user_input:
        label, confidence = detect_prompt(user_input, model, tokenizer, MAX_LENGTH)
        st.write(f"**Predicted Class:** {label}")
        st.write(f"**Confidence Score:** {confidence:.2f}%")
    else:
        st.warning("Please enter a prompt to test.")

# Manual retraining trigger
if st.button("Train Model"):
    _train_model()