Create model.h5
model.h5
ADDED
@@ -0,0 +1,36 @@
+# Example model training script
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout
+from tensorflow.keras.preprocessing.text import Tokenizer
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+import numpy as np
+import pickle
+
+# Sample dataset
+texts = ["This is valid", "This is malicious", "Valid text", "Malicious text"]
+labels = [0, 1, 0, 1]  # 0: Valid, 1: Malicious
+
+# Tokenization
+tokenizer = Tokenizer(num_words=1000)
+tokenizer.fit_on_texts(texts)
+sequences = tokenizer.texts_to_sequences(texts)
+padded_sequences = pad_sequences(sequences, maxlen=50)
+
+# Save the tokenizer
+with open("tokenizer.pkl", "wb") as f:
+    pickle.dump(tokenizer, f)
+
+# Model architecture
+model = Sequential([
+    Embedding(input_dim=1000, output_dim=64, input_length=50),
+    LSTM(64, return_sequences=False),
+    Dropout(0.5),
+    Dense(1, activation="sigmoid")
+])
+
+# Compile and train the model
+model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
+model.fit(padded_sequences, np.array(labels), epochs=10)
+
+# Save the model
+model.save("model.h5")
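For reference, a minimal inference sketch using the two artifacts the script above produces (model.h5 and tokenizer.pkl), assuming both sit in the working directory. The sample strings are illustrative; the maxlen=50 padding must match the value used at training time:

# Inference sketch (assumes model.h5 and tokenizer.pkl from the script above)
import pickle
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load the trained model and the fitted tokenizer
model = load_model("model.h5")
with open("tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)

# Preprocess new text exactly as during training (same tokenizer, same maxlen)
samples = ["This looks valid", "Malicious text here"]
sequences = tokenizer.texts_to_sequences(samples)
padded = pad_sequences(sequences, maxlen=50)

# The sigmoid output is the probability of the "malicious" class (label 1)
probs = model.predict(padded)
for text, p in zip(samples, probs[:, 0]):
    print(f"{text!r} -> malicious probability {p:.3f}")

Note that pickle.load executes arbitrary code during deserialization, so the tokenizer file should only be loaded from a trusted source.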