test locally #1
by ElPremOoO - opened
codebert_readability_scorer.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e0e0b83b0dc00e03dfc65c24acaf4b242bf97315f56247c4f6e6bc5ec9f0a50e
-size 498672601
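Note: the deleted file above is a Git LFS pointer, not the checkpoint itself; the oid/size lines identify a ~499 MB (498,672,601-byte) binary stored in LFS. Judging from how the old main.py below restores it (RobertaConfig.from_dict(checkpoint['config']) plus load_state_dict on checkpoint['model_state_dict']), it was presumably saved along these lines; a hedged sketch, with num_labels=1 assumed from the sigmoid-on-a-single-logit scoring:

    import torch
    from transformers import RobertaConfig, RobertaForSequenceClassification

    # Hypothetical writer for the checkpoint layout the app expects.
    model = RobertaForSequenceClassification.from_pretrained(
        "microsoft/codebert-base", num_labels=1)  # num_labels=1 is an assumption
    torch.save(
        {
            "config": model.config.to_dict(),        # read back via RobertaConfig.from_dict
            "model_state_dict": model.state_dict(),  # read back via load_state_dict
        },
        "codebert_readability_scorer.pth",
    )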
main.py
CHANGED
@@ -1,66 +1,69 @@
 from flask import Flask, request, jsonify
 import torch
-from transformers import RobertaTokenizer
+from transformers import RobertaTokenizer
 import os
+from transformers import RobertaForSequenceClassification
+import torch.serialization
+import torch
+from transformers import RobertaTokenizer, RobertaForSequenceClassification, Trainer, TrainingArguments
+from torch.utils.data import Dataset
+import pandas as pd
+from sklearn.model_selection import train_test_split
+import numpy as np
+# Initialize Flask app
 app = Flask(__name__)

-# Load model and tokenizer
-checkpoint = torch.load("codebert_readability_scorer.pth", map_location=torch.device('cpu'))
-config = RobertaConfig.from_dict(checkpoint['config'])
-    return model
-
-# Load components
-try:
-    tokenizer = RobertaTokenizer.from_pretrained("./tokenizer_readability")
-    model = load_model()
-    print("Model and tokenizer loaded successfully!")
-except Exception as e:
-    print(f"Error loading model: {str(e)}")
+# Load the trained model and tokenizer
+tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
+torch.serialization.add_safe_globals([RobertaForSequenceClassification])
+
+model = torch.load("model.pth", map_location=torch.device('cpu'), weights_only=False)  # Load the trained model
+
+# Ensure the model is in evaluation mode
+model.eval()

 @app.route("/")
 def home():
     return request.url

+# @app.route("/predict", methods=["POST"])
+@app.route("/predict")
 def predict():
-        code,
-        truncation=True,
-        padding='max_length',
-        max_length=512,
-        return_tensors='pt'
-    )
-    except Exception as e:
-        return jsonify({"error": str(e)}), 500
+    print("Received code:", request.get_json()["code"])
+    code = request.get_json()["code"]
+    # Load saved weights and config
+    checkpoint = torch.load("codebert_vulnerability_scorer.pth")
+    config = RobertaConfig.from_dict(checkpoint['config'])
+
+    # Rebuild the model with correct architecture
+    model = RobertaForSequenceClassification(config)
+    model.load_state_dict(checkpoint['model_state_dict'])
+    model.eval()
+
+    # Load tokenizer
+    tokenizer = RobertaTokenizer.from_pretrained('./tokenizer')
+
+    # Prepare input
+    inputs = tokenizer(
+        code,
+        truncation=True,
+        padding='max_length',
+        max_length=512,
+        return_tensors='pt'
+    )
+
+    # Make prediction
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    score = torch.sigmoid(outputs.logits).item()
+    return score

+# Run the Flask app
 if __name__ == "__main__":
+    app.run(host="0.0.0.0", port=7860)
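Review note on the updated predict(): RobertaConfig is referenced without an import, the checkpoint and tokenizer are reloaded on every request, the route only accepts GET (the methods=["POST"] decorator is commented out) while still reading a JSON body, and Flask cannot serialize the bare float returned by `return score`. A minimal corrected sketch under the same assumptions (checkpoint layout and file names as in the diff above):

    from flask import Flask, request, jsonify
    import torch
    from transformers import RobertaConfig, RobertaTokenizer, RobertaForSequenceClassification

    app = Flask(__name__)

    # Load everything once at startup rather than on each request.
    checkpoint = torch.load("codebert_vulnerability_scorer.pth", map_location="cpu")
    config = RobertaConfig.from_dict(checkpoint["config"])
    model = RobertaForSequenceClassification(config)
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()
    tokenizer = RobertaTokenizer.from_pretrained("./tokenizer")

    @app.route("/predict", methods=["POST"])
    def predict():
        code = request.get_json()["code"]
        inputs = tokenizer(code, truncation=True, padding="max_length",
                           max_length=512, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        score = torch.sigmoid(outputs.logits).item()
        return jsonify({"score": score})  # jsonify the float; Flask rejects a bare float

    if __name__ == "__main__":
        app.run(host="0.0.0.0", port=7860)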
tokenizer_readability/merges.txt
DELETED
The diff for this file is too large to render.
See raw diff
tokenizer_readability/special_tokens_map.json
DELETED
@@ -1,51 +0,0 @@
-{
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
-}
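The deleted special-tokens map is stock RoBERTa, which is presumably why this PR can drop the local tokenizer_readability/ copy and load from the Hub instead (microsoft/codebert-base, as in the new main.py). A quick sanity check, assuming Hub access:

    from transformers import RobertaTokenizer

    # The Hub tokenizer should carry the same special tokens as the deleted local copy.
    tok = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
    assert tok.bos_token == "<s>" and tok.cls_token == "<s>"
    assert tok.eos_token == "</s>" and tok.sep_token == "</s>"
    assert tok.mask_token == "<mask>" and tok.pad_token == "<pad>"
    assert tok.unk_token == "<unk>"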
tokenizer_readability/tokenizer.json
DELETED
The diff for this file is too large to render.
See raw diff
tokenizer_readability/tokenizer_config.json
DELETED
@@ -1,58 +0,0 @@
-{
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<pad>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "3": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "50264": {
-      "content": "<mask>",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<s>",
-  "clean_up_tokenization_spaces": false,
-  "cls_token": "<s>",
-  "eos_token": "</s>",
-  "errors": "replace",
-  "extra_special_tokens": {},
-  "mask_token": "<mask>",
-  "model_max_length": 512,
-  "pad_token": "<pad>",
-  "sep_token": "</s>",
-  "tokenizer_class": "RobertaTokenizer",
-  "trim_offsets": true,
-  "unk_token": "<unk>"
-}
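Likewise, the added_tokens_decoder ids in the deleted config (0 through 3, plus 50264 for <mask>) are RoBERTa's standard layout; a short check against the Hub tokenizer, under the same Hub-access assumption:

    from transformers import RobertaTokenizer

    tok = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
    print(tok.convert_ids_to_tokens([0, 1, 2, 3, 50264]))
    # expected: ['<s>', '<pad>', '</s>', '<unk>', '<mask>']
    print(tok.model_max_length)  # 512, matching the deleted config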
tokenizer_readability/vocab.json
DELETED
The diff for this file is too large to render.
See raw diff