un-gcp-ize
Browse files
- .dockerignore +8 -0
- .gitignore +2 -1
- COPY_THEN_EDIT.env +3 -0
- core-model-prediction/Dockerfile → Dockerfile +17 -20
- Dockerfile.d/gemma_check.py +26 -0
- NRC-Emotion-Lexicon.csv +0 -0
- README.md +51 -2
- cloudbuild.yaml +0 -28
- core-model-prediction/device_manager.py → device_manager.py +1 -0
- download-huggingface-model.py +87 -0
- core-model-prediction/gemma2b_dependencies.py → gemma2b_dependencies.py +3 -3
- core-model-prediction/hypothesis.py → hypothesis.py +1 -2
- core-model-prediction/main_model.py → main_model.py +3 -1
- {core-model-prediction/models → models}/albert_weights.pth +0 -0
- {core-model-prediction/models → models}/secondary_weights.joblib +0 -0
- core-model-prediction/prediction.py → prediction.py +0 -0
- public-prediction/get_gpt_answer.py +0 -17
- public-prediction/kafka_consumer.py +0 -91
- public-prediction/main.py +0 -6
- public-prediction/predict_custom_model.py +0 -42
- public-prediction/requirements.txt +0 -7
- core-model-prediction/requirements.txt → requirements.txt +4 -2
- {core-model-prediction/scalers → scalers}/scaler-normalized-text-length.joblib +0 -0
- {core-model-prediction/scalers → scalers}/scaler-not-normalized.joblib +0 -0
- {core-model-prediction/scalers → scalers}/secondary_scaler.joblib +0 -0
- core-model-prediction/secondary_model.py → secondary_model.py +0 -0
- core-model-prediction/secondary_model_dependencies.py → secondary_model_dependencies.py +0 -0
.dockerignore
ADDED
@@ -0,0 +1,8 @@
+ Dockerfile
+ .dockerignore
+ .gitignore
+ .hf_home
+ .env
+ venv
+ COPY_THEN_EDIT.env
+ __pycache__
.gitignore
CHANGED
@@ -1,4 +1,5 @@
  __pycache__
  .env
  *.json
- *.ipynb
+ *.ipynb
+ google/gemma-2b
COPY_THEN_EDIT.env
ADDED
@@ -0,0 +1,3 @@
+ # For Huggingface Spaces, define in `Settings -> Variables and secrets` instead
+
+ HUGGINGFACE_TOKEN= # Required
core-model-prediction/Dockerfile → Dockerfile
RENAMED
@@ -7,31 +7,28 @@ WORKDIR /app
  # Copy the current directory contents into the container at /app
  COPY . /app

+ # For Huggingface
+ ENV HF_HOME=/app/.hf_home
+ RUN mkdir .hf_home && chmod -R 777 .hf_home
+
  # Install any needed packages specified in requirements.txt
- RUN
-
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Download Gemma if needed
+ # if building locally, use the flag: --secret id=dotenv,src=.env
+ # if using HF Spaces, define HUGGINGFACE_TOKEN in Settings -> Variables and secrets
+ RUN --mount=type=secret,id=dotenv \
+     --mount=type=secret,id=HUGGINGFACE_TOKEN \
+     python Dockerfile.d/gemma_check.py

  # Download NLTK data
  RUN python -m nltk.downloader punkt wordnet averaged_perceptron_tagger
+ RUN mv /root/nltk_data /nltk_data

  # Unzip wordnet
- RUN
-
- # HF Token args
- ARG HF_TOKEN
-
- # Download HuggingFace model
- RUN python -c "from transformers import AutoTokenizer, AutoModelForCausalLM; \
-     tokenizer = AutoTokenizer.from_pretrained('google/gemma-2b', token='$HF_TOKEN'); \
-     model = AutoModelForCausalLM.from_pretrained('google/gemma-2b', token='$HF_TOKEN'); \
-     tokenizer.save_pretrained('/app/gemma-2b'); \
-     model.save_pretrained('/app/gemma-2b')"
-
- # Model env
- ENV MODEL_DIR=gemma-2b
-
- # Make port 8080 available to the world outside this container
- EXPOSE 8080
+ RUN apt-get update && apt-get install -y unzip
+ RUN unzip /nltk_data/corpora/wordnet.zip -d /nltk_data/corpora/

  # Run uvicorn
-
+ EXPOSE 7860
+ CMD ["uvicorn", "prediction:app", "--host", "0.0.0.0", "--port", "7860"]
Dockerfile.d/gemma_check.py
ADDED
@@ -0,0 +1,26 @@
+ import os, subprocess
+
+ # Check if gemma-2b already exists
+ gemma_dir = os.path.join(os.getcwd(), ".hf_home/google/gemma-2b")
+ print("Checking", gemma_dir, "...")
+ if os.path.isdir(gemma_dir):
+     print("... already exists")
+     exit()
+
+ # Check if in a Huggingface Space
+ if os.path.exists("/run/secrets/HUGGINGFACE_TOKEN"):
+     print("... prefetch not needed")
+     exit()
+
+ # Check if /run/secrets/dotenv file exists
+ if not os.path.isfile("/run/secrets/dotenv"):
+     print("... can't prefetch, can't find --secret dotenv file")
+     exit(1)
+
+ # Read the dotenv file and export the variables
+ with open("/run/secrets/dotenv") as dotenv_file:
+     for line in dotenv_file:
+         if '=' in line:
+             key, value = line.split("=", 1)
+             os.environ[key.strip()] = value.split("#", 1)[0].strip()
+ subprocess.run(["python", "download-huggingface-model.py", "google/gemma-2b"])
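For reference, the dotenv loop above splits each line on the first `=` and strips inline `#` comments before exporting the value. Below is a standalone sketch of how a line shaped like the one in COPY_THEN_EDIT.env resolves; the token value is a placeholder, not part of the commit.

```python
import os

# Same split/strip logic as the loop in gemma_check.py, applied to one line;
# "hf_xxx" is a placeholder value for illustration only.
sample_line = "HUGGINGFACE_TOKEN=hf_xxx  # Required"
key, value = sample_line.split("=", 1)
os.environ[key.strip()] = value.split("#", 1)[0].strip()
print(os.environ["HUGGINGFACE_TOKEN"])  # -> "hf_xxx" (comment and whitespace stripped)
```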
NRC-Emotion-Lexicon.csv
ADDED
The diff for this file is too large to render.
See raw diff
README.md
CHANGED
@@ -1,5 +1,56 @@
+ ---
+ title: Interview AI Detector
+ emoji: π§
+ colorFrom: red
+ colorTo: green
+ sdk: docker
+ pinned: false
+ ---
+
  # Interview AI Detector

+ ## Playground
+ https://panduwana-interview-ai-detector.hf.space/docs
+
+ ## Dev setup
+ Requirements:
+ - Python 3.10.x (may have problems with higher versions)
+ - Accept the agreement for https://huggingface.co/google/gemma-2b
+
+ ```
+ git clone [email protected]:rekrutmen_plus/interview-ai-detector.git
+ cd interview-ai-detector
+ ```
+
+ ### With Docker
+ <details>
+ <summary>Local build/run</summary>
+ ```
+ cp COPY_THEN_EDIT.env .env # then FILL IT OUT
+ pip install python-dotenv transformers huggingface_hub
+ python download-huggingface-model.py google/gemma-2b
+ docker build -t interview-ai-detector --secret id=dotenv,src=.env .
+ PORT=8080 docker run -p $PORT:7860 interview-ai-detector
+ ```
+ Then open: http://localhost:8080/docs/
+ </details>
+ <details>
+ <summary>Using Huggingface Spaces</summary>
+ - define a secret: HUGGINGFACE_TOKEN in the space's settings
+ - git push --force
+ </details>
+
+ ### Without Docker
+ ```
+ python --version # ensure 3.10.x
+ pip install -r requirements.txt
+ python download-huggingface-model.py google/gemma-2b
+ python -m nltk.downloader punkt wordnet averaged_perceptron_tagger
+ unzip ~/nltk_data/corpora/wordnet.zip -d ~/nltk_data/corpora/
+ PORT=8080 uvicorn prediction:app --host 0.0.0.0 --port $PORT
+ ```
+ Then open: http://localhost:8080/docs/
+
  ## Overview

  Interview AI Detector is a machine learning model designed to distinguish between human and AI-generated responses during interviews. The system is composed of two models:

@@ -29,7 +80,6 @@ The model is deployed on Google Vertex AI, with integration managed by a Kafka c
  - **Output**: Final prediction indicating whether the response is human or AI-generated

  ## Deployment
-
  - **Model Deployment**: Vertex AI
  - **Kafka Consumer Deployment**: Compute Engine
  - **API Framework**: FastAPI

@@ -66,7 +116,6 @@ The model is deployed on Google Vertex AI, with integration managed by a Kafka c
  ## Limitations

  - The model is not designed for retraining. The current implementation focuses solely on deployment and prediction.
- - The repository is meant for deployment purposes only and does not support local installation for development.

  ## Author
  Yakobus Iryanto Prasethio
cloudbuild.yaml
DELETED
@@ -1,28 +0,0 @@
- steps:
-   - name: "gcr.io/cloud-builders/docker"
-     dir: "core-model-prediction"
-     entrypoint: "bash"
-     secretEnv: ["_HF_TOKEN"]
-     args:
-       - -c
-       - |
-         docker build -t us-central1-docker.pkg.dev/${PROJECT_ID}/interview-ai-detector/model-prediction:latest --build-arg HF_TOKEN=$$_HF_TOKEN .
-
-   - name: "gcr.io/cloud-builders/docker"
-     args:
-       [
-         "push",
-         "us-central1-docker.pkg.dev/${PROJECT_ID}/interview-ai-detector/model-prediction:latest",
-       ]
-
- options:
-   pool:
-     name: 'projects/${PROJECT_ID}/locations/us-central1/workerPools/ai-detector-builder'
-
- images:
-   - "us-central1-docker.pkg.dev/${PROJECT_ID}/interview-ai-detector/model-prediction:latest"
-
- availableSecrets:
-   secretManager:
-     - versionName: "projects/${PROJECT_ID}/secrets/HF_TOKEN/versions/latest"
-       env: "_HF_TOKEN"
core-model-prediction/device_manager.py → device_manager.py
RENAMED
@@ -9,4 +9,5 @@ class DeviceManager:
              cls._instance = super(DeviceManager, cls).__new__(cls)
              cls._instance.device = torch.device(
                  "cuda" if torch.cuda.is_available() else "cpu")
+             print("using c̶p̶u̶ cuda" if torch.cuda.is_available() else "using c̶u̶d̶a̶ cpu")
          return cls._instance.device
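Since `DeviceManager.__new__` returns the `torch.device` itself rather than the instance, callers can treat the class like a device factory. A minimal usage sketch, assuming the module imports as shown above:

```python
import torch
from device_manager import DeviceManager

device = DeviceManager()           # also prints whether cuda or cpu is in use
x = torch.zeros(2, 3).to(device)   # place a tensor on the selected device
```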
download-huggingface-model.py
ADDED
@@ -0,0 +1,87 @@
+ from huggingface_hub import login, model_info, whoami
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from dotenv import load_dotenv
+ from datetime import datetime
+ import json, argparse, sys, os
+
+ def is_interactive():
+     if sys.stdin.isatty():
+         return True
+     return hasattr(sys, 'ps1')
+
+
+ def try_interactive_login():
+     try:
+         login()
+     except KeyboardInterrupt:
+         print("^C")
+         exit()
+
+
+ def dumps(x):
+     def jsonable(obj):
+         d = {}
+         try:
+             json.dumps(obj)
+         except:
+             if isinstance(obj, datetime):
+                 return obj.isoformat()
+             try:
+                 d = vars(obj)
+             except:
+                 return "..."
+         else:
+             return obj
+         for key, value in d.items():
+             d[key] = jsonable(value)
+         return d
+     return json.dumps(jsonable(x), indent=4, separators=(',', ': '))
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('model_id', type=str)
+ args = parser.parse_args()
+
+ model_id = args.model_id
+ path = os.path.join(os.getcwd(), ".hf_home", model_id)
+ print("Downloading to", path)
+ if os.path.exists(path):
+     print(f"{path} already exists, aborting. (To redownload, rm it first).")
+     exit()
+
+ try:
+     model_info(model_id)
+ except Exception as e:
+     print(e)
+     exit(1)
+
+ try:
+     user_info = whoami()
+ except Exception as e:
+     huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+     if not huggingface_token:
+         load_dotenv()
+         huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+     if huggingface_token:
+         print("Logging in with env HUGGINGFACE_TOKEN")
+         try:
+             login(huggingface_token)
+         except Exception as e:
+             print(e)
+             try_interactive_login()
+     elif is_interactive():
+         try_interactive_login()
+     else:
+         print("Missing env: HUGGINGFACE_TOKEN")
+         exit(1)
+     user_info = whoami()
+ print("Authenticated as:", dumps(user_info))
+
+ try:
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     model = AutoModelForCausalLM.from_pretrained(model_id)
+ except OSError as e:
+     print(e)
+     exit(1)
+
+ tokenizer.save_pretrained(path)
+ model.save_pretrained(path)
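The snapshot written by this script is a plain `save_pretrained` directory, so it can be loaded back by path without network access or a token. A small sketch, assuming the script was run from the repo root with `google/gemma-2b`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Directory written by download-huggingface-model.py (cwd/.hf_home/<model_id>).
local_path = ".hf_home/google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(local_path)
model = AutoModelForCausalLM.from_pretrained(local_path)
```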
core-model-prediction/gemma2b_dependencies.py → gemma2b_dependencies.py
RENAMED
@@ -13,9 +13,9 @@ class Gemma2BDependencies:
      def __new__(cls):
          if cls._instance is None:
              cls._instance = super(Gemma2BDependencies, cls).__new__(cls)
-
-             cls._instance.tokenizer = AutoTokenizer.from_pretrained(
-             cls._instance.model = AutoModelForCausalLM.from_pretrained(
+             hf_token = os.environ.get('HUGGINGFACE_TOKEN', None)
+             cls._instance.tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b", token=hf_token)
+             cls._instance.model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=hf_token)
              cls._instance.device = DeviceManager()
              cls._instance.model.to(cls._instance.device)
          return cls._instance
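A minimal usage sketch of the singleton above (assumes the Gemma weights are already cached locally or `HUGGINGFACE_TOKEN` is set so `from_pretrained` can authenticate):

```python
from gemma2b_dependencies import Gemma2BDependencies

deps = Gemma2BDependencies()  # first call loads the tokenizer and model once
# Tokenize a sample answer and move the tensors to the same device as the model.
inputs = deps.tokenizer("sample interview answer", return_tensors="pt").to(deps.device)
```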
core-model-prediction/hypothesis.py → hypothesis.py
RENAMED
@@ -15,8 +15,7 @@ import zipfile
  class BaseModelHypothesis:
      def __init__(self):
          self.analyzer = SentimentIntensityAnalyzer()
-         self.lexicon_df = pd.read_csv(
-             "https://storage.googleapis.com/interview-ai-detector/higher-accuracy-final-model/NRC-Emotion-Lexicon.csv")
+         self.lexicon_df = pd.read_csv("NRC-Emotion-Lexicon.csv")
          self.emotion_lexicon = self.process_emotion_lexicon()
          self.lemmatizer = nltk.stem.WordNetLemmatizer()
          self.gemma2bdependencies = Gemma2BDependencies()
core-model-prediction/main_model.py → main_model.py
RENAMED
@@ -3,6 +3,7 @@ from transformers import AlbertModel, AlbertTokenizerFast
  import torch.nn as nn
  import torch
  import numpy as np
+ import os


  class AlbertSeparateTransformation(nn.Module):

@@ -68,7 +69,8 @@ class PredictMainModel:

          self.model = AlbertSeparateTransformation(
              self.albert_model).to(self.device)
-
+
+         self.model.load_state_dict(torch.load("models/albert_weights.pth", map_location=self.device))

      def preprocess_input(self, text: str, additional_features: np.ndarray):
          encoding = self.tokenizer.encode_plus(
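The `map_location=self.device` argument added above is what lets a checkpoint saved on a GPU machine load on a CPU-only host such as a free Space. A standalone sketch of the same pattern:

```python
import torch

# Remap checkpoint tensors to whatever device is actually available, so a
# CUDA-saved state dict still loads when only the CPU is present.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
state_dict = torch.load("models/albert_weights.pth", map_location=device)
```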
{core-model-prediction/models → models}/albert_weights.pth
RENAMED
File without changes

{core-model-prediction/models → models}/secondary_weights.joblib
RENAMED
File without changes

core-model-prediction/prediction.py → prediction.py
RENAMED
File without changes
public-prediction/get_gpt_answer.py
DELETED
@@ -1,17 +0,0 @@
- from langchain_openai import ChatOpenAI
- from langchain_core.messages import HumanMessage, SystemMessage
-
-
- class GetGPTAnswer:
-     def __init__(self):
-         self.llm_gpt4o = ChatOpenAI(model="gpt-4o")
-
-     def generate_gpt4o_answer(self, question: str):
-         messages = [
-             SystemMessage(
-                 content="Please answer the following question based solely on your internal knowledge, without external references. Assume you are the human."),
-             HumanMessage(question)
-         ]
-
-         gpt4_answer = self.llm_gpt4o.invoke(messages)
-         return gpt4_answer.content
public-prediction/kafka_consumer.py
DELETED
@@ -1,91 +0,0 @@
- import json
- import os
- import requests
- from kafka import KafkaConsumer
- from get_gpt_answer import GetGPTAnswer
- from typing import List
- from concurrent.futures import ThreadPoolExecutor
- from predict_custom_model import predict_custom_trained_model
- from google.protobuf.json_format import MessageToDict
-
-
- def get_gpt_responses(data: dict[str, any], gpt_helper: GetGPTAnswer):
-     data["gpt4o_answer"] = gpt_helper.generate_gpt4o_answer(data["question"])
-     return data
-
-
- def process_batch(batch: List[dict[str, any]], batch_size: int, gpt_helper: GetGPTAnswer):
-     with ThreadPoolExecutor(max_workers=batch_size) as executor:
-         futures = [executor.submit(
-             get_gpt_responses, data, gpt_helper) for data in batch]
-         results = [future.result() for future in futures]
-
-     predictions = predict_custom_trained_model(
-         instances=results, project=os.environ.get("PROJECT_ID"), endpoint_id=os.environ.get("ENDPOINT_ID"))
-
-     results = []
-     for prediction in predictions:
-         result_dict = {}
-         for key, value in prediction._pb.items():
-             # Ensure that 'value' is a protobuf message
-             if hasattr(value, 'DESCRIPTOR'):
-                 result_dict[key] = MessageToDict(value)
-             else:
-                 print(f"Item {key} is not a convertible protobuf message.")
-         results.append(result_dict)
-
-     return results
-
-
- def send_results_back(full_results: dict[str, any], job_application_id: str):
-     print(f"Sending results back with job_app_id {job_application_id}")
-     url = "https://ta-2-sistem-cerdas-be-vi2jkj4riq-et.a.run.app/api/anti-cheat/result"
-     headers = {
-         "Content-Type": "application/json",
-         "x-api-key": os.environ.get("X-API-KEY")
-     }
-
-     body = {
-         "job_application_id": job_application_id,
-         "evaluations": full_results
-     }
-
-     response = requests.patch(url, json=body, headers=headers)
-     print(f"Data sent with status code {response.status_code}")
-
-
- def consume_messages():
-     consumer = KafkaConsumer(
-         "ai-detector",
-         bootstrap_servers=[os.environ.get("KAFKA_IP")],
-         auto_offset_reset='earliest',
-         client_id="ai-detector-1",
-         group_id="ai-detector",
-         api_version=(0, 10, 2)
-     )
-
-     print("Successfully connected to Kafka at", os.environ.get("KAFKA_IP"))
-
-     BATCH_SIZE = 5
-     gpt_helper = GetGPTAnswer()
-
-     for message in consumer:
-         try:
-             incoming_message = json.loads(json.loads(message.value.decode("utf-8")))
-             full_batch = incoming_message["data"]
-         except json.JSONDecodeError:
-             print("Failed to decode JSON from message:", message.value)
-             print("Continuing...")
-             continue
-
-         print("Parsing successful. Processing job_app_id {0}".format(
-             incoming_message['job_application_id']))
-
-         full_results = []
-         for i in range(0, len(full_batch), BATCH_SIZE):
-             print(f"Processing batch {i} to {i+BATCH_SIZE}")
-             batch = full_batch[i:i+BATCH_SIZE]
-             batch_results = process_batch(batch, BATCH_SIZE, gpt_helper)
-             full_results.extend(batch_results)
-
-         send_results_back(full_results, incoming_message["job_application_id"])
public-prediction/main.py
DELETED
@@ -1,6 +0,0 @@
- from kafka_consumer import consume_messages
- from dotenv import load_dotenv
-
- if __name__ == "__main__":
-     load_dotenv()
-     consume_messages()
public-prediction/predict_custom_model.py
DELETED
@@ -1,42 +0,0 @@
- from typing import Dict, List, Union
- import os
- from google.cloud import aiplatform
- from google.protobuf import json_format
- from google.protobuf.struct_pb2 import Value
- from google.oauth2 import service_account
-
-
- def predict_custom_trained_model(
-     project: str,
-     endpoint_id: str,
-     instances: Union[Dict, List[Dict]],
-     location: str = "us-central1",
-     api_endpoint: str = "us-central1-aiplatform.googleapis.com",
- ):
-     """
-     `instances` can be either single instance of type dict or a list
-     of instances.
-     """
-     # The AI Platform services require regional API endpoints.
-     client_options = {"api_endpoint": api_endpoint}
-
-     # Initialize client that will be used to create and send requests.
-     # This client only needs to be created once, and can be reused for multiple requests.
-     client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)
-     # The format of each instance should conform to the deployed model's prediction input schema.
-     instances = instances if isinstance(instances, list) else [instances]
-     instances = [
-         json_format.ParseDict(instance_dict, Value()) for instance_dict in instances
-     ]
-     parameters_dict = {}
-     parameters = json_format.ParseDict(parameters_dict, Value())
-     endpoint = client.endpoint_path(
-         project=project, location=location, endpoint=endpoint_id
-     )
-     response = client.predict(
-         endpoint=endpoint, instances=instances, parameters=parameters
-     )
-     # The predictions are a google.protobuf.Value representation of the model's predictions.
-     predictions = response.predictions
-
-     return predictions
public-prediction/requirements.txt
DELETED
@@ -1,7 +0,0 @@
- kafka-python
- langchain
- openai
- langchain-openai
- python-dotenv
- google-cloud-aiplatform
- requests
core-model-prediction/requirements.txt → requirements.txt
RENAMED
@@ -1,10 +1,12 @@
  nltk==3.2.4
  vaderSentiment==3.3.2
- pandas
+ pandas>=2.2.0,<3.0.0
+ numpy==1.26.2
  textstat==0.7.3
  scikit-learn==1.2.2
  transformers==4.38.2
  sentence-transformers==2.7.0
  fastapi
  uvicorn
- google-cloud-secret-manager
+ google-cloud-secret-manager
+ python-dotenv
{core-model-prediction/scalers → scalers}/scaler-normalized-text-length.joblib
RENAMED
File without changes

{core-model-prediction/scalers → scalers}/scaler-not-normalized.joblib
RENAMED
File without changes

{core-model-prediction/scalers → scalers}/secondary_scaler.joblib
RENAMED
File without changes

core-model-prediction/secondary_model.py → secondary_model.py
RENAMED
File without changes

core-model-prediction/secondary_model_dependencies.py → secondary_model_dependencies.py
RENAMED
File without changes