GautamGaur committed
Commit f0496d0 · verified · 1 parent: f9bd353

Update app.py

Files changed (1)
  1. app.py: +48 −9
app.py CHANGED
@@ -16,28 +16,48 @@ from pydantic import BaseModel
 import torch
 from transformers import RobertaTokenizer, RobertaForSequenceClassification
 
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+import torch
+from transformers import RobertaTokenizer, RobertaForSequenceClassification
+from datetime import datetime
+import logging
+
 app = FastAPI()
 
 # Load the tokenizer
 tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
 
-# Load the model
-model_path="model_ai_detection"
-model = RobertaForSequenceClassification.from_pretrained(model_path)
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
-model.eval()
+# Load the first model
+model_path1 = "model_ai_detection"
+model1 = RobertaForSequenceClassification.from_pretrained(model_path1)
+device1 = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model1.to(device1)
+model1.eval()
+
+# Load the second model
+model_path2 = "best-ai-model" # Change this to your second model's path
+model2 = RobertaForSequenceClassification.from_pretrained(model_path2)
+device2 = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model2.to(device2)
+model2.eval()
 
 class TextData(BaseModel):
     text: str
 
+# Set up logging
+logging.basicConfig(filename="logs_value.log", level=logging.INFO)
+
 @app.post("/predict")
 async def predict(data: TextData):
+    timestamp = datetime.now().isoformat()
+    log_entry = f"{timestamp} - {data.text}"
+    logging.info(log_entry)
     inputs = tokenizer(data.text, return_tensors="pt", padding=True, truncation=True)
-    inputs = {k: v.to(device) for k, v in inputs.items()}
+    inputs = {k: v.to(device1) for k, v in inputs.items()}
 
     with torch.no_grad():
-        outputs = model(**inputs)
+        outputs = model1(**inputs)
     probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
     ai_prob = probs[0][1].item() * 100 # Probability of the text being AI-generated
 
@@ -48,7 +68,26 @@ async def predict(data: TextData):
         "message": message
     }
 
-
+@app.post("/predict_v2")
+async def predict_v2(data: TextData):
+    timestamp = datetime.now().isoformat()
+    log_entry = f"{timestamp} - {data.text}"
+    logging.info(log_entry)
+
+    inputs = tokenizer(data.text, return_tensors="pt", padding=True, truncation=True)
+    inputs = {k: v.to(device2) for k, v in inputs.items()}
+
+    with torch.no_grad():
+        outputs = model2(**inputs)
+    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+    ai_prob = probs[0][1].item() * 100 # Probability of the text being AI-generated
+
+    message = "The text is likely generated by AI." if ai_prob > 50 else "The text is likely generated by a human."
+
+    return {
+        "score": ai_prob,
+        "message": message
+    }
 
 @app.get("/")
 async def read_root():
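
For quick verification of this change, a minimal client sketch that exercises both endpoints is below. It is a sketch under stated assumptions, not part of the commit: the app is assumed to be served locally (e.g. uvicorn app:app --port 8000), the sample text is invented, and /predict is assumed to return the same score/message shape that /predict_v2 does.

import requests  # third-party HTTP client, assumed installed

# Assumption: the app is running locally, e.g. `uvicorn app:app --port 8000`.
BASE_URL = "http://localhost:8000"

sample = {"text": "An example passage to score for AI likelihood."}  # hypothetical input

# Original endpoint, backed by model1 (loaded from "model_ai_detection").
r1 = requests.post(f"{BASE_URL}/predict", json=sample)
print("/predict:", r1.json())

# Endpoint added in this commit, backed by model2 (loaded from "best-ai-model").
r2 = requests.post(f"{BASE_URL}/predict_v2", json=sample)
print("/predict_v2:", r2.json())  # e.g. {"score": ..., "message": "..."}

Each call also appends a line to logs_value.log; with the default basicConfig format that entry reads INFO:root:<ISO timestamp> - <submitted text>.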