Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,33 +1,22 @@
|
|
1 |
-
import torch
|
2 |
-
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
3 |
import gradio as gr
|
4 |
-
|
5 |
|
6 |
-
|
7 |
-
|
8 |
|
9 |
-
def
|
10 |
-
|
11 |
-
|
12 |
-
input_ids = tokenizer.encode(input_text, return_tensors='pt', add_special_tokens=True)
|
13 |
-
|
14 |
-
input_ids = input_ids[:, :1024]
|
15 |
-
|
16 |
-
output = model.generate(input_ids, max_length=50, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id, attention_mask=input_ids)
|
17 |
-
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
|
18 |
|
19 |
-
|
20 |
-
|
|
|
21 |
|
22 |
-
|
23 |
-
ax.bar(["Real", "Fake"], [real_confidence, fake_confidence], color=['blue', 'red'])
|
24 |
-
plt.ylim(0, 1)
|
25 |
-
plt.xticks(rotation=45)
|
26 |
-
plt.title("Prediction Confidence")
|
27 |
-
|
28 |
-
return fig
|
29 |
-
|
30 |
-
input_text = gr.Textbox(lines=7, label="Paste the news article here", placeholder="Example: Scientists have discovered a new cure for cancer.")
|
31 |
-
output_graph = gr.Image(label="Prediction Confidence")
|
32 |
|
33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from transformers import pipeline

# Load a text-classification pipeline (NOT text generation — the previous
# comment was wrong: `pipeline("text-classification", ...)` builds a
# sequence-classification head, not a generator).
# NOTE(review): bare "gpt2" ships no fine-tuned classification head, so the
# head is randomly initialized and the labels/scores are effectively
# meaningless for fake-news detection. Swap in a model fine-tuned for this
# task (e.g. any sequence-classification checkpoint trained on a fake-news
# dataset) — TODO confirm intended model.
classifier = pipeline("text-classification", model="gpt2")
def analyze_text(text):
    """Classify *text* with the module-level pipeline.

    Returns a dict containing the predicted label and its confidence
    expressed as a percentage.
    """
    # The pipeline returns a list of {label, score} dicts; keep the top one.
    prediction = classifier(text)[0]

    return {
        "Result": prediction['label'],
        "Confidence (%)": prediction['score'] * 100,
    }
# Build the Gradio UI (soft theme): plain text in, plain text out.
demo = gr.Interface(
    fn=analyze_text,
    inputs="text",
    outputs="text",
    theme="soft",
)
demo.launch()