# Hugging Face Space — Real/Fake News Detector (GPT-2 demo)
"""Gradio demo: a naive "fake news" detector built on GPT-2 text generation."""
import torch
import matplotlib.pyplot as plt
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load GPT-2 once at startup so every request reuses the same weights.
model = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model.eval()  # inference only — disables dropout etc.
def predict_fake_news(text):
    """Generate a GPT-2 continuation of *text* and plot a crude real/fake score.

    NOTE(review): this is a demo heuristic, not a classifier — it only checks
    whether the literal word "fake" appears in the generated continuation.

    Args:
        text: News article text to evaluate.

    Returns:
        matplotlib.figure.Figure: bar chart of the two confidence values.
    """
    input_ids = tokenizer.encode(text, return_tensors='pt')
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        output = model.generate(input_ids, max_length=50, num_return_sequences=1)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    # Binary 1/0 "confidence" depending on the word "fake" appearing.
    fake_confidence = 1 if "fake" in generated_text.lower() else 0
    real_confidence = 1 - fake_confidence
    fig, ax = plt.subplots()
    ax.bar(["Real", "Fake"], [real_confidence, fake_confidence], color=['blue', 'red'])
    # Operate on the Axes object, not the global pyplot state — the global
    # state is unreliable when handlers run concurrently in a web server.
    ax.set_ylim(0, 1)
    ax.tick_params(axis="x", rotation=45)
    ax.set_title("Prediction Confidence")
    return fig
# --- Gradio UI wiring ---
input_text = gr.Textbox(
    lines=7,
    label="Paste the news article here",
    placeholder="Example: Scientists have discovered a new cure for cancer.",
)
# predict_fake_news returns a matplotlib Figure, so the output component must
# be gr.Plot — gr.Image expects image data (array/filepath/PIL), not a Figure.
output_graph = gr.Plot(label="Prediction Confidence")
# The interface has exactly one input component, so each example must be a
# one-element list; the original [text, label] pairs did not match the inputs.
examples = [
    ["New study shows coffee may prevent heart disease."],
    ["Aliens have landed in New York City!"],
    ["Global warming effects becoming more severe."],
]
gr.Interface(
    fn=predict_fake_news,
    inputs=input_text,
    outputs=output_graph,
    title="Real/Fake News Detector",
    theme="soft",
    examples=examples,
).launch()