Update app.py
app.py CHANGED
@@ -1,6 +1,74 @@
-import gradio as gr
-
-def greet (name):
-    return f"Hello, {name}!"
-iface = gr.Interface(fn=greet, inputs="text",outputs="text")
+import re
+import gradio as gr
+import pandas as pd
+import torch
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+# Pick the device used for generation in make_formal below
+device = "cuda" if torch.cuda.is_available() else "cpu"
+# print(f"Using device: {device}")
+
+model_name = "rajistics/informal_formal_style_transfer"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
+
+# Load the Gen-Z slang dataset and keep the two relevant columns
+df = pd.read_csv("hf://datasets/thesherrycode/gen-z-slangs-translation/gen_z_slangs_translation.csv")
+df = df[["Gen-Z Slang", "Plain English"]].dropna().drop_duplicates()
+df.columns = ["slang", "formal"]
+
+# Sample a few real slang rows from the dataset as extra demo examples
+gradio_examples = [["[Gen-Z Example] " + row["slang"]] for _, row in df.sample(3, random_state=1).iterrows()]
+
+examples = [
+    ["hey, can u send me the stuff by tonight?"],
+    ["yo sorry i missed the call, was busy"],
+    ["lemme know if ur free tmrw to chat abt the thing"],
+    ["bro the file’s messed up, fix it asap pls"],
+    ["i'm out rn, text u later"]
+]
+
+# Strip the instruction phrase if the model echoes it back in its output
+def clean_output(output: str):
+    return re.sub(r"(?i)make this sentence more formal", "", output).strip()
+
+# Main function: wrap the input in the style-transfer prompt and generate
+def make_formal(text):
+    if not text.strip():
+        return "⚠️ Please enter some text."
+    prompt = "[Casual] " + text.strip() + " [Formal]"
+    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(device)
+    outputs = model.generate(**inputs, max_new_tokens=100)
+    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return clean_output(result)
+
+iface = gr.Interface(
+    fn=make_formal,
+    inputs=gr.Textbox(
+        lines=3,
+        label="🗣️ Your Slang / Casual Text",
+        placeholder="e.g., yo can u help me out real quick?"
+    ),
+    outputs=gr.Textbox(
+        label="📄 Formal & Polished Version",
+        lines=4,
+        interactive=True
+    ),
+    title="💬 Text Polisher: From Slang to Formal",
+    description=(
+        "Transform casual, Gen-Z slang, or unpolished English into clear, professional language. 🧠✨\n\n"
+        "This demo uses a text generation model to rewrite input sentences with improved formality — great for school, work, or writing more professionally.\n\n"
+        "✍️ The output is editable — feel free to tweak before using/copying!\n\n"
+    ),
+    article=(
+        "**Project by Jonathan Friedman** \n"
+        "📌 **Task:** Text Generation (Formality Transfer using Sequence-to-Sequence) \n"
+        "🧠 **Model:** rajistics/informal_formal_style_transfer \n"
+        "📚 **Dataset Used:** thesherrycode/gen-z-slangs-translation (used to generate real-world Gen-Z slang examples) \n"
+        "🛠️ **Tech Stack:** 🤗 Transformers, Hugging Face Datasets, Gradio \n"
+    ),
+    examples=examples + gradio_examples,
+    theme="soft"
+)
+
 iface.launch()
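For reference, the rewrite step in the new app.py can be smoke-tested outside Gradio. This is a minimal sketch assuming the same checkpoint and the same "[Casual] ... [Formal]" prompt wrapping used above; the sample sentence is only an illustration.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "rajistics/informal_formal_style_transfer"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Wrap a casual sentence in the prompt format the Space uses
prompt = "[Casual] yo can u help me out real quick? [Formal]"
inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
outputs = model.generate(**inputs, max_new_tokens=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))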