File size: 3,975 Bytes
ad32177 cef31a4 ad32177 b4a4c25 ad32177 b4a4c25 ad32177 b4a4c25 8511f5e b4a4c25 cef31a4 b4a4c25 cef31a4 b4a4c25 cef31a4 b4a4c25 cef31a4 b4a4c25 40bbb95 b4a4c25 ad32177 40bbb95 b4a4c25 40bbb95 b4a4c25 cef31a4 b4a4c25 cef31a4 b4a4c25 cef31a4 b4a4c25 8511f5e cef31a4 b4a4c25 cef31a4 b4a4c25 40bbb95 b4a4c25 cef31a4 b4a4c25 8511f5e b4a4c25 40bbb95 ad32177 8511f5e b4a4c25 5b97012 ad32177 cef31a4 b4a4c25 8511f5e b4a4c25 8511f5e cef31a4 b4a4c25 cef31a4 8511f5e b4a4c25 8511f5e b4a4c25 5b97012 ad32177 cef31a4 b4a4c25 cef31a4 b4a4c25 ad32177 b4a4c25 ad32177 b4a4c25 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 |
import gradio as gr
import os
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

print("🚀 Starting GPT-2 Text Generator...")

# Load environment variables; only log presence, never the secret values.
HF_TOKEN = os.getenv("HF_TOKEN")
API_KEY = os.getenv("API_KEY")
ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")
print(f"HF_TOKEN: {'Set' if HF_TOKEN else 'Not set'}")
print(f"API_KEY: {'Set' if API_KEY else 'Not set'}")
print(f"ADMIN_PASSWORD: {'Set' if ADMIN_PASSWORD else 'Not set'}")

# Load model and tokenizer once at startup; the app cannot run without them.
print("Loading GPT-2 model...")
try:
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # GPT-2 ships without a pad token; reuse EOS so generate() can pad.
    tokenizer.pad_token = tokenizer.eos_token
    model.eval()  # inference only — disable dropout
    print("✅ Model loaded successfully!")
except Exception as e:
    print(f"❌ Error loading model: {e}")
    # Bare raise preserves the original traceback (raise e would rewrite it).
    raise
def generate_text(prompt, max_length=100, temperature=0.7):
    """Generate a GPT-2 continuation of ``prompt``.

    Args:
        prompt: Seed text, 1-500 characters.
        max_length: Maximum number of new tokens to append to the prompt.
        temperature: Sampling temperature; higher means more random output.

    Returns:
        The newly generated text (prompt removed), or a human-readable
        error message string.
    """
    if not prompt:
        return "Please enter a prompt"
    if len(prompt) > 500:
        return "Prompt too long (max 500 characters)"
    try:
        print(f"Generating text for: {prompt[:30]}...")
        # Encode the prompt, truncating so prompt + generated tokens stay
        # inside GPT-2's context window.
        inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=300, truncation=True)
        prompt_len = inputs.shape[1]
        # Generate without gradients — inference only.
        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_length=prompt_len + max_length,
                temperature=temperature,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                no_repeat_ngram_size=2,
            )
        # Strip the prompt at the *token* level before decoding. Slicing the
        # decoded string by len(prompt) is wrong whenever the tokenizer
        # truncated (max_length=300) or normalized the text, because the
        # decoded prompt then differs from the raw input string.
        new_text = tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()
        print(f"✅ Generated {len(new_text)} characters")
        return new_text if new_text else "No text generated. Try a different prompt."
    except Exception as e:
        error_msg = f"Error generating text: {str(e)}"
        print(f"❌ {error_msg}")
        return error_msg
# Build the Gradio UI: controls on the left, generated text on the right.
print("Creating Gradio interface...")

with gr.Blocks() as demo:
    gr.Markdown("# GPT-2 Text Generator")
    gr.Markdown("Enter a prompt and click generate to create text using GPT-2")

    with gr.Row():
        with gr.Column():
            # Free-text prompt plus the two sampling knobs.
            user_prompt = gr.Textbox(
                label="Enter your prompt",
                placeholder="Type your text here...",
                lines=3,
            )
            length_control = gr.Slider(
                minimum=20,
                maximum=200,
                value=100,
                step=10,
                label="Max length of generated text",
            )
            temperature_control = gr.Slider(
                minimum=0.1,
                maximum=1.5,
                value=0.7,
                step=0.1,
                label="Temperature (creativity)",
            )
            run_button = gr.Button("Generate Text", variant="primary")
        with gr.Column():
            result_box = gr.Textbox(
                label="Generated Text",
                lines=8,
                placeholder="Generated text will appear here...",
            )

    # One-click starter prompts.
    gr.Examples(
        examples=[
            "Once upon a time",
            "The future of technology is",
            "In a world where",
        ],
        inputs=user_prompt,
    )

    # Wire the button to the generation function.
    run_button.click(
        fn=generate_text,
        inputs=[user_prompt, length_control, temperature_control],
        outputs=result_box,
    )
# Launch the app
print("Launching Gradio app...")

if __name__ == "__main__":
    # demo.launch() blocks until the server shuts down, so announce
    # readiness *before* launching (the original printed it afterwards,
    # which only ran once the app had already stopped).
    print("✅ App is running!")
    demo.launch()