sonyps1928 committed on
Commit
2ce4afd
Β·
1 Parent(s): b4a4c25

update app

Browse files
Files changed (2) hide show
  1. app.py +136 -85
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,32 +1,35 @@
1
- import gradio as gr
2
  import os
3
  from transformers import GPT2LMHeadModel, GPT2Tokenizer
4
  import torch
5
 
6
- print("πŸš€ Starting GPT-2 Text Generator...")
 
 
 
 
 
7
 
8
  # Load environment variables
9
  HF_TOKEN = os.getenv("HF_TOKEN")
10
  API_KEY = os.getenv("API_KEY")
11
  ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")
12
 
13
- print(f"HF_TOKEN: {'Set' if HF_TOKEN else 'Not set'}")
14
- print(f"API_KEY: {'Set' if API_KEY else 'Not set'}")
15
- print(f"ADMIN_PASSWORD: {'Set' if ADMIN_PASSWORD else 'Not set'}")
 
 
 
 
 
 
 
 
 
16
 
17
- # Load model and tokenizer
18
- print("Loading GPT-2 model...")
19
- try:
20
- tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
21
- model = GPT2LMHeadModel.from_pretrained("gpt2")
22
- tokenizer.pad_token = tokenizer.eos_token
23
- print("βœ… Model loaded successfully!")
24
- except Exception as e:
25
- print(f"❌ Error loading model: {e}")
26
- raise e
27
-
28
- def generate_text(prompt, max_length=100, temperature=0.7):
29
- """Simple text generation function"""
30
  if not prompt:
31
  return "Please enter a prompt"
32
 
@@ -34,8 +37,6 @@ def generate_text(prompt, max_length=100, temperature=0.7):
34
  return "Prompt too long (max 500 characters)"
35
 
36
  try:
37
- print(f"Generating text for: {prompt[:30]}...")
38
-
39
  # Encode the prompt
40
  inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=300, truncation=True)
41
 
@@ -53,79 +54,129 @@ def generate_text(prompt, max_length=100, temperature=0.7):
53
 
54
  # Decode the output
55
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
56
-
57
- # Extract only the new text (remove the original prompt)
58
  new_text = generated_text[len(prompt):].strip()
59
 
60
- print(f"βœ… Generated {len(new_text)} characters")
61
  return new_text if new_text else "No text generated. Try a different prompt."
62
 
63
  except Exception as e:
64
- error_msg = f"Error generating text: {str(e)}"
65
- print(f"❌ {error_msg}")
66
- return error_msg
67
 
68
- # Create the Gradio interface
69
- print("Creating Gradio interface...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
- with gr.Blocks() as demo:
72
- gr.Markdown("# GPT-2 Text Generator")
73
- gr.Markdown("Enter a prompt and click generate to create text using GPT-2")
74
-
75
- with gr.Row():
76
- with gr.Column():
77
- prompt_input = gr.Textbox(
78
- label="Enter your prompt",
79
- placeholder="Type your text here...",
80
- lines=3
81
- )
82
-
83
- max_length_slider = gr.Slider(
84
- minimum=20,
85
- maximum=200,
86
- value=100,
87
- step=10,
88
- label="Max length of generated text"
89
- )
90
-
91
- temperature_slider = gr.Slider(
92
- minimum=0.1,
93
- maximum=1.5,
94
- value=0.7,
95
- step=0.1,
96
- label="Temperature (creativity)"
97
- )
98
-
99
- generate_button = gr.Button("Generate Text", variant="primary")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
- with gr.Column():
102
- output_text = gr.Textbox(
103
- label="Generated Text",
104
- lines=8,
105
- placeholder="Generated text will appear here..."
106
- )
107
 
108
- # Add some example prompts
109
- gr.Examples(
110
- examples=[
111
- "Once upon a time",
112
- "The future of technology is",
113
- "In a world where",
114
- ],
115
- inputs=prompt_input
116
- )
117
-
118
- # Connect the generate function
119
- generate_button.click(
120
- fn=generate_text,
121
- inputs=[prompt_input, max_length_slider, temperature_slider],
122
- outputs=output_text
123
- )
124
-
125
- # Launch the app
126
- print("Launching Gradio app...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
 
128
  if __name__ == "__main__":
129
- demo.launch()
130
-
131
- print("βœ… App is running!")
 
1
import streamlit as st
import os
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Set page config — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="GPT-2 Text Generator",
    page_icon="πŸ€–",
    layout="wide"
)

# Load environment variables (all optional; features degrade gracefully):
# HF_TOKEN       — Hugging Face access token, only surfaced as a status badge.
# API_KEY        — when set, generation requires the caller to supply it.
# ADMIN_PASSWORD — when set, the whole app is gated behind a login form.
HF_TOKEN = os.getenv("HF_TOKEN")
API_KEY = os.getenv("API_KEY")
ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")
 
18
@st.cache_resource
def load_model():
    """Fetch the GPT-2 tokenizer and model once; cached across reruns.

    Returns a ``(tokenizer, model)`` pair, or ``(None, None)`` if the
    download/initialization fails (the error is surfaced in the UI).
    """
    with st.spinner("Loading GPT-2 model..."):
        try:
            gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
            gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
            # GPT-2 ships without a pad token; reuse EOS so padding works.
            gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token
        except Exception as e:
            st.error(f"Error loading model: {e}")
            return None, None
        return gpt2_tokenizer, gpt2_model
30
 
31
+ def generate_text(prompt, max_length, temperature, tokenizer, model):
32
+ """Generate text using GPT-2"""
 
 
 
 
 
 
 
 
 
 
 
33
  if not prompt:
34
  return "Please enter a prompt"
35
 
 
37
  return "Prompt too long (max 500 characters)"
38
 
39
  try:
 
 
40
  # Encode the prompt
41
  inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=300, truncation=True)
42
 
 
54
 
55
  # Decode the output
56
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
57
  new_text = generated_text[len(prompt):].strip()
58
 
 
59
  return new_text if new_text else "No text generated. Try a different prompt."
60
 
61
  except Exception as e:
62
+ return f"Error generating text: {str(e)}"
 
 
63
 
64
def check_auth():
    """Gate the app behind ``ADMIN_PASSWORD`` when it is set.

    Returns:
        bool: ``True`` when no admin password is configured, or the user
        has already authenticated in this session; ``False`` while the
        login form is displayed and unauthenticated.
    """
    import hmac  # local import: constant-time string comparison

    if ADMIN_PASSWORD:
        if "authenticated" not in st.session_state:
            st.session_state.authenticated = False

        if not st.session_state.authenticated:
            st.title("πŸ”’ Authentication Required")
            password = st.text_input("Enter admin password:", type="password")
            if st.button("Login"):
                # compare_digest avoids leaking information about the
                # password through comparison timing (unlike `==`).
                if hmac.compare_digest(password, ADMIN_PASSWORD):
                    st.session_state.authenticated = True
                    st.rerun()
                else:
                    st.error("Invalid password")
            return False
    return True
81
 
82
def main():
    """Top-level Streamlit page: auth gate, cached model load, generation UI."""
    import hmac  # local import: constant-time API-key comparison

    # Check authentication
    if not check_auth():
        return

    # Load model (cached by @st.cache_resource across reruns)
    tokenizer, model = load_model()
    if tokenizer is None or model is None:
        st.error("Failed to load model. Please check the logs.")
        return

    # Main interface
    st.title("πŸ€– GPT-2 Text Generator")
    st.markdown("Generate text using GPT-2 language model")

    # Security status badges — purely informational
    col1, col2, col3 = st.columns(3)
    with col1:
        if HF_TOKEN:
            st.success("πŸ”‘ HF Token: Active")
        else:
            st.info("πŸ”‘ HF Token: Not set")

    with col2:
        if API_KEY:
            st.success("πŸ”’ API Auth: Enabled")
        else:
            st.info("πŸ”’ API Auth: Disabled")

    with col3:
        if ADMIN_PASSWORD:
            st.success("πŸ‘€ Admin Auth: Active")
        else:
            st.info("πŸ‘€ Admin Auth: Disabled")

    # Input section
    st.subheader("πŸ“ Input")

    col1, col2 = st.columns([2, 1])

    with col1:
        # Bug fix: pre-fill with a selected example prompt. Previously the
        # example buttons stored st.session_state.example_prompt but the
        # text area never read it, so clicking an example had no effect.
        prompt = st.text_area(
            "Enter your prompt:",
            value=st.session_state.get("example_prompt", ""),
            placeholder="Type your text here...",
            height=100
        )

        # API key input if needed
        api_key = ""
        if API_KEY:
            api_key = st.text_input("API Key:", type="password")

    with col2:
        st.subheader("βš™οΈ Settings")
        max_length = st.slider("Max Length", 20, 200, 100, 10)
        temperature = st.slider("Temperature", 0.1, 1.5, 0.7, 0.1)

        generate_btn = st.button("πŸš€ Generate Text", type="primary")

    # API key validation (constant-time compare to avoid timing leaks)
    if API_KEY and generate_btn:
        if not api_key or not hmac.compare_digest(api_key, API_KEY):
            st.error("πŸ”’ Invalid or missing API key")
            return

    # Generate text
    if generate_btn and prompt:
        with st.spinner("Generating text..."):
            result = generate_text(prompt, max_length, temperature, tokenizer, model)

        st.subheader("πŸ“„ Generated Text")
        st.text_area("Output:", value=result, height=200)

        # Copy button
        st.code(result)

    elif generate_btn:
        st.warning("Please enter a prompt")

    # Examples
    st.subheader("πŸ’‘ Example Prompts")
    examples = [
        "Once upon a time in a distant galaxy,",
        "The future of artificial intelligence is",
        "In the heart of the ancient forest,",
        "The detective walked into the room and noticed"
    ]

    cols = st.columns(len(examples))
    for i, example in enumerate(examples):
        with cols[i]:
            if st.button(f"Use Example {i+1}", key=f"ex_{i}"):
                # Store the choice, then rerun so the text area above
                # picks it up via st.session_state.get("example_prompt").
                st.session_state.example_prompt = example
                st.rerun()

    # Show which example is currently selected (idiomatic membership test
    # instead of hasattr on session_state)
    if "example_prompt" in st.session_state:
        st.info(f"Example selected: {st.session_state.example_prompt}")


if __name__ == "__main__":
    main()
 
 
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- gradio==4.44.0
2
  transformers==4.44.2
3
  torch==2.4.1
4
  tokenizers==0.19.1
 
1
+ streamlit==1.28.1
2
  transformers==4.44.2
3
  torch==2.4.1
4
  tokenizers==0.19.1