Euryeth committed
Commit c278a4e · verified · 1 Parent(s): be2d946

Update app.py

Files changed (1):
  1. app.py +14 -7
app.py CHANGED
@@ -1,13 +1,20 @@
-from transformers import pipeline
 import os
+from transformers import pipeline
 
-# Set cache directory to a writable location
+# Force cache location BEFORE importing transformers
 os.environ['TRANSFORMERS_CACHE'] = '/tmp/cache'
 os.environ['HF_HOME'] = '/tmp/cache'
+os.environ['HF_DATASETS_CACHE'] = '/tmp/cache'
+os.environ['HUGGINGFACE_HUB_CACHE'] = '/tmp/cache'
 
-# Initialize model with smaller config
+# Now import pipeline
 model = pipeline(
-    "text-generation",
-    model="gpt2",
-    device=-1  # Force CPU usage
-)
+    "text-generation",
+    model="distilgpt2",  # Smaller model for reliability
+    device=-1  # Force CPU
+)
+
+def generate_text(prompt, max_length=100):
+    """Generate text from a prompt"""
+    output = model(prompt, max_length=max_length)
+    return output[0]["generated_text"]
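
For reference, a minimal usage sketch of the new generate_text helper. It assumes the definitions from the updated app.py above have already been executed; the prompt and max_length value here are illustrative and not part of the commit.

# Usage sketch (illustrative only; assumes app.py's model and generate_text are defined)
sample = generate_text("Once upon a time", max_length=60)
print(sample)

# Equivalent direct call to the underlying text-generation pipeline
raw = model("Once upon a time", max_length=60)
print(raw[0]["generated_text"])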