soiz1 committed on
Commit
a04f8cb
Β·
verified Β·
1 Parent(s): 22d9b5e

Upload folder using huggingface_hub

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -5
  2. README.md +5 -4
  3. app.py +50 -0
  4. ideas.txt +0 -0
  5. requirements.txt +4 -0
.gitattributes CHANGED
@@ -2,13 +2,11 @@
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
  *.model filter=lfs diff=lfs merge=lfs -text
13
  *.msgpack filter=lfs diff=lfs merge=lfs -text
14
  *.npy filter=lfs diff=lfs merge=lfs -text
@@ -16,16 +14,14 @@
16
  *.onnx filter=lfs diff=lfs merge=lfs -text
17
  *.ot filter=lfs diff=lfs merge=lfs -text
18
  *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
  *.pickle filter=lfs diff=lfs merge=lfs -text
21
  *.pkl filter=lfs diff=lfs merge=lfs -text
 
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
  *.pth filter=lfs diff=lfs merge=lfs -text
24
  *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
  *.tflite filter=lfs diff=lfs merge=lfs -text
30
  *.tgz filter=lfs diff=lfs merge=lfs -text
31
  *.wasm filter=lfs diff=lfs merge=lfs -text
 
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
5
  *.ftz filter=lfs diff=lfs merge=lfs -text
6
  *.gz filter=lfs diff=lfs merge=lfs -text
7
  *.h5 filter=lfs diff=lfs merge=lfs -text
8
  *.joblib filter=lfs diff=lfs merge=lfs -text
9
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
10
  *.model filter=lfs diff=lfs merge=lfs -text
11
  *.msgpack filter=lfs diff=lfs merge=lfs -text
12
  *.npy filter=lfs diff=lfs merge=lfs -text
 
14
  *.onnx filter=lfs diff=lfs merge=lfs -text
15
  *.ot filter=lfs diff=lfs merge=lfs -text
16
  *.parquet filter=lfs diff=lfs merge=lfs -text
 
17
  *.pickle filter=lfs diff=lfs merge=lfs -text
18
  *.pkl filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
  *.pt filter=lfs diff=lfs merge=lfs -text
21
  *.pth filter=lfs diff=lfs merge=lfs -text
22
  *.rar filter=lfs diff=lfs merge=lfs -text
 
23
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
24
  *.tar.* filter=lfs diff=lfs merge=lfs -text
 
25
  *.tflite filter=lfs diff=lfs merge=lfs -text
26
  *.tgz filter=lfs diff=lfs merge=lfs -text
27
  *.wasm filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
- title: Stable Diffusion Prompt Generator
3
- emoji: πŸ‘
4
  colorFrom: red
5
- colorTo: blue
6
  sdk: gradio
7
- sdk_version: 5.34.2
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: MagicPrompt Stable Diffusion
3
+ emoji: 😻😻
4
  colorFrom: red
5
+ colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 3.27.0
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Flask API that generates Stable Diffusion prompts with the MagicPrompt GPT-2 model.
from transformers import pipeline, set_seed
from flask import Flask, request, jsonify
import random, re

app = Flask(__name__)

# Initialize the GPT-2 pipeline.
# Loads the MagicPrompt model (fine-tuned for Stable Diffusion prompt text)
# with the stock `gpt2` tokenizer. NOTE: this downloads/loads the model at
# import time, so the first startup can be slow.
gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
# Seed phrases used when the caller supplies no starting text.
# Assumes one idea per line; entries keep their trailing newline here and are
# stripped at use time in generate_prompts().
with open("ideas.txt", "r") as f:
    lines = f.readlines()
11
def generate_prompts(starting_text, num_prompts=1):
    """Generate up to ``num_prompts`` Stable Diffusion prompts with GPT-2.

    starting_text: seed text for the model. When empty, a random line from
        ideas.txt is used — a fresh one for EACH prompt (the original code
        mutated ``starting_text`` on the first pick, so with num_prompts > 1
        every later prompt reused the same random idea).
    num_prompts: how many prompts to generate. Best effort: a prompt is
        skipped if four attempts fail to produce a usable completion, so the
        returned list may be shorter than ``num_prompts``.

    Returns a list of cleaned generated prompt strings.
    """
    response_list = []

    for _ in range(num_prompts):
        # Derive the seed text per prompt; never mutate the caller's argument.
        if starting_text == "":
            seed_text = random.choice(lines).strip().lower().capitalize()
            # Strip punctuation only from the randomly chosen idea line;
            # user-supplied text is passed through unmodified.
            seed_text = re.sub(r"[,:\-–.!;?_]", '', seed_text)
        else:
            seed_text = starting_text

        for _attempt in range(4):  # Attempt up to 4 times to generate a valid response.
            set_seed(random.randint(100, 1000000))

            # Generate text with a randomized length for variety.
            response = gpt2_pipe(seed_text, max_length=random.randint(60, 90), num_return_sequences=1)
            generated_text = response[0]['generated_text'].strip()

            # Accept only completions that genuinely extend the seed text.
            if generated_text != seed_text and len(generated_text) > (len(seed_text) + 4):
                cleaned_text = re.sub(r'[^ ]+\.[^ ]+', '', generated_text)  # Remove strings like 'abc.xyz'.
                cleaned_text = cleaned_text.replace("<", "").replace(">", "")
                response_list.append(cleaned_text)
                break  # Stop retrying once a valid prompt is added.

    # At most one prompt is appended per outer iteration, so no slicing needed.
    return response_list
37
# Cap per-request work: GPT-2 generation is expensive and `n` comes straight
# from untrusted query input, so an unbounded value is a trivial DoS vector.
MAX_PROMPTS = 10

# Define the API endpoint.
@app.route('/', methods=['GET'])
def generate_api():
    """GET /?text=<seed>&n=<count> → JSON array of generated prompts.

    text: optional seed text (empty → random ideas from ideas.txt).
    n: number of prompts to return; defaults to 1, clamped to
       [0, MAX_PROMPTS] so one request cannot trigger unbounded generation.
    """
    starting_text = request.args.get('text', default="", type=str)
    num_prompts = request.args.get('n', default=1, type=int)

    # Clamp untrusted input (negative values behave as 0, as before).
    num_prompts = max(0, min(num_prompts, MAX_PROMPTS))

    # Generate the prompts and return them as a JSON list.
    results = generate_prompts(starting_text, num_prompts=num_prompts)
    return jsonify(results)
47
if __name__ == '__main__':
    # Bind to all interfaces; 7860 is the port Hugging Face Spaces expects.
    host, port = '0.0.0.0', 7860
    app.run(host=host, port=port)
ideas.txt ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ transformers[sentencepiece]
4
+ flask