from transformers import pipeline, set_seed
from flask import Flask, request, jsonify
import random, re

app = Flask(__name__)

# Cache of loaded text-generation pipelines, keyed by model name
model_cache = {}

# Seed phrases used when no starting text is supplied
with open("ideas.txt", "r") as f:
    lines = f.readlines()

def get_pipeline(model_name):
    # Lazily build and cache a text-generation pipeline for the requested model
    if model_name not in model_cache:
        model_cache[model_name] = pipeline('text-generation', model=model_name, tokenizer='gpt2')
    return model_cache[model_name]

def generate_prompts(starting_text, model_name, num_prompts=1, generation_args=None):
    response_list = []
    gpt2_pipe = get_pipeline(model_name)
    # Guard against a missing argument so the ** expansion below never sees None
    generation_args = generation_args or {}

    for _ in range(num_prompts):
        # Retry up to 4 times to get a usable generation for this prompt
        for _ in range(4):
            seed = random.randint(100, 1000000)
            set_seed(seed)

            # Fall back to a random seed phrase from ideas.txt when no text is given
            if starting_text == "":
                starting_text = lines[random.randrange(0, len(lines))].strip().lower().capitalize()
                starting_text = re.sub(r"[,:\-–.!;?_]", '', starting_text)

            response = gpt2_pipe(
                starting_text,
                **generation_args  # pass all generation parameters at once
            )
            generated_text = response[0]['generated_text'].strip()

            # Keep the result only if the model actually extended the prompt
            if generated_text != starting_text and len(generated_text) > (len(starting_text) + 4):
                cleaned_text = re.sub(r'[^ ]+\.[^ ]+', '', generated_text)  # strip URL-like tokens
                cleaned_text = cleaned_text.replace("<", "").replace(">", "")
                response_list.append(cleaned_text)
                break

    return response_list[:num_prompts]

@app.route('/', methods=['GET'])
def generate_api():
    starting_text = request.args.get('text', default="", type=str)
    num_prompts = request.args.get('n', default=1, type=int)
    model_param = request.args.get('model', default="sd", type=str).lower()

    # Model selection: "dall" picks the DALL·E prompt model, anything else falls back to Stable Diffusion
    model_name = "Gustavosta/MagicPrompt-Dalle" if model_param == "dall" else "Gustavosta/MagicPrompt-Stable-Diffusion"

    # Read generation settings from the URL query parameters
    generation_args = {
        "max_length": request.args.get('max_length', default=random.randint(60, 90), type=int),
        "min_length": request.args.get('min_length', default=0, type=int),
        "temperature": request.args.get('temperature', default=1.0, type=float),
        "top_k": request.args.get('top_k', default=50, type=int),
        "top_p": request.args.get('top_p', default=0.95, type=float),
        "repetition_penalty": request.args.get('repetition_penalty', default=1.0, type=float),
        "do_sample": request.args.get('do_sample', default=True, type=lambda v: v.lower() in ['true', '1', 'yes']),
        "num_return_sequences": 1
    }

    results = generate_prompts(starting_text, model_name, num_prompts=num_prompts, generation_args=generation_args)
    return jsonify(results)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)
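
# Example usage (a sketch, assuming the server above is running locally on port 7860;
# the endpoint and parameter names match the route defined in generate_api):
#
#   curl "http://localhost:7860/?text=a+cat+in+space&n=2&model=sd&temperature=0.9"
#
# Or from Python with the requests library:
#
#   import requests
#   resp = requests.get(
#       "http://localhost:7860/",
#       params={"text": "a cat in space", "n": 2, "model": "dall", "top_p": 0.9},
#   )
#   print(resp.json())  # a JSON list of generated prompt strings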