# soiz1's picture
# Update app.py
# 726e80a verified
from transformers import pipeline, set_seed
from flask import Flask, request, jsonify
import random, re

app = Flask(__name__)

# Cache of instantiated text-generation pipelines, keyed by HF model id,
# so each model is downloaded/loaded only once per process.
model_cache = {}

# Seed phrases used when the caller supplies no starting text.
# Explicit encoding: without it, open() uses the platform default and
# non-ASCII ideas would break on some systems.
with open("ideas.txt", "r", encoding="utf-8") as f:
    lines = f.readlines()
def get_pipeline(model_name):
    """Return a cached text-generation pipeline for *model_name*.

    The pipeline is built on first use (with the gpt2 tokenizer) and
    stored in the module-level ``model_cache`` for subsequent calls.
    """
    cached = model_cache.get(model_name)
    if cached is None:
        cached = pipeline('text-generation', model=model_name, tokenizer='gpt2')
        model_cache[model_name] = cached
    return cached
def generate_prompts(starting_text, model_name, num_prompts=1, generation_args=None):
    """Generate up to *num_prompts* cleaned prompt strings.

    Parameters
    ----------
    starting_text : str
        Seed text for generation. When empty, a random line from
        ideas.txt is picked (punctuation stripped) and reused for every
        subsequent prompt in this call.
    model_name : str
        Hugging Face model id, resolved via get_pipeline().
    num_prompts : int
        Number of prompts to produce.
    generation_args : dict | None
        Keyword arguments forwarded verbatim to the pipeline call
        (max_length, temperature, top_k, ...). Defaults to no extra args.

    Returns
    -------
    list[str]
        At most *num_prompts* generated strings (fewer if all retries
        for a prompt fail to beat the seed text).
    """
    # Bug fix: the original default of None crashed with a TypeError at
    # ``**generation_args`` when the caller omitted the argument.
    if generation_args is None:
        generation_args = {}
    response_list = []
    gpt2_pipe = get_pipeline(model_name)
    for _ in range(num_prompts):
        # Retry up to 4 times until the model produces something
        # meaningfully longer than the seed text.
        for _attempt in range(4):
            set_seed(random.randint(100, 1000000))
            if starting_text == "":
                starting_text = lines[random.randrange(0, len(lines))].strip().lower().capitalize()
                starting_text = re.sub(r"[,:\-–.!;?_]", '', starting_text)
            response = gpt2_pipe(
                starting_text,
                **generation_args  # forward all generation parameters at once
            )
            generated_text = response[0]['generated_text'].strip()
            if generated_text != starting_text and len(generated_text) > (len(starting_text) + 4):
                # Drop URL-ish tokens (non-space runs joined by a dot)
                # and strip angle brackets from the output.
                cleaned_text = re.sub(r'[^ ]+\.[^ ]+', '', generated_text)
                cleaned_text = cleaned_text.replace("<", "").replace(">", "")
                response_list.append(cleaned_text)
                break
    return response_list[:num_prompts]
@app.route('/', methods=['GET'])
def generate_api():
    """GET endpoint: generate prompts and return them as a JSON array.

    Query parameters: ``text`` (seed), ``n`` (count), ``model``
    ("dall" selects the DALL-E model, anything else Stable Diffusion),
    plus the generation knobs listed below.
    """
    starting_text = request.args.get('text', default="", type=str)
    num_prompts = request.args.get('n', default=1, type=int)
    model_param = request.args.get('model', default="sd", type=str).lower()

    # Model selection
    if model_param == "dall":
        model_name = "Gustavosta/MagicPrompt-Dalle"
    else:
        model_name = "Gustavosta/MagicPrompt-Stable-Diffusion"

    def parse_bool(v):
        # Accept common truthy spellings from the query string.
        return v.lower() in ['true', '1', 'yes']

    # Generation settings taken from the URL query parameters.
    generation_args = {
        "max_length": request.args.get('max_length', default=random.randint(60, 90), type=int),
        "min_length": request.args.get('min_length', default=0, type=int),
        "temperature": request.args.get('temperature', default=1.0, type=float),
        "top_k": request.args.get('top_k', default=50, type=int),
        "top_p": request.args.get('top_p', default=0.95, type=float),
        "repetition_penalty": request.args.get('repetition_penalty', default=1.0, type=float),
        "do_sample": request.args.get('do_sample', default=True, type=parse_bool),
        "num_return_sequences": 1,
    }

    results = generate_prompts(starting_text, model_name, num_prompts=num_prompts, generation_args=generation_args)
    return jsonify(results)
if __name__ == '__main__':
    # Listen on all interfaces; 7860 is the conventional HF Spaces port.
    app.run(host='0.0.0.0', port=7860)