change to openjourney model
app.py CHANGED
--- a/app.py
+++ b/app.py
@@ -12,8 +12,10 @@ from diffusers import StableDiffusionPipeline
 
 READ_TOKEN = os.environ.get('HF_ACCESS_TOKEN', None)
 
-model_id = "runwayml/stable-diffusion-v1-5"
+
+# model_id = "runwayml/stable-diffusion-v1-5"
 # model_id = "CompVis/stable-diffusion-v1-4"
+model_id = "prompthero/openjourney"
 
 has_cuda = torch.cuda.is_available()
 
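For context on the swap: prompthero/openjourney is a Stable Diffusion v1.5 fine-tune trained on Midjourney v4 images, and its model card recommends including the trigger phrase `mdjrny-v4 style` in prompts; that is what the prompt hunk further down appends. A minimal loading sketch, assuming the fp16/device handling and the example prompt (neither is shown in this commit):

```python
import os

import torch
from diffusers import StableDiffusionPipeline

READ_TOKEN = os.environ.get('HF_ACCESS_TOKEN', None)

model_id = "prompthero/openjourney"
has_cuda = torch.cuda.is_available()

# fp16 on GPU is an assumption for memory savings; the commit only changes
# model_id and does not show the loading dtype.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if has_cuda else torch.float32,
    use_auth_token=READ_TOKEN,
)
pipe = pipe.to("cuda" if has_cuda else "cpu")

# Openjourney's model card recommends this trigger phrase; without it the
# output drifts back toward plain Stable Diffusion v1.5.
prompt = "a hobbit village at dawn" + " mdjrny-v4 style"  # hypothetical prompt
image = pipe(prompt, height=512, width=512, num_inference_steps=25).images[0]
```

Appending the trigger phrase server-side keeps the UI prompt box unchanged while still steering generations toward the fine-tune's style.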
@@ -56,15 +58,12 @@ def generate_story(prompt):
     return story, summary, gr.update(visible=True)
 
 def on_change_event(app_state):
-    # print(f'on_change_event {app_state}')
     if app_state and app_state['running'] and app_state['img']:
         img = app_state['img']
         step = app_state['step']
-        # print(f'Updating the image:! {app_state}')
         app_state['dots'] += 1
         app_state['dots'] = app_state['dots'] % 10
         message = app_state['status_msg'] + ' *' * app_state['dots']
-        # print (f'message={message}')
         return gr.update(value=app_state['img_list'], label='intermediate steps'), gr.update(value=message)
     else:
         return gr.update(label='images list'), gr.update(value='')
@@ -87,7 +86,7 @@ with gr.Blocks() as demo:
             app_state['img_list'].append(res)
             app_state['status_msg'] = f'Generating step ({step + 1})'
 
-        prompt = prompt + ' masterpiece charcoal pencil art lord of the rings illustration'
+        prompt = prompt + ' masterpiece charcoal pencil art lord of the rings illustration mdjrny-v4 style'
         img = pipe(prompt, height=512, width=512, num_inference_steps=inference_steps, callback=callback, callback_steps=2)
         app_state['running'] = False
         app_state['img'] = None
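The `pipe(...)` call above uses the older diffusers preview API, in which `callback` is invoked with the current latents every `callback_steps` denoising steps (newer diffusers releases replace this with `callback_on_step_end`); this is what feeds the `app_state` entries that `on_change_event` polls. A sketch of what such a callback typically does, assuming a hypothetical `latents_to_pil` helper and reusing `pipe` from the loading sketch above:

```python
import torch

# Mirrors the state keys the app reads in on_change_event.
app_state = {'running': True, 'img': None, 'img_list': [], 'step': 0,
             'status_msg': '', 'dots': 0}

def latents_to_pil(pipe, latents):
    # Hypothetical helper: undo the SD v1 VAE scaling factor (0.18215),
    # decode the latents to pixel space, and convert to PIL images.
    with torch.no_grad():
        image = pipe.vae.decode(latents / 0.18215).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.cpu().permute(0, 2, 3, 1).float().numpy()
    return pipe.numpy_to_pil(image)

def callback(step, timestep, latents):
    # Called by pipe(...) every `callback_steps` denoising steps; stash a
    # preview so the Gradio poller (on_change_event) can pick it up.
    res = latents_to_pil(pipe, latents)[0]
    app_state['img'] = res
    app_state['step'] = step
    app_state['img_list'].append(res)
    app_state['status_msg'] = f'Generating step ({step + 1})'
```

Decoding latents through the VAE on every preview is not free, which is one reason to keep `callback_steps` above 1.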
@@ -141,4 +140,4 @@ with gr.Blocks() as demo:
 if READ_TOKEN:
     demo.queue().launch()
 else:
-    demo.queue().launch(share=True, debug=True)
+    demo.queue().launch(share=True, debug=True)