QHL067 committed on
Commit
a5ded5e
·
1 Parent(s): d54394d
Files changed (1) hide show
  1. app.py +22 -14
app.py CHANGED
@@ -14,8 +14,8 @@ if torch.cuda.is_available():
14
  else:
15
  torch_dtype = torch.float32
16
 
17
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
18
- pipe = pipe.to(device)
19
 
20
  MAX_SEED = np.iinfo(np.int32).max
21
  MAX_IMAGE_SIZE = 1024
@@ -38,17 +38,17 @@ def infer(
38
 
39
  generator = torch.Generator().manual_seed(seed)
40
 
41
- image = pipe(
42
- prompt=prompt,
43
- negative_prompt=negative_prompt,
44
- guidance_scale=guidance_scale,
45
- num_inference_steps=num_inference_steps,
46
- width=width,
47
- height=height,
48
- generator=generator,
49
- ).images[0]
50
 
51
- return image, seed
52
 
53
 
54
  examples = [
@@ -70,10 +70,18 @@ with gr.Blocks(css=css) as demo:
70
 
71
  with gr.Row():
72
  prompt = gr.Text(
73
- label="Prompt",
74
  show_label=False,
75
  max_lines=1,
76
- placeholder="Enter your prompt",
 
 
 
 
 
 
 
 
77
  container=False,
78
  )
79
 
 
14
  else:
15
  torch_dtype = torch.float32
16
 
17
+ # pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
18
+ # pipe = pipe.to(device)
19
 
20
  MAX_SEED = np.iinfo(np.int32).max
21
  MAX_IMAGE_SIZE = 1024
 
38
 
39
  generator = torch.Generator().manual_seed(seed)
40
 
41
+ # image = pipe(
42
+ # prompt=prompt,
43
+ # negative_prompt=negative_prompt,
44
+ # guidance_scale=guidance_scale,
45
+ # num_inference_steps=num_inference_steps,
46
+ # width=width,
47
+ # height=height,
48
+ # generator=generator,
49
+ # ).images[0]
50
 
51
+ # return image, seed
52
 
53
 
54
  examples = [
 
70
 
71
  with gr.Row():
72
  prompt = gr.Text(
73
+ label="Prompt_1",
74
  show_label=False,
75
  max_lines=1,
76
+ placeholder="Enter your prompt for the first image",
77
+ container=False,
78
+ )
79
+
80
+ prompt = gr.Text(
81
+ label="Prompt_2",
82
+ show_label=False,
83
+ max_lines=1,
84
+ placeholder="Enter your prompt for the second",
85
  container=False,
86
  )
87