rahul7star committed
Commit 58264c9 · verified · 1 parent: 3b59afe

Update app.py

Files changed (1): app.py (+22 -37)
app.py CHANGED
@@ -26,37 +26,14 @@ pipe = DiffusionPipeline.from_pretrained(
 
 pipe.to("cuda")
 
-
-
-# @spaces.GPU
-# def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed):
-#     """Generate an image from the given prompt."""
-#     print(prompt)
-#     if randomize_seed:
-#         seed = torch.randint(0, 2**32 - 1, (1,)).item()
-
-#     generator = torch.Generator("cuda").manual_seed(int(seed))
-#     image = pipe(
-#         prompt=prompt,
-#         height=int(height),
-#         width=int(width),
-#         num_inference_steps=int(num_inference_steps),
-#         guidance_scale=0.0,  # Guidance should be 0 for Turbo models
-#         generator=generator,
-#         max_sequence_length=1024,
-#         num_images_per_prompt=2
-
-#     ).images[0]
-
-#     return image, seed
-
-
-
 @spaces.GPU
-def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed):
-    """Generate images from the given prompt."""
+def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed, num_images):
+    """Generate multiple images from the given prompt."""
     if randomize_seed:
         seed = torch.randint(0, 2**32 - 1, (1,)).item()
+
+    # Clamp num_images to max 3
+    num_images = min(max(1, int(num_images)), 3)
 
     generator = torch.Generator("cuda").manual_seed(int(seed))
     result = pipe(
@@ -67,10 +44,9 @@ def generate_image(prompt, height, width, num_inference_steps, seed, randomize_s
         guidance_scale=0.0,
         generator=generator,
         max_sequence_length=1024,
-        num_images_per_prompt=2
+        num_images_per_prompt=num_images
     )
 
-    # return both images
     return result.images, seed
 
 # Example prompts
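Assembled from the two hunks above, the patched generate_image reads roughly as follows. This is a sketch for reference only: indentation and the unchanged middle of the pipe() call (prompt, height, width, num_inference_steps) are reconstructed from the diff context, and pipe and the spaces decorator are assumed to come from the earlier part of app.py shown above.

@spaces.GPU
def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed, num_images):
    """Generate multiple images from the given prompt."""
    if randomize_seed:
        seed = torch.randint(0, 2**32 - 1, (1,)).item()

    # Clamp num_images to max 3 (matches the range of the new UI slider)
    num_images = min(max(1, int(num_images)), 3)

    generator = torch.Generator("cuda").manual_seed(int(seed))
    result = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # guidance stays at 0 for Turbo models
        generator=generator,
        max_sequence_length=1024,
        num_images_per_prompt=num_images,
    )

    # result.images is a list, so every generated image is returned
    return result.images, seed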
@@ -116,7 +92,15 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
                 step=64,
                 label="Width",
             )
-
+        with gr.Row():
+            num_images = gr.Slider(
+                minimum=1,
+                maximum=3,
+                value=2,
+                step=1,
+                label="Number of Images",
+            )
+
         with gr.Row():
             num_inference_steps = gr.Slider(
                 minimum=1,
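The new slider caps requests at three images in the UI, and the min/max clamp in the handler enforces the same bound for values arriving through the API. Because the handler now returns result.images, a list, the output_images component it feeds is presumably a gallery; the diff only references output_images and used_seed without showing their definitions, so the pairing below is a hypothetical sketch, not code from this commit.

# Hypothetical: these components are defined elsewhere in app.py and are not part
# of this diff; a Gallery is assumed because the handler returns a list of images.
output_images = gr.Gallery(label="Generated Images")
used_seed = gr.Number(label="Seed Used", interactive=False)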
@@ -161,15 +145,16 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
     # Connect the generate button
     generate_btn.click(
         fn=generate_image,
-        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
-        outputs=[output_images, used_seed],
-    )
+        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed, num_images],
+        outputs=[output_images, used_seed],
+    )
     prompt.submit(
-        fn=generate_image,
-        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
-        outputs=[output_images, used_seed],
+        fn=generate_image,
+        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed, num_images],
+        outputs=[output_images, used_seed],
     )
 
 
+
 if __name__ == "__main__":
     demo.launch()
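The button click and the prompt submit now share identical fn, inputs, and outputs. A possible follow-up (not part of this commit) would define that trio once and reuse it, for example:

# Sketch only: equivalent wiring without repeating the argument lists.
event_kwargs = dict(
    fn=generate_image,
    inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed, num_images],
    outputs=[output_images, used_seed],
)
generate_btn.click(**event_kwargs)
prompt.submit(**event_kwargs)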
 