rahul7star committed · verified
Commit 3ac4904 · 1 Parent(s): 5812881

Update app.py
Files changed (1)
  1. app.py +39 -18
app.py CHANGED
@@ -28,28 +28,50 @@ pipe.to("cuda")
 
 
 
+# @spaces.GPU
+# def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed):
+#     """Generate an image from the given prompt."""
+#     print(prompt)
+#     if randomize_seed:
+#         seed = torch.randint(0, 2**32 - 1, (1,)).item()
+
+#     generator = torch.Generator("cuda").manual_seed(int(seed))
+#     image = pipe(
+#         prompt=prompt,
+#         height=int(height),
+#         width=int(width),
+#         num_inference_steps=int(num_inference_steps),
+#         guidance_scale=0.0,  # Guidance should be 0 for Turbo models
+#         generator=generator,
+#         max_sequence_length=1024,
+#         num_images_per_prompt=2
+
+#     ).images[0]
+
+#     return image, seed
+
+
+
 @spaces.GPU
 def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed):
-    """Generate an image from the given prompt."""
-    print(prompt)
+    """Generate images from the given prompt."""
     if randomize_seed:
         seed = torch.randint(0, 2**32 - 1, (1,)).item()
 
     generator = torch.Generator("cuda").manual_seed(int(seed))
-    image = pipe(
+    result = pipe(
         prompt=prompt,
         height=int(height),
         width=int(width),
         num_inference_steps=int(num_inference_steps),
-        guidance_scale=0.0,  # Guidance should be 0 for Turbo models
+        guidance_scale=0.0,
         generator=generator,
         max_sequence_length=1024,
         num_images_per_prompt=2
-
-    ).images[0]
+    )
 
-    return image, seed
-
+    # return both images
+    return result.images, seed
 
 # Example prompts
 examples = [
@@ -119,7 +141,7 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
             generate_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
 
         with gr.Column(scale=1):
-            output_image = gr.Image(
+            output_image = gr.Gallery(
                 label="Generated Image",
                 type="pil",
             )
@@ -137,18 +159,17 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
 
 
     # Connect the generate button
-    generate_btn.click(
-        fn=generate_image,
-        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
-        outputs=[output_image, used_seed],
-    )
-
-    # Also allow generating by pressing Enter in the prompt box
+    generate_btn.click(
+        fn=generate_image,
+        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
+        outputs=[output_images, used_seed],
+    )
    prompt.submit(
         fn=generate_image,
         inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
-        outputs=[output_image, used_seed],
-    )
+        outputs=[output_images, used_seed],
+    )
+
 
 if __name__ == "__main__":
     demo.launch()
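
The change only works end to end if the handler's list return value is fed into a list-capable output component: `result.images` from a diffusers pipeline call with `num_images_per_prompt=2` is a list of PIL images, which gr.Gallery can display directly, whereas the previous gr.Image could only show `.images[0]`. Note that the new code names the component `output_image` while both event bindings reference `output_images`. Below is a minimal, hypothetical sketch of the same wiring under one consistent name (`output_gallery`), with stand-in input controls and the pipeline assumed to be loaded earlier in app.py; it is not the Space's exact layout.

# Minimal sketch of the multi-image wiring (hypothetical, not the Space's exact app.py).
import gradio as gr
import spaces
import torch

# The pipeline is created earlier in app.py (exact model id not shown here), e.g.:
# pipe = DiffusionPipeline.from_pretrained(...)  # hypothetical placeholder
# pipe.to("cuda")


@spaces.GPU
def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed):
    """Generate a batch of images and return them as a list for gr.Gallery."""
    if randomize_seed:
        seed = torch.randint(0, 2**32 - 1, (1,)).item()

    generator = torch.Generator("cuda").manual_seed(int(seed))
    result = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # Turbo models are run without guidance
        generator=generator,
        max_sequence_length=1024,
        num_images_per_prompt=2,  # two images per call
    )
    # result.images is a list of PIL images; gr.Gallery accepts such a list directly.
    return result.images, seed


with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Stand-in controls; the real app.py builds its own set of inputs.
            prompt = gr.Textbox(label="Prompt")
            height = gr.Slider(256, 2048, value=1024, step=64, label="Height")
            width = gr.Slider(256, 2048, value=1024, step=64, label="Width")
            num_inference_steps = gr.Slider(1, 20, value=8, step=1, label="Steps")
            seed = gr.Number(value=0, label="Seed")
            randomize_seed = gr.Checkbox(value=True, label="Randomize seed")
            generate_btn = gr.Button("🚀 Generate", variant="primary")
        with gr.Column(scale=1):
            # One consistent name is used here; the diff defines `output_image`
            # but wires the events to `output_images`.
            output_gallery = gr.Gallery(label="Generated Images", type="pil")
            used_seed = gr.Number(label="Seed used", interactive=False)

    # Button click and Enter in the prompt box call the same handler, so both
    # event bindings must list the same output components.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
        outputs=[output_gallery, used_seed],
    )
    prompt.submit(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
        outputs=[output_gallery, used_seed],
    )

if __name__ == "__main__":
    demo.launch()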