Update app.py

app.py CHANGED
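This commit adds an example_generate function, a near-duplicate of generate that keeps the @spaces.GPU decorator, and replaces the incomplete lambda previously passed as fn to gr.Examples with one that forwards example inputs to example_generate with use_upscaler=True. Cached examples are therefore rendered through the same pipeline as regular generations, with the latent upscaler enabled.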
@@ -88,6 +88,134 @@ def load_img(resize_width,img: str):
     img = img.resize((resize_width, resize_height), Image.Resampling.LANCZOS)
     return img, resize_width, resize_height
 
+@spaces.GPU
+def example_generate(
+    prompt: str,
+    negative_prompt: str = "",
+    seed: int = 0,
+    custom_width: int = 1024,
+    custom_height: int = 1024,
+    guidance_scale: float = 7.0,
+    num_inference_steps: int = 28,
+    sampler: str = "Euler a",
+    aspect_ratio_selector: str = "896 x 1152",
+    style_selector: str = "(None)",
+    quality_selector: str = "Standard v3.1",
+    use_upscaler: bool = False,
+    upscaler_strength: float = 0.55,
+    upscale_by: float = 1.5,
+    add_quality_tags: bool = True,
+    progress=gr.Progress(track_tqdm=True),
+):
+    generator = utils.seed_everything(seed)
+
+    width, height = utils.aspect_ratio_handler(
+        aspect_ratio_selector,
+        custom_width,
+        custom_height,
+    )
+
+    prompt = utils.add_wildcard(prompt, wildcard_files)
+
+    prompt, negative_prompt = utils.preprocess_prompt(
+        quality_prompt, quality_selector, prompt, negative_prompt, add_quality_tags
+    )
+    prompt, negative_prompt = utils.preprocess_prompt(
+        styles, style_selector, prompt, negative_prompt
+    )
+
+    width, height = utils.preprocess_image_dimensions(width, height)
+
+    backup_scheduler = pipe.scheduler
+    pipe.scheduler = utils.get_scheduler(pipe.scheduler.config, sampler)
+
+    if use_upscaler:
+        upscaler_pipe = StableDiffusionXLImg2ImgPipeline(**pipe.components)
+    metadata = {
+        "prompt": prompt,
+        "negative_prompt": negative_prompt,
+        "resolution": f"{width} x {height}",
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "seed": seed,
+        "sampler": sampler,
+        "sdxl_style": style_selector,
+        "add_quality_tags": add_quality_tags,
+        "quality_tags": quality_selector,
+    }
+
+    if use_upscaler:
+        new_width = int(width * upscale_by)
+        new_height = int(height * upscale_by)
+        metadata["use_upscaler"] = {
+            "upscale_method": "nearest-exact",
+            "upscaler_strength": upscaler_strength,
+            "upscale_by": upscale_by,
+            "new_resolution": f"{new_width} x {new_height}",
+        }
+    else:
+        metadata["use_upscaler"] = None
+    metadata["Model"] = {
+        "Model": DESCRIPTION,
+        "Model hash": "e3c47aedb0",
+    }
+
+    logger.info(json.dumps(metadata, indent=4))
+
+    try:
+        if use_upscaler:
+            latents = pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps,
+                generator=generator,
+                output_type="latent",
+            ).images
+            upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
+            images = upscaler_pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                image=upscaled_latents,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps,
+                strength=upscaler_strength,
+                generator=generator,
+                output_type="pil",
+            ).images
+        else:
+            images = pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale,
+                num_inference_steps=num_inference_steps,
+                generator=generator,
+                output_type="pil",
+            ).images
+
+        if images:
+            image_paths = [
+                utils.save_image(image, metadata, OUTPUT_DIR, IS_COLAB)
+                for image in images
+            ]
+
+            for image_path in image_paths:
+                logger.info(f"Image saved as {image_path} with metadata")
+
+        return image_paths, metadata
+    except Exception as e:
+        logger.exception(f"An error occurred: {e}")
+        raise
+    finally:
+        if use_upscaler:
+            del upscaler_pipe
+        pipe.scheduler = backup_scheduler
+        utils.free_memory()
+
 @spaces.GPU
 def generate(
     prompt: str,

@@ -457,7 +585,7 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
         examples=config.examples,
         inputs=prompt,
         outputs=[result, gr_metadata],
-        fn=lambda *args, **kwargs:
+        fn=lambda *args, **kwargs: example_generate(*args, use_upscaler=True, **kwargs),
        cache_examples=CACHE_EXAMPLES,
     )
     use_upscaler.change(