Upload 45 files
- README.md +1 -1
- app.py +5 -37
- requirements.txt +1 -0
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 title: AnyStory
-emoji:
+emoji: ✏️
 colorFrom: indigo
 colorTo: yellow
 sdk: gradio

app.py
CHANGED
@@ -10,7 +10,6 @@ import gradio as gr
 import numpy as np
 from PIL import Image
 
-from src.agent import extend_prompt, translate_prompt
 from src.anystory import call_anystory
 from src.matting import ImageUniversalMatting
 from src.util import upload_pil_2_oss
@@ -112,8 +111,6 @@ def interface():
 
             with gr.Group():
                 prompt = gr.Textbox(value="", label='Prompt', lines=6, show_label=True)
-                en_prompt = gr.Textbox(value="", label='prompt', lines=6, show_label=True, visible=False)
-                # prompt_extend_button = gr.Button(value="提示词扩写")
 
         with gr.Column(scale=1, min_width=100):
             result_gallery = gr.Image(type="pil", label="Generated Image", visible=True, height=450)
@@ -139,23 +136,11 @@ def interface():
         fn=set_image_seg_finished, outputs=generated_information
     )
 
-    # prompt_extend_button.click(
-    #     fn=set_prompt_extend_unfinished, outputs=generated_information
-    # ).then(
-    #     fn=extend_prompt, inputs=[prompt], outputs=[prompt]
-    # ).then(
-    #     fn=set_prompt_extend_finished, outputs=generated_information
-    # )
-
     run_button.click(
-        fn=set_prompt_translate_unfinished, outputs=generated_information
-    ).then(
-        fn=translate_prompt, inputs=[prompt], outputs=[en_prompt]
-    ).then(
         fn=set_image_generate_unfinished, outputs=generated_information
     ).then(
         fn=process,
-        inputs=[pil_subject_A_image, pil_subject_A_mask, pil_subject_B_image, pil_subject_B_mask,
+        inputs=[pil_subject_A_image, pil_subject_A_mask, pil_subject_B_image, pil_subject_B_mask, prompt],
         outputs=[result_gallery]
     ).then(
         fn=set_image_generate_finished, outputs=generated_information
@@ -247,7 +232,8 @@ def interface():
     gr.Examples(
         label="Examples",
         examples=examples,
-        inputs=[pil_subject_A_image, pil_subject_A_mask, pil_subject_B_image, pil_subject_B_mask, prompt,
+        inputs=[pil_subject_A_image, pil_subject_A_mask, pil_subject_B_image, pil_subject_B_mask, prompt,
+                result_gallery],
     )
 
 
@@ -262,24 +248,6 @@ def set_image_seg_finished():
     return gr.update(visible=True, value="<h3>Subject mask ready!</h3>")
 
 
-def set_prompt_extend_unfinished():
-    return gr.update(
-        visible=True,
-        value="<h3>(Unfinished) Rewriting your prompt... ✍️</h3>",
-    )
-
-
-def set_prompt_extend_finished():
-    return gr.update(visible=True, value="<h3>Prompt expanded successfully!</h3>")
-
-
-def set_prompt_translate_unfinished():
-    return gr.update(
-        visible=True,
-        value="<h3>(Unfinished) Preprocessing...</h3>",
-    )
-
-
 def set_image_generate_unfinished():
     return gr.update(
         visible=True,
@@ -301,7 +269,7 @@ if __name__ == "__main__":
 
         <a href='https://aigcdesigngroup.github.io/AnyStory/'><img src='https://img.shields.io/badge/Project_Page-AnyStory-green' alt='Project Page'></a>
 
-        <a href='https://modelscope.cn/studios/
+        <a href='https://modelscope.cn/studios/iic/AnyStory'><img src='https://img.shields.io/badge/Demo-ModelScope-blue'></a>
         </div>
         </br>
     </div>
@@ -313,7 +281,7 @@ if __name__ == "__main__":
     """
 
     description = r"""🚀🚀🚀 Quick Start:<br>
-    1. Upload subject reference images (clean background; real human IDs unsupported for now), Add prompts (
+    1. Upload subject reference images (clean background; real human IDs unsupported for now), Add prompts (only EN supported), and Click "<b>RUN</b>".<br>
     2. (Recommended) Click "<b>Segment Subject</b>" to create masks (or upload your own B&W masks) for subjects. This helps the model better reference the subject you specify (otherwise, we will perform automatic detection). 🤗<br>
     """
 
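The net effect of the app.py change is that the RUN button no longer routes the prompt through the translation/extension steps: it chains from a status update straight into process() with the raw prompt. The Gradio .click().then() pattern this relies on is sketched below; this is an illustrative stand-alone example, and process, set_busy, and set_done here are stand-ins rather than the Space's actual helpers.

import gradio as gr

def process(image, prompt):
    # Stand-in for the real AnyStory generation call.
    return image

def set_busy():
    return gr.update(visible=True, value="<h3>(Unfinished) Generating image...</h3>")

def set_done():
    return gr.update(visible=True, value="<h3>Image generated successfully!</h3>")

with gr.Blocks() as demo:
    status = gr.HTML(visible=False)
    with gr.Row():
        with gr.Column(scale=2):
            image = gr.Image(type="pil", label="Subject reference")
            prompt = gr.Textbox(label="Prompt", lines=6)
            run_button = gr.Button("RUN")
        with gr.Column(scale=1, min_width=100):
            result = gr.Image(type="pil", label="Generated Image", height=450)

    # Each .then() fires only after the previous step returns, so the status
    # banner updates before and after the (potentially slow) process() call.
    run_button.click(
        fn=set_busy, outputs=status
    ).then(
        fn=process, inputs=[image, prompt], outputs=result
    ).then(
        fn=set_done, outputs=status
    )

if __name__ == "__main__":
    demo.launch()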
requirements.txt
CHANGED
@@ -2,3 +2,4 @@ oss2
 phidata
 dashscope
 tensorflow==2.15
+openai
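requirements.txt now pulls in openai alongside dashscope and phidata, presumably for the prompt-agent utilities in src/agent.py (the translate_prompt/extend_prompt import was dropped from app.py, but the module remains part of the Space). A hypothetical sketch of calling an OpenAI-compatible chat endpoint for prompt translation follows; the base URL, model name, and environment variable are illustrative assumptions, not values taken from this repository.

import os
from openai import OpenAI  # client added via the new `openai` requirement

# Assumed configuration: an OpenAI-compatible endpoint (e.g. DashScope's
# compatible mode) with the API key supplied through an environment variable.
client = OpenAI(
    api_key=os.environ.get("DASHSCOPE_API_KEY", ""),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

def translate_prompt(prompt: str) -> str:
    # Hypothetical helper: ask the chat model for an English version of the prompt.
    resp = client.chat.completions.create(
        model="qwen-plus",  # assumed model name
        messages=[
            {"role": "system", "content": "Translate the prompt to English. Reply with the translation only."},
            {"role": "user", "content": prompt},
        ],
    )
    return resp.choices[0].message.content.strip()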