# app.py — Hugging Face Space demo: expand a short description into a
# Stable Diffusion prompt, then generate an image from it.
# (Removed stray web-page chrome that was pasted into the source:
#  repo breadcrumb, author line, and commit hash.)
import subprocess
import sys
def install(package):
    """Install *package* into the current interpreter's environment via pip.

    :param package: name of the PyPI package to install
    :raises subprocess.CalledProcessError: if the pip command exits non-zero
    """
    # Use sys.executable so the package lands in the same environment
    # that is running this script (important inside virtualenvs/Spaces).
    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# Libraries this app needs at runtime; install each one at startup.
# A failed install is reported but does not abort the remaining installs.
libraries = ["transformers", "diffusers", "accelerate", "ftfy", "safetensors"]
for library in libraries:
    try:
        install(library)
        print(f"{library} 安装成功")
    except subprocess.CalledProcessError as e:
        print(f"安装 {library} 时出错: {e}")
# NOTE(review): the original code imported prompt_to_prompt / prompt_to_image
# from separate modules, but both functions are defined later in THIS file,
# so the imports raised ModuleNotFoundError and the calls would in any case
# have executed before the definitions existed. The demo is therefore wrapped
# in a function so it can be invoked once the whole module has loaded.
def run_demo(user_description="a cute cat"):
    """Generate an image from a short text description and save it to disk.

    :param user_description: short user-supplied description of the image
        (defaults to the original example, "a cute cat")
    :return: filename of the saved image
    """
    # Expand the short description into a Stable-Diffusion-ready prompt.
    generated_prompt = prompt_to_prompt(user_description)
    print(f"Generated Prompt: {generated_prompt}")
    # Render the prompt into an image.
    image = prompt_to_image(generated_prompt)
    # Persist the result next to the script.
    image.save("generated_image.png")
    print("Image saved as generated_image.png")
    return "generated_image.png"
from diffusers import StableDiffusionPipeline
import torch

# Load the Stable Diffusion text-to-image pipeline.
# fp16 weights halve memory use; they are primarily intended for GPU use.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# Move the pipeline to the GPU when one is available. The original code
# unconditionally called .to("cuda"), which crashes on CPU-only hosts.
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
def prompt_to_image(prompt):
    """Render *prompt* with the module-level Stable Diffusion pipeline.

    :param prompt: text prompt suitable for Stable Diffusion
    :return: the first generated image from the pipeline output
    """
    # The pipeline returns an output object; take its first image.
    result = pipe(prompt)
    return result.images[0]
from transformers import AutoModelForCausalLM, AutoTokenizer
# Choose a causal language model for prompt expansion;
# GPT-Neo 125M is used here as a small example model.
model_name = "EleutherAI/gpt-neo-125M"
# Load the matching tokenizer.
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load the model weights.
model = AutoModelForCausalLM.from_pretrained(model_name)
def prompt_to_prompt(user_description, max_new_tokens=40):
    """Expand a short user description into a Stable Diffusion prompt.

    :param user_description: user-supplied short text description
    :param max_new_tokens: cap on tokens generated *beyond* the input
        (backward-compatible addition; the original ``max_length=50``
        counted the input tokens too, which could silently leave almost
        no budget for generation on longer descriptions)
    :return: the generated prompt string; note that causal-LM decoding
        echoes the input, so the description appears at the start
    """
    # Encode the description as a batch of one for the model.
    input_ids = tokenizer.encode(user_description, return_tensors="pt")
    # GPT-Neo has no pad token; passing EOS as pad_token_id avoids the
    # transformers warning and ill-defined padding during generation.
    output = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode the single generated sequence back to text.
    return tokenizer.decode(output[0], skip_special_tokens=True)