| | from flask import Flask, request, jsonify |
| | from flask_cors import CORS |
| | import os |
| | from huggingface_hub import InferenceClient |
| | from io import BytesIO |
| | from PIL import Image |
| | import gradio as gr |
| |
|
| | |
# Flask application used as the HTTP entry point; CORS is enabled for all
# routes so browser clients served from other origins can call the API.
myapp = Flask(__name__)
CORS(myapp)
| |
|
| | |
# Hugging Face access token read from the environment. If HF_TOKEN is unset
# this is None and the InferenceClient falls back to unauthenticated
# (rate-limited) access.
HF_TOKEN = os.environ.get("HF_TOKEN")
client = InferenceClient(token=HF_TOKEN)
| |
|
| | |
def generate_image(prompt, seed=1, model="prompthero/openjourney-v4"):
    """Generate an image from a text prompt via the Hugging Face Inference API.

    Args:
        prompt: Text description of the desired image.
        seed: RNG seed forwarded to the model (default 1).
        model: Model repository id on the Hugging Face Hub.

    Returns:
        The generated image (as returned by ``client.text_to_image``), or
        ``None`` when the API call fails.
    """
    try:
        return client.text_to_image(prompt=prompt, seed=seed, model=model)
    except Exception as e:
        # Best-effort behavior: log the failure and let the caller decide,
        # instead of propagating the exception into the UI layer.
        print(f"Error generating image: {str(e)}")
        return None
| |
|
| | |
def gradio_interface(prompt, seed, model_name):
    """Gradio handler: generate an image for an ``outputs="image"`` component.

    Args:
        prompt: Text prompt entered in the UI.
        seed: Seed value from the UI number input.
        model_name: Hugging Face Hub model id from the UI text input.

    Returns:
        The generated PIL image, or ``None`` when generation failed
        (Gradio renders ``None`` as an empty image component).
    """
    # BUG FIX: the previous version serialized the PIL image into a BytesIO
    # and returned the raw stream — or, on failure, the *string*
    # "Failed to generate image". Neither is a valid value for a Gradio
    # "image" output: the component accepts a PIL image, a numpy array, or a
    # filepath (a plain string is treated as a path). Return the PIL image
    # directly and None on failure.
    image = generate_image(prompt, seed, model_name)
    if image is None:
        print("Failed to generate image")
    return image
| |
|
| | |
# Build and start the Gradio UI. fn receives (prompt, seed, model_name) from
# the three inputs below and its return value feeds the "image" output.
# NOTE(review): launch() blocks by default, so any code after this call only
# runs once the Gradio server shuts down — confirm this ordering is intended.
gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter a text prompt", lines=2),
        # precision=0 makes the seed an integer value.
        gr.Number(label="Seed", value=1, precision=0),
        gr.Textbox(label="Model Name", value="prompthero/openjourney-v4", placeholder="Enter model name"),
    ],
    outputs="image",
    title="Image Generation with Hugging Face",
    description="Enter a prompt, seed, and model name to generate an image."
).launch()
| |
|
| | |
if __name__ == "__main__":
    # NOTE(review): the gr.Interface(...).launch() call above blocks by
    # default, so this Flask server is only reached after the Gradio server
    # exits; also, port 7860 is Gradio's default port, so both servers would
    # contend for it — confirm whether both are actually meant to run.
    myapp.run(host='0.0.0.0', port=7860)