| | import os |
| | import json |
| | import time |
| | import gradio as gr |
| | import google.generativeai as genai |
| | from huggingface_hub import HfApi, hf_hub_download |
| | from collections import deque |
| |
|
| | |
# --- Client configuration (module-level side effects) ---
# Configure the Gemini client from the environment; GOOGLE_API_KEY must be set.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Hugging Face token used for dataset reads and writes
# (may be None — reads of a public dataset still work; uploads will fail).
hf_token = os.getenv("HF_TOKEN")
hf_api = HfApi(token=hf_token)

# Dataset repo storing user-submitted characters, one JSON object per line (JSONL).
REPO_ID = "Sakalti/Gemini_ai_chat"
DATASET_FILE = "characters.jsonl"

# Chat model used for all generations.
model = genai.GenerativeModel(model_name='gemini-2.0-flash')
|
| | |
# --- Sliding-window rate limiter (per-process; NOTE(review): not thread-safe —
# concurrent Gradio requests could race on the deque) ---
request_times = deque()  # timestamps of requests inside the current window
REQUEST_LIMIT = 15       # max requests allowed per window
TIME_WINDOW = 60         # window length in seconds


def is_rate_limited():
    """Return True when the request budget is exhausted.

    Otherwise records the current request's timestamp and returns False.
    """
    current = time.time()
    # Evict timestamps that have aged out of the sliding window.
    while request_times and current - request_times[0] > TIME_WINDOW:
        request_times.popleft()
    if len(request_times) >= REQUEST_LIMIT:
        return True
    request_times.append(current)
    return False
| |
|
| | |
def fetch_characters():
    """Download the character dataset and return it as a list of dicts.

    Returns an empty list on any failure (missing repo, network error,
    malformed file) — callers treat that as "no characters yet".
    """
    try:
        local_path = hf_hub_download(
            repo_id=REPO_ID,
            filename=DATASET_FILE,
            repo_type="dataset",
            token=hf_token,
        )
        records = []
        with open(local_path, "r", encoding="utf-8") as fh:
            for raw_line in fh:
                # Skip blank lines; each non-blank line is one JSON object.
                if raw_line.strip():
                    records.append(json.loads(raw_line))
        return records
    except Exception as e:
        # Best-effort read: log and fall back to an empty character list.
        print(f"[ERROR] fetch_characters: {e}")
        return []
| |
|
| | |
def upload_character(name, prompt):
    """Append one character record to the dataset and re-upload the JSONL file.

    Args:
        name: Display name of the character.
        prompt: System prompt describing how the character should behave.

    Raises:
        Whatever ``hf_api.upload_file`` raises (propagated so the UI handler
        can report the failure to the user).

    NOTE(review): fetch-modify-upload is not atomic; concurrent submissions
    can lose each other's records — acceptable for this small app.
    """
    characters = fetch_characters()
    characters.append({"name": name, "prompt": prompt})
    temp_file = "temp_characters.jsonl"
    with open(temp_file, "w", encoding="utf-8") as f:
        for char in characters:
            f.write(json.dumps(char, ensure_ascii=False) + "\n")
    try:
        hf_api.upload_file(
            path_or_fileobj=temp_file,
            path_in_repo=DATASET_FILE,
            repo_id=REPO_ID,
            repo_type="dataset"
        )
    finally:
        # Fix: previously os.remove was skipped when upload_file raised,
        # leaking the temp file on every failed upload.
        os.remove(temp_file)
| |
|
| | |
def generate_response(message, history, temperature, top_p, top_k, max_output_tokens, system_prompt):
    """Gradio handler: send `message` plus the chat history to Gemini.

    Args:
        message: The user's new message.
        history: List of (user, bot) string pairs from gr.State.
        temperature / top_p / top_k / max_output_tokens: sampling parameters.
        system_prompt: Optional character persona injected as the first turn.

    Returns:
        (textbox_value, chatbot_value, state_value) — the textbox is cleared
        on success and carries a warning when rate-limited.
    """
    if is_rate_limited():
        return "⚠️ 1分間に15回までです。しばらく待ってください。", history, history

    # Rebuild the conversation in Gemini's role/parts format; the character
    # system prompt (if any) is injected as a leading user turn.
    gemini_history = []
    if system_prompt:
        gemini_history.append({"role": "user", "parts": [f"以下の指示に従ってAIキャラとして振る舞ってください:\n{system_prompt}"]})
    for user, bot in history:
        gemini_history.append({"role": "user", "parts": [user]})
        gemini_history.append({"role": "model", "parts": [bot]})
    gemini_history.append({"role": "user", "parts": [message]})

    try:
        response = model.generate_content(
            gemini_history,
            generation_config={
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k,
                "max_output_tokens": int(max_output_tokens),
            }
        )
        # .text itself raises when the response was blocked/empty, so keep it
        # inside the try block.
        reply = response.text
    except Exception as e:
        # Fix: an API failure (network error, safety block) previously
        # crashed the handler; surface it in the chat instead.
        reply = f"⚠️ エラーが発生しました: {e}"

    history.append((message, reply))
    return "", history, history
| |
|
| | |
# --- Gradio UI: three tabs (chat, character submission, character selection) ---
with gr.Blocks(theme='Sakalti/Eternalstar') as demo:
    gr.Markdown("## Gemini AIキャラクターチャット")

    # Chat tab: message box, sampling controls, and the character system prompt.
    with gr.Tab("チャット"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="メッセージを入力...")
        state = gr.State([])  # per-session history as (user, bot) pairs
        system_prompt = gr.Textbox(label="キャラのシステムプロンプト", lines=4)

        with gr.Row():
            temperature = gr.Slider(0.0, 1.0, value=0.7, label="Temperature")
            top_p = gr.Slider(0.0, 1.0, value=0.9, label="Top-p")
            top_k = gr.Slider(1, 100, value=40, label="Top-k")
            max_output_tokens = gr.Number(value=1024, label="Max Output Tokens", precision=0)

        # Submit: generate_response clears the textbox, updates the chatbot,
        # and writes the new history back into state.
        msg.submit(generate_response,
                   inputs=[msg, state, temperature, top_p, top_k, max_output_tokens, system_prompt],
                   outputs=[msg, chatbot, state])

    # Submission tab: upload a new character record to the HF dataset.
    with gr.Tab("キャラクター投稿"):
        char_name = gr.Textbox(label="キャラクター名")
        char_prompt = gr.Textbox(label="システムプロンプト", lines=5)
        submit_char = gr.Button("キャラクターを追加")
        char_status = gr.Textbox(label="ステータス", interactive=False)

        def post_character(name, prompt):
            # Wrap the upload so failures surface as a status message,
            # not an unhandled error in the UI.
            try:
                upload_character(name, prompt)
                return "キャラクターをアップロードしました。"
            except Exception as e:
                return f"失敗しました: {e}"

        submit_char.click(post_character, inputs=[char_name, char_prompt], outputs=[char_status])

    # Selection tab: dropdown populated from the dataset on page load.
    with gr.Tab("キャラクター選択"):
        character_list = gr.Dropdown(choices=[], label="使用するキャラクターを選択")

        def refresh_characters():
            # Re-read the remote dataset and refresh the dropdown choices.
            return gr.update(choices=[c["name"] for c in fetch_characters()])

        def load_prompt(name):
            # Look up the selected character's prompt (re-fetches the dataset
            # on every selection change).
            for c in fetch_characters():
                if c.get("name") == name:
                    print(f"[DEBUG] 読み込み成功: {c}")
                    return c.get("prompt", "")
            print("[DEBUG] キャラが見つかりません")
            return ""

        # Selecting a character fills the chat tab's system-prompt textbox.
        character_list.change(load_prompt, inputs=[character_list], outputs=[system_prompt])
        demo.load(refresh_characters, outputs=[character_list])

demo.launch()