yakki233 committed on
Commit
2811ef4
·
verified ·
1 Parent(s): 567056d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -10
app.py CHANGED
@@ -1,16 +1,90 @@
 
 
1
  import gradio as gr
2
 
3
- def echo(text):
4
- return f"你刚刚说的是:{text}"
 
 
 
 
5
 
6
- demo = gr.Interface(
7
- fn=echo,
8
- inputs=gr.Textbox(label="输入点什么试试"),
9
- outputs=gr.Textbox(label="模型回复(其实是回声而已)"),
10
- title="最简 Space 测试",
11
- description="如果你能看到这个界面并正常输入输出,说明 Space 运行没问题。"
12
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  if __name__ == "__main__":
15
- # 不要加任何多余参数,如 server_name, server_port, ssr_mode
16
  demo.launch()
 
1
import os
import requests
import gradio as gr

# Read the Hugging Face API token from the environment; it must be configured
# as a Space Secret (Settings -> Variables) before the app can start.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
if HF_API_TOKEN is None:
    # Fail fast at import time so the Space shows a clear error instead of
    # failing later on the first API call.
    raise RuntimeError(
        "环境变量 HF_API_TOKEN 未设置,请在 Space 的 Settings -> Variables 中添加一个名为 HF_API_TOKEN 的 Secret。"
    )

# Model ID to query; replace with any other model that supports the
# Inference API, e.g. "meta-llama/Llama-3.2-1B-Instruct" or
# "Qwen/Qwen2.5-1.5B-Instruct".
MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"

# Hosted Inference API endpoint and the auth header sent with every request.
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_ID}"
HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
18
+
19
+
20
def query_hf_api(prompt: str, max_new_tokens: int = 256, temperature: float = 0.7) -> str:
    """Call the Hosted Inference API for MODEL_ID and return the completion.

    Args:
        prompt: Full prompt string sent to the model.
        max_new_tokens: Upper bound on newly generated tokens.
        temperature: Sampling temperature (sampling is always enabled).

    Returns:
        The model's generated continuation (without the echoed prompt), or
        the raw response stringified when the payload shape is unexpected
        (e.g. an error object) so it can be inspected in the UI.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status.
        requests.RequestException: On connection/timeout failures.
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_new_tokens,
            "temperature": temperature,
            "do_sample": True,
            # The text-generation task defaults to return_full_text=True,
            # which prepends the prompt to generated_text — the chat would
            # echo the whole dialog back every turn without this.
            "return_full_text": False,
        },
    }
    response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=120)
    response.raise_for_status()
    data = response.json()

    # text-generation models usually return [{"generated_text": "..."}];
    # some deployments return a bare {"generated_text": "..."} dict instead.
    if isinstance(data, list) and data:
        return data[0].get("generated_text", "").strip()
    if isinstance(data, dict) and isinstance(data.get("generated_text"), str):
        return data["generated_text"].strip()
    # Fallback: stringify the raw payload so it is visible for debugging.
    return str(data)
38
+
39
+
40
def chat_fn(history, message, max_new_tokens, temperature):
    """Handle one chat turn: flatten the dialog, query the model, update state.

    Args:
        history: List of (user, assistant) tuples from gr.Chatbot. May be
            None or empty on the very first turn.
        message: The user's new message.
        max_new_tokens: Slider value for the generation length cap
            (coerced to int below).
        temperature: Slider value for sampling temperature
            (coerced to float below).

    Returns:
        Tuple of (updated history, "") — the empty string clears the
        input textbox.
    """
    # Normalize so the concatenation at the end never sees None
    # (Gradio can deliver None as the Chatbot's initial state).
    history = history or []

    # Naively join the whole conversation into one long prompt.
    dialog = ""
    for user_msg, bot_msg in history:
        dialog += f"用户: {user_msg}\n助手: {bot_msg}\n"
    dialog += f"用户: {message}\n助手:"

    try:
        output = query_hf_api(
            dialog,
            max_new_tokens=int(max_new_tokens),
            temperature=float(temperature),
        )
    except Exception as e:
        # Surface the failure inside the chat instead of crashing the UI.
        output = f"[调用模型出错] {type(e).__name__}: {e}"

    return history + [(message, output)], ""
55
+
56
+
57
# UI layout: chat column on the left, generation parameters on the right.
with gr.Blocks() as demo:
    gr.Markdown(f"# 云端模型聊天 Demo\n使用模型:`{MODEL_ID}`(通过 Hugging Face Inference API)")

    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(label="对话", height=500)
            msg = gr.Textbox(
                label="你的问题",
                placeholder="输入你想问的问题,回车或点击发送",
                lines=2,
            )
            send_btn = gr.Button("发送")
            clear_btn = gr.Button("清空对话")

        with gr.Column(scale=1):
            gr.Markdown("### 参数设置")
            max_new_tokens = gr.Slider(16, 512, value=256, step=16, label="max_new_tokens")
            temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="temperature")

    # Both the send button and pressing Enter in the textbox run the same
    # handler with the same inputs/outputs, so wire them in one pass.
    for register in (send_btn.click, msg.submit):
        register(
            chat_fn,
            inputs=[chatbot, msg, max_new_tokens, temperature],
            outputs=[chatbot, msg],
        )

    # Reset both the chat log and the input box.
    clear_btn.click(lambda: ([], ""), None, [chatbot, msg])
87
 
88
if __name__ == "__main__":
    # Don't pass extra args to launch(); HF Spaces manages host/port itself.
    demo.launch()