goblingeorge commited on
Commit
2ee2335
·
1 Parent(s): c804271

add application

Browse files
Files changed (3) hide show
  1. README.md +3 -0
  2. app.py +100 -0
  3. requirements.txt +3 -0
README.md CHANGED
@@ -8,6 +8,9 @@ sdk_version: 5.49.1
8
  app_file: app.py
9
  pinned: false
10
  short_description: Llama3-TAIDE-LX-8B-Chat-Alpha1 demo
 
 
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
8
  app_file: app.py
9
  pinned: false
10
  short_description: Llama3-TAIDE-LX-8B-Chat-Alpha1 demo
11
+ models:
12
+ - taide/Llama3-TAIDE-LX-8B-Chat-Alpha1
13
+ - taide/Llama3-TAIDE-LX-8B-Chat-Alpha1-GGUF
14
  ---
15
 
16
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import litellm
4
+
5
# HTML banner rendered at the top of the Space.
# Fix: "comes in one sizes: 8b" -> "comes in one size: 8B" (user-facing grammar).
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">TAIDE/Llama3-TAIDE-LX-8B-Chat-Alpha1</h1>
<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/taide/Llama3-TAIDE-LX-8B-Chat-Alpha1"><b>Llama3-TAIDE-LX-8B-Chat-Alpha1</b></a>. Llama3-TAIDE-LX-8B-Chat-Alpha1 is the new open LLM and comes in one size: 8B. Feel free to play with it, or duplicate to run privately!</p>
</div>
'''
11
+
12
# Footer markdown rendered below the chat; the "Built with ..." attribution
# line follows the Llama 3 license's naming requirement for derivatives.
LICENSE = """
<p/>
---
Built with Llama3-TAIDE-LX-8B-Chat-Alpha1
"""
17
+
18
# Custom CSS injected into gr.Blocks: centers the page title and styles
# the "Duplicate Space" button as a blue pill.
css = """
h1 {
text-align: center;
display: block;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
30
+
31
+
32
def chat(message: str,
         history: list,
         temperature: float,
         max_new_tokens: int
         ) -> str:
    """Stream a chat completion for *message* given the conversation *history*.

    Args:
        message: The latest user message.
        history: Prior turns, either as (user, assistant) pairs (Gradio
            "tuples" format) or as {"role": ..., "content": ...} dicts
            (Gradio "messages" format, the non-deprecated default going
            forward). Both are accepted so the handler keeps working if
            the ChatInterface switches formats.
        temperature: Sampling temperature forwarded to the backend.
        max_new_tokens: Cap on the number of tokens to generate.

    Yields:
        The accumulated response text after each streamed chunk, or an
        error message (in Chinese, matching the UI locale) on failure.
    """
    try:
        messages = []
        for turn in history:
            if isinstance(turn, dict):
                # Gradio "messages" format: already role/content shaped.
                messages.append({"role": turn["role"], "content": turn["content"]})
            else:
                # Gradio "tuples" format: one (user, assistant) pair per turn.
                user, assistant = turn
                messages.extend([{"role": "user", "content": user},
                                 {"role": "assistant", "content": assistant}])
        messages.append({"role": "user", "content": message})

        response = litellm.completion(
            # The "openai/" prefix routes the call through litellm's
            # OpenAI-compatible Chat Completions provider; the endpoint and
            # key are taken from the environment (OPENAI_API_BASE /
            # OPENAI_API_KEY). NOTE(review): the original comment claimed
            # this used the "Responses API", which is incorrect.
            model="openai/Llama3-TAIDE-LX-8B-Chat-Alpha1",
            messages=messages,
            max_completion_tokens=max_new_tokens,
            temperature=temperature,
            stream=True,
        )
        output = []
        for part in response:
            content = part.choices[0].delta.content or ""
            output.append(content)
            # Yield the running text per chunk so the UI updates live;
            # "".join avoids quadratic string concatenation.
            yield "".join(output)
    except Exception as e:  # boundary handler: surface any backend error in the chat
        yield f"生成過程中發生錯誤: {str(e)}"
61
+
62
+
63
# Gradio block
# Chat transcript component, shared with the ChatInterface below.
chatbot = gr.Chatbot(height=450, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:

    gr.Markdown(DESCRIPTION)
    # Lets visitors clone this Space to run it privately with their own hardware.
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=chat,
        chatbot=chatbot,
        fill_height=True,
        # Sampling controls collapsed into an accordion under the chat box;
        # render=False because the components are placed by ChatInterface.
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0,
                      maximum=1,
                      step=0.1,
                      value=0.95,
                      label="Temperature",
                      render=False),
            gr.Slider(minimum=128,
                      maximum=131584,
                      step=1,
                      value=512,
                      label="Max new tokens",
                      render=False),
        ],
        # Canned Traditional-Chinese prompts (essay, business letter,
        # EN->ZH translation) shown as clickable examples.
        examples=[
            ['請以以下內容為基礎,寫一篇文章:撰寫一篇作文,題目為《一張舊照片》,內容要求為:選擇一張令你印象深刻的照片,說明令你印象深刻的原因,並描述照片中的影像及背後的故事。記錄成長的過程、與他人的情景、環境變遷和美麗的景色。'],
            ['請以品牌經理的身份,給廣告公司的創意總監寫一封信,提出對於新產品廣告宣傳活動的創意建議。'],
            ['以下提供英文內容,請幫我翻譯成中文。Dongshan coffee is famous for its unique position, and the constant refinement of production methods. The flavor is admired by many caffeine afficionados.'],
        ],
        # Examples invoke the live model; don't pre-compute/cache them at build time.
        cache_examples=False,
    )

    gr.Markdown(LICENSE)

if __name__ == "__main__":
    # Bind to 0.0.0.0 so the server is reachable from outside the container.
    demo.launch(server_name='0.0.0.0')
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ spaces
2
+ gradio
3
+ litellm