import os
from collections.abc import Iterator

import gradio as gr
import litellm
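
# litellm's "openai/" provider targets an OpenAI-compatible endpoint and picks
# up OPENAI_API_KEY (and, if set, OPENAI_API_BASE) from the environment. A
# minimal sketch, assuming the TAIDE model is served behind a local
# OpenAI-compatible server such as vLLM; the URL and key below are
# placeholders, not part of the original deployment:
# os.environ.setdefault("OPENAI_API_BASE", "http://localhost:8000/v1")
# os.environ.setdefault("OPENAI_API_KEY", "EMPTY")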

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">TAIDE/Llama-3.1-TAIDE-LX-8B-Chat</h1>
<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/taide/Llama-3.1-TAIDE-LX-8B-Chat"><b>Llama-3.1-TAIDE-LX-8B-Chat</b></a>, an open LLM available in one size: 8B. Feel free to play with it, or duplicate this Space to run it privately!</p>
</div>
'''

LICENSE = """
<p/>
---
Built with Llama-3.1-TAIDE-LX-8B-Chat
"""

css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""


def chat(message: str,
         history: list,
         temperature: float,
         max_new_tokens: int) -> Iterator[str]:
    try:
        # Rebuild the conversation in OpenAI message format from Gradio's
        # (user, assistant) history pairs, then append the new user turn.
        messages = []
        for user, assistant in history:
            messages.extend([{"role": "user", "content": user},
                             {"role": "assistant", "content": assistant}])
        messages.append({"role": "user", "content": message})

        response = litellm.completion(
            model="openai/Llama-3.1-TAIDE-LX-8B-Chat",  # the "openai/" prefix routes the call through litellm's OpenAI-compatible chat provider
            messages=messages,
            max_completion_tokens=max_new_tokens,
            temperature=temperature,
            stream=True,
        )
        # Accumulate the streamed deltas and yield the growing reply so the
        # chat window updates token by token.
        output = []
        for part in response:
            content = part.choices[0].delta.content or ""
            output.append(content)
            yield "".join(output)
    except Exception as e:
        yield f"生成過程中發生錯誤: {str(e)}"

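# A minimal sketch for smoke-testing the streaming generator outside the UI
# (assumes the OpenAI-compatible endpoint configured above is reachable; the
# prompt and parameter values are arbitrary):
#
#     for chunk in chat("你好,請自我介紹。", history=[],
#                       temperature=0.7, max_new_tokens=64):
#         print(chunk)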

# Gradio block
chatbot = gr.Chatbot(height=450, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:

    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=chat,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0,
                      maximum=1,
                      step=0.1,
                      value=0.95,
                      label="Temperature",
                      render=False),
            gr.Slider(minimum=128,
                      maximum=131584,
                      step=1,
                      value=512,
                      label="Max new tokens",
                      render=False),
        ],
        # Example prompts in Traditional Chinese, the model's target language.
        examples=[
            ['請以以下內容為基礎,寫一篇文章:撰寫一篇作文,題目為《一張舊照片》,內容要求為:選擇一張令你印象深刻的照片,說明令你印象深刻的原因,並描述照片中的影像及背後的故事。記錄成長的過程、與他人的情景、環境變遷和美麗的景色。'],
            ['請以品牌經理的身份,給廣告公司的創意總監寫一封信,提出對於新產品廣告宣傳活動的創意建議。'],
            ['以下提供英文內容,請幫我翻譯成中文。Dongshan coffee is famous for its unique position, and the constant refinement of production methods. The flavor is admired by many caffeine afficionados.'],
        ],
        cache_examples=False,
    )

    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch(server_name='0.0.0.0')