Create app.py
app.py
ADDED

########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################

import torch.nn.functional as F
import gradio as gr
from huggingface_hub import hf_hub_download
import gc
from rwkv.utils import PIPELINE, PIPELINE_ARGS
import types, torch, copy, time
from typing import List
# torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.allow_tf32 = True
# torch.backends.cuda.matmul.allow_tf32 = True
# torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = True
# torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = True
torch._C._jit_set_autocast_mode(False)

import torch.nn as nn

MyModule = torch.jit.ScriptModule
MyFunction = torch.jit.script_method
MyStatic = torch.jit.script
pipeline = PIPELINE(None, "rwkv_vocab_v20230424")

########################################################################################################

print('\nNOTE: this is very inefficient (loads all weights to VRAM, and slow KV cache). A better method is to prefetch DeepEmbed from RAM/SSD.\n')

args = types.SimpleNamespace()
model_path = hf_hub_download(repo_id='Alic-Li/RWKV_v7_G1_Translate_ctx4096_20250620', filename='RWKV_v7s_G1_DEA_0.1B_Translate_ctx4096_20250917_latest.pth')
args.MODEL_NAME = model_path
args.n_layer = 12
args.n_embd = 768
args.vocab_size = 65536
args.head_size = 64
ctx_limit = 4096
gen_limit = 4096
penalty_decay = 0.996
NUM_TRIALS = 1
LENGTH_PER_TRIAL = 500
TEMPERATURE = 1.0
TOP_P = 0.0
DTYPE = torch.half
from torch.utils.cpp_extension import load
HEAD_SIZE = args.head_size
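# Derived shape, for reference: n_embd=768 with head_size=64 gives 768 // 64 = 12
# attention heads per layer for this 0.1B model.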
# ROCm_flag = torch.version.hip is not None
# if ROCm_flag:
#     load(name="wkv7s", sources=["cuda/wkv7s_op.cpp", f"cuda/wkv7s.cu"], is_python_module=False,
#         verbose=True, extra_cuda_cflags=["-xhip", "-fopenmp", "-ffast-math", "-O3", "-munsafe-fp-atomics", f"-D_N_={HEAD_SIZE}"])
# else:
#     load(name="wkv7s", sources=["cuda/wkv7s_op.cpp", f"cuda/wkv7s.cu"], is_python_module=False,
#         verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"])
class WKV_7(torch.autograd.Function):
    @staticmethod
    def forward(ctx, state, r, w, k, v, a, b):
        with torch.no_grad():
            T, C = r.size()
            H = C // HEAD_SIZE
            N = HEAD_SIZE
            assert HEAD_SIZE == C // H
            assert all(x.dtype == DTYPE for x in [r,w,k,v,a,b])
            assert all(x.is_contiguous() for x in [r,w,k,v,a,b])
            y = torch.empty((T, C), device=k.device, dtype=DTYPE, requires_grad=False, memory_format=torch.contiguous_format)
            torch.ops.wkv7s.forward(1, T, C, H, state, r, w, k, v, a, b, y)
            return y
def RWKV7_OP(state, r, w, k, v, a, b):
    return WKV_7.apply(state, r, w, k, v, a, b)
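# NOTE: the compiled wkv7s kernel above is only usable if the commented-out load(...) call
# is restored; as shipped, the pure-PyTorch loop in RWKV_x070_TMix_seq (the "cuda-free
# method" below) is what actually runs, and RWKV7_OP would fail if called.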

########################################################################################################

class RWKV_x070(MyModule):
    def __init__(self, args):
        super().__init__()
        self.args = args
        self.n_embd = args.n_embd
        self.n_layer = args.n_layer
        self.eval()

        self.z = torch.load(args.MODEL_NAME, map_location='cpu')
        z = self.z
        self.n_head, self.head_size = z['blocks.0.att.r_k'].shape

        keys = list(z.keys())
        for k in keys:
            if 'key.weight' in k or 'value.weight' in k or 'receptance.weight' in k or 'output.weight' in k or 'head.weight' in k or 'qq.weight' in k:
                z[k] = z[k].t()
            z[k] = z[k].squeeze().to(dtype=DTYPE)
            if k.endswith('att.r_k'): z[k] = z[k].flatten()
        assert self.head_size == args.head_size

        z['emb.weight'] = F.layer_norm(z['emb.weight'], (args.n_embd,), weight=z['blocks.0.ln0.weight'], bias=z['blocks.0.ln0.bias'])

        for i in range(self.n_layer): # !!! merge emb residual !!!
            z[f'blocks.{i}.ffn.s_emb.weight'] = z[f'blocks.{i}.ffn.s_emb.weight'] + z['emb.weight'] @ z[f'blocks.{i}.ffn.s_emb_x.weight'].t()
            z[f'blocks.{i}.qkv.k_emb.weight'] = z[f'blocks.{i}.qkv.k_emb.weight'] + z['emb.weight'] @ z[f'blocks.{i}.qkv.k_emb_x.weight'].t()
            z[f'blocks.{i}.qkv.v_emb.weight'] = z[f'blocks.{i}.qkv.v_emb.weight'] + z['emb.weight'] @ z[f'blocks.{i}.qkv.v_emb_x.weight'].t()
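        # The merge above folds the input-embedding residual into each layer's DeepEmbed
        # tables once at load time, so the per-token DeepEmbed contribution becomes a single
        # row lookup (z[...'_emb.weight'][token]) during inference instead of an extra matmul.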

        z['blocks.0.att.v0'] = z['blocks.0.att.a0'] # actually ignored
        z['blocks.0.att.v1'] = z['blocks.0.att.a1'] # actually ignored
        z['blocks.0.att.v2'] = z['blocks.0.att.a2'] # actually ignored

    def forward(self, idx, state, full_output=False):
        if state is None:
            state = [None for _ in range(args.n_layer * 3 + 37)] # with KV cache etc.
            for i in range(args.n_layer): # state: 0=att_x_prev 1=att_kv 2=ffn_x_prev
                state[i*3+0] = torch.zeros(args.n_embd, dtype=DTYPE, requires_grad=False, device="cpu")
                state[i*3+1] = torch.zeros((args.n_embd // args.head_size, args.head_size, args.head_size), dtype=torch.float, requires_grad=False, device="cpu")
                state[i*3+2] = torch.zeros(args.n_embd, dtype=DTYPE, requires_grad=False, device="cpu")
            state[args.n_layer*3+0] = torch.empty((0), dtype=torch.int, requires_grad=False, device="cpu") # token idx cache
            for i in range(1,1+24): # kv cache = 12*2*32 numbers per token
                state[args.n_layer*3+i] = torch.empty((0,32), dtype=DTYPE, requires_grad=False, device="cpu")
            for i in range(1+24,1+36): # token-shift cache for Q in DEA
                state[args.n_layer*3+i] = torch.zeros(256, dtype=DTYPE, requires_grad=False, device="cpu")
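            # State layout built above (L = n_layer = 12): indices 0..3L-1 hold per-layer
            # (att token-shift, wkv matrix state, ffn token-shift); index 3L is the full
            # token-id history; indices 3L+1..3L+24 are the per-layer low-rank K and V caches
            # (12 layers x 2, 32 dims each); indices 3L+25..3L+36 are the per-layer
            # token-shift caches for the DEA query.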

        if type(idx) is list:
            if len(idx) > 1:
                return self.forward_seq(idx, state, full_output)
            else:
                # return self.forward_one(idx[0], state) # sorry too busy to add forward_one mode
                return self.forward_seq(idx, state, full_output)
        else:
            # return self.forward_one(idx, state) # sorry too busy to add forward_one mode
            return self.forward_seq([idx], state, full_output)

    @MyFunction
    def forward_seq(self, idx:List[int], state:List[torch.Tensor], full_output:bool=False):
        with torch.no_grad():
            z = self.z
            x = z['emb.weight'][idx]
            state[self.n_layer*3] = torch.cat((state[self.n_layer*3], torch.tensor(idx, dtype=torch.int, device=x.device)), dim=0)
            ctx = state[self.n_layer*3]

            v_first = torch.empty_like(x)
            for i in range(self.n_layer):
                bbb = f'blocks.{i}.'
                att = f'blocks.{i}.att.'
                ffn = f'blocks.{i}.ffn.'

                qkv = f'blocks.{i}.qkv.'
                q = x @ z[qkv+'qq.weight']
                k = x @ z[qkv+'k1']
                state[self.n_layer*3+1+i*2] = torch.cat((state[self.n_layer*3+1+i*2], k), dim=0)
                k = (state[self.n_layer*3+1+i*2] @ z[qkv+'k2']) * (z[qkv+'k_emb.weight'][ctx])
                v = x @ z[qkv+'v1']
                state[self.n_layer*3+1+i*2+1] = torch.cat((state[self.n_layer*3+1+i*2+1], v), dim=0)
                v = torch.tanh(state[self.n_layer*3+1+i*2+1] @ z[qkv+'v2']) * (z[qkv+'v_emb.weight'][ctx])
                qq = torch.cat((state[self.n_layer*3+1+24+i].unsqueeze(0), q[:-1,:]))
                state[self.n_layer*3+1+24+i] = q[-1,:]
                q = q + (qq - q) * z[qkv+'x_q']
                k = k + (F.pad(k,(0, 0, 1, -1)) - k) * z[qkv+'x_k']
                v = v + (F.pad(v,(0, 0, 1, -1)) - v) * z[qkv+'x_v']
                q = F.layer_norm(q, (256,), weight=z[qkv+'lnq.weight'], bias=z[qkv+'lnq.bias'])
                k = F.layer_norm(k, (256,), weight=z[qkv+'lnk.weight'], bias=z[qkv+'lnk.bias'])
                v = F.layer_norm(v, (self.n_embd,), weight=z[qkv+'lnv.weight'], bias=z[qkv+'lnv.bias'])
                scores = 64 * torch.tanh((q @ k.mT) * (1.0 / 1024.0)) # using soft-cap
                if len(idx) > 1:
                    mask = ~torch.tril(torch.ones(len(ctx), len(ctx), dtype=torch.bool, device=x.device))[-len(idx):,:]
                    scores = scores.masked_fill(mask, float('-inf'))
                qkv = scores.softmax(dim=-1) @ v
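                # The DEA block above caches only 32 K and 32 V numbers per token per layer
                # (the k1/v1 projections); full 256-dim keys and n_embd-dim values are
                # re-expanded every call via k2/v2 and gated by per-token DeepEmbed rows,
                # then attended with soft-capped scores (64*tanh(s/1024)) under a causal mask.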

                xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln1.weight'], bias=z[bbb+'ln1.bias'])

                xx, state[i*3+0], state[i*3+1], v_first = RWKV_x070_TMix_seq(i, self.n_head, self.head_size, xx, state[i*3+0], v_first, state[i*3+1],
                    z[att+'x_r'], z[att+'x_w'], z[att+'x_k'], z[att+'x_v'], z[att+'x_a'], z[att+'x_g'],
                    z[att+'w0'], z[att+'w1'], z[att+'w2'], z[att+'a0'], z[att+'a1'], z[att+'a2'], z[att+'v0'], z[att+'v1'], z[att+'v2'],
                    z[att+'g1'], z[att+'g2'], z[att+'k_k'], z[att+'k_a'], z[att+'r_k'],
                    z[att+'receptance.weight'], z[att+'key.weight'], z[att+'value.weight'], z[att+'output.weight'],
                    z[att+'ln_x.weight'], z[att+'ln_x.bias'])
                x = x + xx + qkv

                xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln2.weight'], bias=z[bbb+'ln2.bias'])

                xx, state[i*3+2] = RWKV_x070_CMix_seq(xx, state[i*3+2], z[ffn+'x_k'], z[ffn+'key.weight'], z[ffn+'value.weight'], z[ffn+'s_emb.weight'][idx], z[ffn+'s1'], z[ffn+'s2'], z[ffn+'s0'])
                x = x + xx

            if not full_output: x = x[-1,:]
            x = F.layer_norm(x, (self.n_embd,), weight=z['ln_out.weight'], bias=z['ln_out.bias'])
            x = x @ z['head.weight']
            return x, state

########################################################################################################

@MyStatic
def RWKV_x070_TMix_seq(layer_id: int, H:int, N:int, x, x_prev, v_first, state, x_r, x_w, x_k, x_v, x_a, x_g, w0, w1, w2, a0, a1, a2, v0, v1, v2, g1, g2, k_k, k_a, r_k, R_, K_, V_, O_, ln_w, ln_b):
    T = x.shape[0]
    xx = torch.cat((x_prev.unsqueeze(0), x[:-1,:])) - x
    xr, xw, xk, xv, xa, xg = x+xx*x_r, x+xx*x_w, x+xx*x_k, x+xx*x_v, x+xx*x_a, x+xx*x_g

    r = xr @ R_
    w = torch.tanh(xw @ w1) @ w2
    k = xk @ K_
    v = xv @ V_
    a = torch.sigmoid(a0 + (xa @ a1) @ a2)
    g = torch.sigmoid(xg @ g1) @ g2

    kk = torch.nn.functional.normalize((k * k_k).view(T,H,N), dim=-1, p=2.0).view(T,H*N)
    k = k * (1 + (a-1) * k_a)
    if layer_id == 0: v_first = v
    else: v = v + (v_first - v) * torch.sigmoid(v0 + (xv @ v1) @ v2)

    ######## cuda-free method
    w = torch.exp(-0.606531 * torch.sigmoid((w0 + w).float())) # 0.606531 = exp(-0.5)
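    # Per-step RWKV-7 recurrence (per head): S_t = S_{t-1} @ diag(w_t)
    #   + S_{t-1} @ ((-kk_t)^T (kk_t * a_t)) + v_t^T k_t, with output y_t = S_t @ r_t.
    # The loop below applies exactly this, keeping S in fp32 for stability.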
    for t in range(T):
        r_, w_, k_, v_, kk_, a_ = r[t], w[t], k[t], v[t], kk[t], a[t]
        vk = v_.view(H,N,1) @ k_.view(H,1,N)
        ab = (-kk_).view(H,N,1) @ (kk_*a_).view(H,1,N)
        state = state * w_.view(H,1,N) + state @ ab.float() + vk.float()
        xx[t] = (state.to(dtype=x.dtype) @ r_.view(H,N,1)).view(H*N)

    # w = -torch.nn.functional.softplus(-(w0 + w)) - 0.5
    # xx = RWKV7_OP(state, r, w, k, v, -kk, kk*a)

    xx = torch.nn.functional.group_norm(xx.view(T,H*N), num_groups=H, weight=ln_w, bias=ln_b, eps = 64e-5).view(T,H*N)
    xx = xx + ((r * k * r_k).view(T,H,N).sum(dim=-1, keepdim=True) * v.view(T,H,N)).view(T,H*N)
    return (xx * g) @ O_, x[-1,:], state, v_first

########################################################################################################

@MyStatic
def RWKV_x070_CMix_seq(x, x_prev, x_k, K_, V_, semb_, s1_, s2_, s0_):
    T,C = x.shape
    xx = torch.cat((x_prev.unsqueeze(0), x[:-1,:])) - x
    k = x + xx * x_k
    k = torch.relu(k @ K_) ** 2
    ss = (x @ s1_).view(T,1,32) @ semb_.view(T,32,32)
    k = k * ((ss.view(T,32) @ s2_) + s0_)
    return k @ V_, x[-1,:]

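# DeepEmbed FFN above: a standard squared-ReLU channel mix whose hidden activation is
# rescaled by a per-token low-rank modulation (s1 -> the token's 32x32 s_emb matrix -> s2,
# plus bias s0) looked up from the merged DeepEmbed table for the current token.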
model = RWKV_x070(args)

def evaluate(
    ctx,
    token_count=200,
    temperature=0.0,
    top_p=0.0,
    presencePenalty = 0.0,
    countPenalty = 0.0,
):
    args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
                     alpha_frequency = countPenalty,
                     alpha_presence = presencePenalty,
                     token_ban = [], # ban the generation of some tokens
                     token_stop = [0]) # stop generation whenever you see any token here
    ctx = ctx.strip()
    all_tokens = []
    out_last = 0
    out_str = ''
    occurrence = {}
    state = None
    for i in range(int(token_count)):

        input_ids = pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token]
        out, state = model.forward(input_ids, state)
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)

        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token in args.token_stop:
            break
        all_tokens += [token]
        for xxx in occurrence:
            occurrence[xxx] *= penalty_decay

        ttt = pipeline.decode([token])
        www = 1
        if ttt in ' \t0123456789':
            www = 0
        #elif ttt in '\r\n,.;?!"\':+-*/=#@$%^&_`~|<>\\()[]{},。;“”:?!()【】':
        #    www = 0.5
        if token not in occurrence:
            occurrence[token] = www
        else:
            occurrence[token] += www
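        # Repetition control above: every sampled token adds weight www to its occurrence
        # count (whitespace and digit tokens are exempt), all counts decay by
        # penalty_decay=0.996 each step, and the counts are subtracted from the logits
        # on the next iteration.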

        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:
            out_str += tmp
            yield out_str.strip()
            out_last = i + 1
    del out
    del state
    gc.collect()
    # torch.cuda.empty_cache()
    yield out_str.strip()
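# Minimal direct-use sketch (hypothetical, bypassing the UI below; the model expects the
# "English: ...\n\nChinese:" / "Chinese: ...\n\nEnglish:" prompt templates used by the
# helpers that follow):
#
#   for partial in evaluate("English: Hello, world!\n\nChinese:", token_count=100):
#       print(partial)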

def translate_english_to_chinese(english_text, token_count, temperature, top_p, presence_penalty, count_penalty):
    if not english_text.strip():
        yield "Chinese:\n请输入英文内容。"
        return

    full_prompt = f"English: {english_text}\n\nChinese:"
    for output in evaluate(full_prompt, token_count, temperature, top_p, presence_penalty, count_penalty):
        yield output

def translate_chinese_to_english(chinese_text, token_count, temperature, top_p, presence_penalty, count_penalty):
    if not chinese_text.strip():
        yield "English:\n请输入中文内容。"
        return

    full_prompt = f"Chinese: {chinese_text}\n\nEnglish:"
    for output in evaluate(full_prompt, token_count, temperature, top_p, presence_penalty, count_penalty):
        yield output
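# Both helpers are generators so Gradio streams partial translations into the output box
# (the empty-input message is yielded rather than returned, since a return value from a
# generator is discarded); generation ends at token id 0, listed in token_stop above.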


with gr.Blocks(title="RWKV_v7s_G1_DEA_0.1B_Translate_ctx4096_20250917 English -> Chinese") as demo:
    with gr.Tab("English To Chinese"):
        gr.HTML("<div style='text-align:center;'><h1>RWKV_v7s_G1_DEA_0.1B_Translate_ctx4096_20250917 English -> Chinese</h1></div>")
        with gr.Row():
            with gr.Column():
                english_input = gr.Textbox(
                    label="英文输入(注意不能有空行)",
                    lines=20,
                    placeholder="请输入英文内容...",
                    value="ROCm is an open-source stack, composed primarily of open-source software, designed for graphics processing unit (GPU) computation. ROCm consists of a collection of drivers, development tools, and APIs that enable GPU programming from low-level kernel to end-user applications.\n"
                          "With ROCm, you can customize your GPU software to meet your specific needs. You can develop, collaborate, test, and deploy your applications in a free, open source, integrated, and secure software ecosystem. ROCm is particularly well-suited to GPU-accelerated high-performance computing (HPC), artificial intelligence (AI), scientific computing, and computer aided design (CAD).\n"
                          "ROCm is powered by AMD’s Heterogeneous-computing Interface for Portability (HIP), an open-source software C++ GPU programming environment and its corresponding runtime. HIP allows ROCm developers to create portable applications on different platforms by deploying code on a range of platforms, from dedicated gaming GPUs to exascale HPC clusters.\n"
                          "ROCm supports programming models, such as OpenMP and OpenCL, and includes all necessary open source software compilers, debuggers, and libraries. ROCm is fully integrated into machine learning (ML) frameworks, such as PyTorch and TensorFlow."
                )

            with gr.Column():
                chinese_output = gr.Textbox(
                    label="中文输出",
                    lines=20,
                    placeholder="翻译结果将显示在此处",
                    value=""
                )

        with gr.Row():
            translate_btn = gr.Button("Translate", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")
            stop_btn = gr.Button("Stop", variant="stop")

        with gr.Accordion("Advanced Settings", open=False):
            token_count = gr.Slider(10, gen_limit, label="Max Tokens", step=10, value=gen_limit)
            temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
            top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0)
            presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0)
            count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0)

        translate_event = translate_btn.click(
            fn=translate_english_to_chinese,
            inputs=[english_input, token_count, temperature, top_p, presence_penalty, count_penalty],
            outputs=[chinese_output]
        )

        clear_btn.click(
            fn=lambda: ("", ""),
            inputs=[],
            outputs=[english_input, chinese_output]
        )

        stop_btn.click(
            fn=None,
            inputs=None,
            outputs=None,
            cancels=[translate_event]
        )
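        # The Stop button has no callback of its own (fn=None); it only cancels the
        # streaming translate_event, the standard Gradio pattern for interrupting a generator.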
    with gr.Tab("Chinese To English"):
        gr.HTML("<div style='text-align:center;'><h1>RWKV_v7s_G1_DEA_0.1B_Translate_ctx4096_20250917 Chinese -> English</h1></div>")
        with gr.Row():
            with gr.Column():
                chinese_input = gr.Textbox(
                    label="中文输入(注意不能有空行)",
                    lines=20,
                    placeholder="请输入中文内容...",
                    value="ROCm是一个开源栈,主要由开源软件组成,旨在用于图形处理单元(GPU)计算。ROCm由一系列驱动程序、开发工具和API组成,这些工具和API允许从低级内核到最终用户应用程序对GPU进行编程。"
                          "使用ROCm,您可以根据您的特定需求定制GPU软件。您可以在一个免费、开源、集成和安全的软件生态系统中开发、协作、测试和部署应用程序。ROCm特别适合GPU加速的高性能计算(HPC)、人工智能(AI)、科学计算和计算机辅助设计(CAD)。"
                          "ROCm由AMD的异构计算可移植接口(HIP)驱动,这是一个开源的C++ GPU编程环境及其相应的运行时。HIP允许ROCm开发者在不同平台上创建可移植应用程序,通过在从专用游戏GPU到exascale HPC集群的各种平台上部署代码来实现这一目标。"
                          "ROCm支持编程模型,如OpenMP和OpenCL,并包含所有必要的开源软件编译器、调试器和库。ROCm完全集成到机器学习(ML)框架中,如PyTorch和TensorFlow。"
                )

            with gr.Column():
                english_output = gr.Textbox(
                    label="英文输出",
                    lines=20,
                    placeholder="翻译结果将显示在此处",
                    value=""
                )

        with gr.Row():
            translate_btn = gr.Button("Translate", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")
            stop_btn = gr.Button("Stop", variant="stop")

        with gr.Accordion("Advanced Settings", open=False):
            token_count = gr.Slider(10, gen_limit, label="Max Tokens", step=10, value=gen_limit)
            temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
            top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0)
            presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0)
            count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0)

        translate_event = translate_btn.click(
            fn=translate_chinese_to_english,
            inputs=[chinese_input, token_count, temperature, top_p, presence_penalty, count_penalty],
            outputs=[english_output]
        )

        clear_btn.click(
            fn=lambda: ("", ""),
            inputs=[],
            outputs=[chinese_input, english_output]
        )

        stop_btn.click(
            fn=None,
            inputs=None,
            outputs=None,
            cancels=[translate_event]
        )

demo.queue(max_size=10, default_concurrency_limit=1)
demo.launch()
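# queue() with default_concurrency_limit=1 serializes requests so only one generation runs
# at a time; max_size=10 caps how many submissions may wait in the queue behind it.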