|
|
|
|
|
from typing import List, Optional, Dict |
|
|
import json |
|
|
from transformers import PreTrainedTokenizer |
|
|
|
|
|
|
|
|
class NSAByteTokenizer(PreTrainedTokenizer):
    """A simple byte-level tokenizer with a fixed vocab size of 256.

    - Encodes the UTF-8 bytes of the input string as token ids 0..255.
    - Token strings are rendered as ``"<0>"``..``"<255>"``.
    - No special tokens by default; EOS/PAD can be configured via the
      special-tokens map passed through ``**kwargs``.
    - Decoding uses UTF-8 with replacement for invalid byte sequences.
    """

    def __init__(self, **kwargs):
        # The vocab maps must exist before super().__init__(), because the
        # base-class constructor may call get_vocab()/vocab_size during setup.
        self._vocab: Dict[str, int] = {f"<{i}>": i for i in range(256)}
        self._ids_to_tokens: Dict[int, str] = {i: f"<{i}>" for i in range(256)}
        super().__init__(**kwargs)

        self.model_input_names = ["input_ids", "attention_mask"]

    @property
    def vocab_size(self) -> int:
        """Number of base tokens: one per possible byte value."""
        return 256

    def get_vocab(self) -> Dict[str, int]:
        """Return a defensive copy of the token -> id mapping."""
        return dict(self._vocab)

    @staticmethod
    def _token_to_byte(token: str) -> Optional[int]:
        """Parse a ``"<N>"`` token into its byte value 0..255, or None if malformed."""
        if token.startswith("<") and token.endswith(">"):
            try:
                v = int(token[1:-1])
            except ValueError:
                return None
            if 0 <= v < 256:
                return v
        return None

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* into one ``"<byte>"`` token per UTF-8 byte."""
        data = text.encode("utf-8", errors="replace")
        return [f"<{b}>" for b in data]

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its id; unknown/malformed tokens fall back to 0."""
        byte = self._token_to_byte(token)
        return 0 if byte is None else byte

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id to its token string; out-of-range ids wrap modulo 256."""
        return self._ids_to_tokens.get(int(index) % 256, "<0>")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Reassemble tokens into text, silently skipping malformed tokens.

        Invalid UTF-8 byte sequences decode with U+FFFD replacement.
        """
        bs = [b for b in map(self._token_to_byte, tokens) if b is not None]
        return bytes(bs).decode("utf-8", errors="replace")

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Concatenate one or two sequences without adding any special tokens."""
        if token_ids_1 is None:
            return token_ids_0
        return token_ids_0 + token_ids_1

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Persist the vocabulary; nothing to write for a fixed byte vocab.

        The HF contract is to return a tuple of saved-file *paths*
        (``Tuple[str]``); ``save_pretrained`` concatenates this return into
        its file list. The previous ``return (), ()`` produced a tuple of
        tuples, corrupting that bookkeeping. Since the vocab is fully
        determined by the 256 byte values, no file is needed.
        """
        return ()
|
|
|