diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..35ede624c89bc7353a20a2206fdcf4978f8ea500 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +Trump.wav filter=lfs diff=lfs merge=lfs -text +new.wav filter=lfs diff=lfs merge=lfs -text +badXT_71.wav filter=lfs diff=lfs merge=lfs -text +zero_shot_prompt.wav filter=lfs diff=lfs merge=lfs -text +00000309-00000300.wav filter=lfs diff=lfs merge=lfs -text +another.wav filter=lfs diff=lfs merge=lfs -text +zero_2_0.wav filter=lfs diff=lfs merge=lfs -text +zero_shot_0.wav filter=lfs diff=lfs merge=lfs -text +zero_1_0.wav filter=lfs diff=lfs merge=lfs -text +zero_3_0.wav filter=lfs diff=lfs merge=lfs -text +zero_0_0.wav filter=lfs diff=lfs merge=lfs -text diff --git a/00000309-00000300.wav b/00000309-00000300.wav new file mode 100644 index 0000000000000000000000000000000000000000..c1d27e38f9bed83bf9539e328fbf89047eae5bb7 --- /dev/null +++ b/00000309-00000300.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:631608f5c8b931ece1d45adc7f40a3b3b0ae2ec056a8a08a3565b04cc5750a4b +size 243244 diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.gitattributes b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..fcc122fac5d4523b2416a3f9247bc2cd68598f8e --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.gitattributes @@ -0,0 +1,38 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +flow.decoder.estimator.fp16.a10.plan filter=lfs diff=lfs merge=lfs -text +flow.decoder.estimator.fp16.l20.plan filter=lfs diff=lfs merge=lfs -text +flow.decoder.estimator.fp16.v100.plan filter=lfs diff=lfs merge=lfs -text diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.msc b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.msc new 
file mode 100644 index 0000000000000000000000000000000000000000..15b993f6cdaa3fcfcc2948129ccd2c44ad1da254 Binary files /dev/null and b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.msc differ diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.mv b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.mv new file mode 100644 index 0000000000000000000000000000000000000000..2f47b64f231838660626d5a880f2fff5d07ff826 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/.mv @@ -0,0 +1 @@ +Revision:master,CreatedAt:1736490687 \ No newline at end of file diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/README.md b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/README.md new file mode 100644 index 0000000000000000000000000000000000000000..24660db5ea1414879485feddaddc8b444b1fb995 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/README.md @@ -0,0 +1,119 @@ +--- +license: apache-2.0 +language: +- en +- zh +- ja +- ko +- fr +- ar +- es +- pt +metrics: +- accuracy +base_model: +- BlinkDL/rwkv-7-world +pipeline_tag: text-generation +--- + +# rwkv7-1.5B-world + + + +This is an RWKV-7 model in the flash-linear-attention format. + +## Model Details + + +### Model Description + + + +- **Developed by:** Bo Peng, Yu Zhang, Songlin Yang, Ruichong Zhang +- **Funded by:** RWKV Project (under LF AI & Data Foundation) +- **Model type:** RWKV7 +- **Language(s) (NLP):** English +- **License:** Apache-2.0 +- **Parameter count:** 1.52B +- **Tokenizer:** RWKV World tokenizer +- **Vocabulary size:** 65,536 + +### Model Sources + + + +- **Repository:** https://github.com/fla-org/flash-linear-attention ; https://github.com/BlinkDL/RWKV-LM +- **Paper:** Work in progress + +## Uses + + +Install `flash-linear-attention` and the latest version of `transformers` before using this model: + +```bash +pip install git+https://github.com/fla-org/flash-linear-attention +pip install 'transformers>=4.48.0' +``` + +### Direct Use + + +You can use this model just like any other HuggingFace model: +```python +from transformers import AutoModelForCausalLM, AutoTokenizer +model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-1.5B-world', trust_remote_code=True) +tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-1.5B-world', trust_remote_code=True) + +model = model.cuda() +prompt = "What is a large language model?" +messages = [ + {"role": "user", "content": "Who are you?"}, + {"role": "assistant", "content": "I am a GPT-3 based model."}, + {"role": "user", "content": prompt} +] +text = tokenizer.apply_chat_template( + messages, + tokenize=False, + add_generation_prompt=True +) + +model_inputs = tokenizer([text], return_tensors="pt").to(model.device) + +generated_ids = model.generate( + **model_inputs, + max_new_tokens=1024, +) +generated_ids = [ + output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) +] + +response = tokenizer.batch_decode(generated_ids, skip_special_tokens=False)[0] +print(response) +``` + +## Training Details + +### Training Data + +This model is trained on the World v3 dataset, with a total of 3.119 trillion tokens.
+ +#### Training Hyperparameters + +- **Training regime:** bfloat16, lr 4e-4 to 1e-5 "delayed" cosine decay, wd 0.1 (with increasing batch sizes during the middle of training) +- **Final Loss:** 1.9965 +- **Token Count:** 3.119 trillion + +## Evaluation + +#### Metrics + +`lambada_openai`: + +before conversion: ppl 4.13 acc 69.4% + +after conversion: ppl 4.26 acc 68.8% (without applying the chat template) + +## FAQ +Q: safetensors metadata is none. + +A: upgrade transformers to >=4.48.0: `pip install 'transformers>=4.48.0'` \ No newline at end of file diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/__init__.py b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/added_tokens.json b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/added_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..01936f7f079d12dd5d05c8208cb6edeaacaf7baa --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/added_tokens.json @@ -0,0 +1,3 @@ +{ + "<|rwkv_tokenizer_end_of_text|>": 0 +} diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/config.json b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1b5bc7f76e4e5e181bdfe12919df169277a530eb --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/config.json @@ -0,0 +1,39 @@ +{ + "_attn_implementation_autoset": true, + "a_low_rank_dim": 96, + "architectures": [ + "RWKV7ForCausalLM" + ], + "attn": null, + "attn_mode": "fused_recurrent", + "auto_map": { + "AutoConfig": "modeling_rwkv7.RWKV7Config", + "AutoModel": "modeling_rwkv7.RWKV7Model", + "AutoModelForCausalLM": "modeling_rwkv7.RWKV7ForCausalLM" + }, + "bos_token_id": 1, + "decay_low_rank_dim": 96, + "eos_token_id": 2, + "fuse_cross_entropy": true, + "fuse_norm": false, + "gate_low_rank_dim": 256, + "head_dim": 64, + "hidden_act": "sqrelu", + "hidden_ratio": 4.0, + "hidden_size": 2048, + "initializer_range": 0.02, + "intermediate_size": 8192, + "max_position_embeddings": 2048, + "model_type": "rwkv7", + "norm_bias": true, + "norm_eps": 1e-05, + "norm_first": true, + "num_heads": null, + "num_hidden_layers": 24, + "tie_word_embeddings": false, + "torch_dtype": "float32", + "transformers_version": "4.48.1", + "use_cache": true, + "v_low_rank_dim": 64, + "vocab_size": 65536 +} diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/generation_config.json b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..b3a81f68a4c388904e06b1f0ae81fa006c9e3dfd --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/generation_config.json @@ -0,0 +1,11 @@ +{ + "bos_token_id": 0, + "eos_token_id": 0, + "pad_token_id": 0, + "max_window_size": 2147483647, + "do_sample": true, + "top_k": 65536, + "top_p": 1.0, + "temperature": 1.0, + "transformers_version": "4.48.0" +} \ No newline at end of file diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/hf_rwkv_tokenizer.py b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/hf_rwkv_tokenizer.py new file mode 100644 index
0000000000000000000000000000000000000000..9dd4fbf9b5a0463b2f3a066797c325a991c96ed2 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/hf_rwkv_tokenizer.py @@ -0,0 +1,279 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for RWKV.""" + +import os +import re +from typing import TYPE_CHECKING, List, Optional, Tuple + +from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer +from transformers.utils import logging + + +if TYPE_CHECKING: + pass + +logger = logging.get_logger(__name__) + + +VOCAB_FILES_NAMES = { + "vocab_file": "rwkv_vocab_v20230424.txt", +} + +class TRIE: + __slots__ = tuple("ch,to,values,front".split(",")) + to: list + values: set + + def __init__(self, front=None, ch=None): + self.ch = ch + self.to = [None for ch in range(256)] + self.values = set() + self.front = front + + def __repr__(self): + fr = self + ret = [] + while fr != None: + if fr.ch != None: + ret.append(fr.ch) + fr = fr.front + return "<TRIE %s %s>" % (ret[::-1], self.values) + + def add(self, key: bytes, idx: int = 0, val=None): + if idx == len(key): + if val is None: + val = key + self.values.add(val) + return self + ch = key[idx] + if self.to[ch] is None: + self.to[ch] = TRIE(front=self, ch=ch) + return self.to[ch].add(key, idx=idx + 1, val=val) + + def find_longest(self, key: bytes, idx: int = 0): + u: TRIE = self + ch: int = key[idx] + + while u.to[ch] is not None: + u = u.to[ch] + idx += 1 + if u.values: + ret = idx, u, u.values + if idx == len(key): + break + ch = key[idx] + return ret + + +class RWKV_TOKENIZER: + def __init__(self, file_name): + self.idx2token = {} + sorted = [] # must be already sorted + with open(file_name, "r", encoding="utf-8") as f: + lines = f.readlines() + for l in lines: + idx = int(l[: l.index(" ")]) + x = eval(l[l.index(" ") : l.rindex(" ")]) + x = x.encode("utf-8") if isinstance(x, str) else x + assert isinstance(x, bytes) + + assert len(x) == int(l[l.rindex(" ") :]) + sorted += [x] + self.idx2token[idx] = x + + self.token2idx = {} + for k, v in self.idx2token.items(): + self.token2idx[v] = int(k) + + self.root = TRIE() + for t, i in self.token2idx.items(): + _ = self.root.add(t, val=(t, i)) + + def encodeBytes(self, src: bytes): + idx: int = 0 + tokens = [] + while idx < len(src): + _idx: int = idx + idx, _, values = self.root.find_longest(src, idx) + assert idx != _idx + _, token = next(iter(values)) + tokens.append(token) + return tokens + + def decodeBytes(self, tokens): + return b"".join(map(lambda i: self.idx2token[i], tokens)) + + def encode(self, src): + if isinstance(src, str): + return [self.encodeBytes(src.encode("utf-8"))] + elif isinstance(src, list): + return [self.encodeBytes(s.encode("utf-8")) for s in src] + + def decode(self, tokens): + return [self.decodeBytes(batch).decode("utf-8") for batch in tokens] + # try: + # return self.decodeBytes(tokens).decode('utf-8') + # except: + # return '\ufffd' # bad utf-8 + + def printTokens(self,
tokens): + for i in tokens: + s = self.idx2token[i] + try: + s = s.decode("utf-8") + except: + pass + print(f"{repr(s)}{i}", end=" ") + print() + + +class RwkvTokenizer(PreTrainedTokenizer): + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, vocab_file, bos_token="<|rwkv_tokenizer_end_of_text|>", eos_token="<|rwkv_tokenizer_end_of_text|>", unk_token="<|rwkv_tokenizer_end_of_text|>", **kwargs + ): + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'." + ) + + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + + if "add_bos_token" in kwargs: + self.add_bos_token = kwargs["add_bos_token"] + else: + self.add_bos_token = False + self.trie_tokenizer = RWKV_TOKENIZER(vocab_file) + vocab = self.trie_tokenizer.token2idx + self.encoder = vocab + self.decoder = {v: k for k, v in vocab.items()} + self._added_tokens_decoder = {0: AddedToken(str(bos_token))} + super().__init__( + bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs + ) + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + vocab = self.encoder + vocab.update(self.added_tokens_encoder) + vocab = dict(sorted(vocab.items(), key=lambda item: item[1])) + return vocab + + def _tokenize(self, text, split_special_tokens=False): + # return self.wordpiece_tokenizer.tokenize(text.encode("utf-8")) + return self.trie_tokenizer.encode(text)[0] + + def _convert_token_to_id(self, token): + return token + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (byte) using the vocab.""" + token = self.decoder.get(index, self.unk_token) + if isinstance(token, (bytes)): + token = token.decode("utf-8", errors="replace") + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (bytes) in a single string. Additional tokens are encoded to bytes""" + out_string = b"".join( + [k.encode(errors="replace") if isinstance(k, str) else k for k in tokens] + ).decode("utf-8") + return out_string + + def save_vocabulary( + self, save_directory: str, filename_prefix: Optional[str] = None + ) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + "vocab.txt", + ) + else: + vocab_file = ( + filename_prefix + "-" if filename_prefix else "" + ) + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted( + self.encoder.items(), key=lambda kv: kv[1] + ): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(str(token) + "\n") + index += 1 + return (vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + if self.add_bos_token: + bos_token_ids = [self.bos_token_id] + else: + bos_token_ids = [] + + output = bos_token_ids + token_ids_0 + + if token_ids_1 is None: + return output + + return output + bos_token_ids + token_ids_1 + + def get_special_tokens_mask( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None, + already_has_special_tokens: bool = False, + ) -> List[int]: + """ + Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding + special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=True, + ) + + if not self.add_bos_token: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=False, + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/modeling_rwkv7.py b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/modeling_rwkv7.py new file mode 100644 index 0000000000000000000000000000000000000000..cb80f73ada83057549f55847405132489525768f --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/modeling_rwkv7.py @@ -0,0 +1,4 @@ +from rwkvfla.models.rwkv7 import RWKV7ForCausalLM, RWKV7Model, RWKV7Config +RWKV7ForCausalLM = RWKV7ForCausalLM +RWKV7Model = RWKV7Model +RWKV7Config = RWKV7Config diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/rwkv_vocab_v20230424.txt b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/rwkv_vocab_v20230424.txt new file mode 100644 index 0000000000000000000000000000000000000000..b8c3f7260d2cac8b83bfbe7bc43f462fef8c4012 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/rwkv_vocab_v20230424.txt @@ -0,0 +1,65529 @@ +1 '\x00' 1 +2 '\x01' 1 +3 '\x02' 1 +4 '\x03' 1 +5 '\x04' 1 +6 '\x05' 1 +7 '\x06' 1 +8 '\x07' 1 +9 '\x08' 1 +10 '\t' 1 +11 '\n' 1 +12 '\x0b' 1 +13 '\x0c' 1 +14 '\r' 1 +15 '\x0e' 1 +16 '\x0f' 1 +17 '\x10' 1 +18 '\x11' 1 +19 '\x12' 1 +20 '\x13' 1 +21 '\x14' 1 +22 '\x15' 1 +23 '\x16' 1 +24 '\x17' 1 +25 '\x18' 1 +26 '\x19' 1 +27 '\x1a' 1 +28 '\x1b' 1 +29 '\x1c' 1 +30 '\x1d' 1 +31 '\x1e' 1 +32 '\x1f' 1 +33 ' ' 1 +34 '!' 1 +35 '"' 1 +36 '#' 1 +37 '$' 1 +38 '%' 1 +39 '&' 1 +40 "'" 1 +41 '(' 1 +42 ')' 1 +43 '*' 1 +44 '+' 1 +45 ',' 1 +46 '-' 1 +47 '.' 1 +48 '/' 1 +49 '0' 1 +50 '1' 1 +51 '2' 1 +52 '3' 1 +53 '4' 1 +54 '5' 1 +55 '6' 1 +56 '7' 1 +57 '8' 1 +58 '9' 1 +59 ':' 1 +60 ';' 1 +61 '<' 1 +62 '=' 1 +63 '>' 1 +64 '?' 
1 +65 '@' 1 +66 'A' 1 +67 'B' 1 +68 'C' 1 +69 'D' 1 +70 'E' 1 +71 'F' 1 +72 'G' 1 +73 'H' 1 +74 'I' 1 +75 'J' 1 +76 'K' 1 +77 'L' 1 +78 'M' 1 +79 'N' 1 +80 'O' 1 +81 'P' 1 +82 'Q' 1 +83 'R' 1 +84 'S' 1 +85 'T' 1 +86 'U' 1 +87 'V' 1 +88 'W' 1 +89 'X' 1 +90 'Y' 1 +91 'Z' 1 +92 '[' 1 +93 '\\' 1 +94 ']' 1 +95 '^' 1 +96 '_' 1 +97 '`' 1 +98 'a' 1 +99 'b' 1 +100 'c' 1 +101 'd' 1 +102 'e' 1 +103 'f' 1 +104 'g' 1 +105 'h' 1 +106 'i' 1 +107 'j' 1 +108 'k' 1 +109 'l' 1 +110 'm' 1 +111 'n' 1 +112 'o' 1 +113 'p' 1 +114 'q' 1 +115 'r' 1 +116 's' 1 +117 't' 1 +118 'u' 1 +119 'v' 1 +120 'w' 1 +121 'x' 1 +122 'y' 1 +123 'z' 1 +124 '{' 1 +125 '|' 1 +126 '}' 1 +127 '~' 1 +128 '\x7f' 1 +129 b'\x80' 1 +130 b'\x81' 1 +131 b'\x82' 1 +132 b'\x83' 1 +133 b'\x84' 1 +134 b'\x85' 1 +135 b'\x86' 1 +136 b'\x87' 1 +137 b'\x88' 1 +138 b'\x89' 1 +139 b'\x8a' 1 +140 b'\x8b' 1 +141 b'\x8c' 1 +142 b'\x8d' 1 +143 b'\x8e' 1 +144 b'\x8f' 1 +145 b'\x90' 1 +146 b'\x91' 1 +147 b'\x92' 1 +148 b'\x93' 1 +149 b'\x94' 1 +150 b'\x95' 1 +151 b'\x96' 1 +152 b'\x97' 1 +153 b'\x98' 1 +154 b'\x99' 1 +155 b'\x9a' 1 +156 b'\x9b' 1 +157 b'\x9c' 1 +158 b'\x9d' 1 +159 b'\x9e' 1 +160 b'\x9f' 1 +161 b'\xa0' 1 +162 b'\xa1' 1 +163 b'\xa2' 1 +164 b'\xa3' 1 +165 b'\xa4' 1 +166 b'\xa5' 1 +167 b'\xa6' 1 +168 b'\xa7' 1 +169 b'\xa8' 1 +170 b'\xa9' 1 +171 b'\xaa' 1 +172 b'\xab' 1 +173 b'\xac' 1 +174 b'\xad' 1 +175 b'\xae' 1 +176 b'\xaf' 1 +177 b'\xb0' 1 +178 b'\xb1' 1 +179 b'\xb2' 1 +180 b'\xb3' 1 +181 b'\xb4' 1 +182 b'\xb5' 1 +183 b'\xb6' 1 +184 b'\xb7' 1 +185 b'\xb8' 1 +186 b'\xb9' 1 +187 b'\xba' 1 +188 b'\xbb' 1 +189 b'\xbc' 1 +190 b'\xbd' 1 +191 b'\xbe' 1 +192 b'\xbf' 1 +193 b'\xc0' 1 +194 b'\xc1' 1 +195 b'\xc2' 1 +196 b'\xc3' 1 +197 b'\xc4' 1 +198 b'\xc5' 1 +199 b'\xc6' 1 +200 b'\xc7' 1 +201 b'\xc8' 1 +202 b'\xc9' 1 +203 b'\xca' 1 +204 b'\xcb' 1 +205 b'\xcc' 1 +206 b'\xcd' 1 +207 b'\xce' 1 +208 b'\xcf' 1 +209 b'\xd0' 1 +210 b'\xd1' 1 +211 b'\xd2' 1 +212 b'\xd3' 1 +213 b'\xd4' 1 +214 b'\xd5' 1 +215 b'\xd6' 1 +216 b'\xd7' 1 +217 b'\xd8' 1 +218 b'\xd9' 1 +219 b'\xda' 1 +220 b'\xdb' 1 +221 b'\xdc' 1 +222 b'\xdd' 1 +223 b'\xde' 1 +224 b'\xdf' 1 +225 b'\xe0' 1 +226 b'\xe1' 1 +227 b'\xe2' 1 +228 b'\xe3' 1 +229 b'\xe4' 1 +230 b'\xe5' 1 +231 b'\xe6' 1 +232 b'\xe7' 1 +233 b'\xe8' 1 +234 b'\xe9' 1 +235 b'\xea' 1 +236 b'\xeb' 1 +237 b'\xec' 1 +238 b'\xed' 1 +239 b'\xee' 1 +240 b'\xef' 1 +241 b'\xf0' 1 +242 b'\xf1' 1 +243 b'\xf2' 1 +244 b'\xf3' 1 +245 b'\xf4' 1 +246 b'\xf5' 1 +247 b'\xf6' 1 +248 b'\xf7' 1 +249 b'\xf8' 1 +250 b'\xf9' 1 +251 b'\xfa' 1 +252 b'\xfb' 1 +253 b'\xfc' 1 +254 b'\xfd' 1 +255 b'\xfe' 1 +256 b'\xff' 1 +257 '\t\t' 2 +258 '\t\n' 2 +259 '\t ' 2 +260 '\n\t' 2 +261 '\n\n' 2 +262 '\n ' 2 +263 '\r\n' 2 +264 ' \t' 2 +265 ' \n' 2 +266 ' \r' 2 +267 ' ' 2 +268 ' !' 2 +269 ' "' 2 +270 ' #' 2 +271 ' $' 2 +272 ' %' 2 +273 ' &' 2 +274 " '" 2 +275 ' (' 2 +276 ' )' 2 +277 ' *' 2 +278 ' +' 2 +279 ' ,' 2 +280 ' -' 2 +281 ' .' 2 +282 ' /' 2 +283 ' 0' 2 +284 ' 1' 2 +285 ' 2' 2 +286 ' 3' 2 +287 ' 4' 2 +288 ' 5' 2 +289 ' 6' 2 +290 ' 7' 2 +291 ' 8' 2 +292 ' 9' 2 +293 ' :' 2 +294 ' ;' 2 +295 ' <' 2 +296 ' =' 2 +297 ' >' 2 +298 ' ?' 
2 +299 ' @' 2 +300 ' A' 2 +301 ' B' 2 +302 ' C' 2 +303 ' D' 2 +304 ' E' 2 +305 ' F' 2 +306 ' G' 2 +307 ' H' 2 +308 ' I' 2 +309 ' J' 2 +310 ' K' 2 +311 ' L' 2 +312 ' M' 2 +313 ' N' 2 +314 ' O' 2 +315 ' P' 2 +316 ' Q' 2 +317 ' R' 2 +318 ' S' 2 +319 ' T' 2 +320 ' U' 2 +321 ' V' 2 +322 ' W' 2 +323 ' X' 2 +324 ' Y' 2 +325 ' Z' 2 +326 ' [' 2 +327 ' \\' 2 +328 ' ]' 2 +329 ' ^' 2 +330 ' _' 2 +331 ' `' 2 +332 ' a' 2 +333 ' b' 2 +334 ' c' 2 +335 ' d' 2 +336 ' e' 2 +337 ' f' 2 +338 ' g' 2 +339 ' h' 2 +340 ' i' 2 +341 ' j' 2 +342 ' k' 2 +343 ' l' 2 +344 ' m' 2 +345 ' n' 2 +346 ' o' 2 +347 ' p' 2 +348 ' q' 2 +349 ' r' 2 +350 ' s' 2 +351 ' t' 2 +352 ' u' 2 +353 ' v' 2 +354 ' w' 2 +355 ' x' 2 +356 ' y' 2 +357 ' z' 2 +358 ' {' 2 +359 ' |' 2 +360 ' }' 2 +361 ' ~' 2 +362 '!!' 2 +363 '!"' 2 +364 "!'" 2 +365 '!(' 2 +366 '!)' 2 +367 '!,' 2 +368 '!.' 2 +369 '!/' 2 +370 '!=' 2 +371 '!?' 2 +372 '![' 2 +373 '!\\' 2 +374 '""' 2 +375 '"#' 2 +376 '"$' 2 +377 '"%' 2 +378 '"&' 2 +379 '"\'' 2 +380 '"(' 2 +381 '")' 2 +382 '"*' 2 +383 '"+' 2 +384 '",' 2 +385 '"-' 2 +386 '".' 2 +387 '"/' 2 +388 '":' 2 +389 '";' 2 +390 '"<' 2 +391 '">' 2 +392 '"?' 2 +393 '"[' 2 +394 '"\\' 2 +395 '"]' 2 +396 '"_' 2 +397 '"`' 2 +398 '"{' 2 +399 '"}' 2 +400 '#!' 2 +401 '#"' 2 +402 '##' 2 +403 "#'" 2 +404 '#,' 2 +405 '#.' 2 +406 '#:' 2 +407 '#{' 2 +408 '$"' 2 +409 '$$' 2 +410 "$'" 2 +411 '$(' 2 +412 '$,' 2 +413 '$.' 2 +414 '$/' 2 +415 '$:' 2 +416 '$;' 2 +417 '$\\' 2 +418 '$_' 2 +419 '${' 2 +420 '%"' 2 +421 '%%' 2 +422 "%'" 2 +423 '%(' 2 +424 '%)' 2 +425 '%,' 2 +426 '%-' 2 +427 '%.' 2 +428 '%;' 2 +429 '%=' 2 +430 '%\\' 2 +431 '&#' 2 +432 '&&' 2 +433 '&=' 2 +434 '&\\' 2 +435 '\'"' 2 +436 "'#" 2 +437 "'$" 2 +438 "'%" 2 +439 "''" 2 +440 "'(" 2 +441 "')" 2 +442 "'*" 2 +443 "'+" 2 +444 "'," 2 +445 "'-" 2 +446 "'." 2 +447 "'/" 2 +448 "':" 2 +449 "';" 2 +450 "'<" 2 +451 "'>" 2 +452 "'?" 2 +453 "'[" 2 +454 "'\\" 2 +455 "']" 2 +456 "'^" 2 +457 "'_" 2 +458 "'d" 2 +459 "'m" 2 +460 "'s" 2 +461 "'t" 2 +462 "'{" 2 +463 "'}" 2 +464 '(!' 2 +465 '("' 2 +466 '(#' 2 +467 '($' 2 +468 '(%' 2 +469 '(&' 2 +470 "('" 2 +471 '((' 2 +472 '()' 2 +473 '(*' 2 +474 '(+' 2 +475 '(-' 2 +476 '(.' 2 +477 '(/' 2 +478 '(:' 2 +479 '(<' 2 +480 '(?' 2 +481 '(@' 2 +482 '([' 2 +483 '(\\' 2 +484 '(_' 2 +485 '(`' 2 +486 '({' 2 +487 '(|' 2 +488 ')!' 2 +489 ')"' 2 +490 ')$' 2 +491 ')&' 2 +492 ")'" 2 +493 ')(' 2 +494 '))' 2 +495 ')*' 2 +496 ')+' 2 +497 '),' 2 +498 ')-' 2 +499 ').' 2 +500 ')/' 2 +501 '):' 2 +502 ');' 2 +503 ')<' 2 +504 ')=' 2 +505 ')>' 2 +506 ')?' 2 +507 ')[' 2 +508 ')\\' 2 +509 ')]' 2 +510 ')^' 2 +511 ')_' 2 +512 ')`' 2 +513 '){' 2 +514 ')|' 2 +515 ')}' 2 +516 '*"' 2 +517 "*'" 2 +518 '*(' 2 +519 '*)' 2 +520 '**' 2 +521 '*,' 2 +522 '*-' 2 +523 '*.' 2 +524 '*/' 2 +525 '*:' 2 +526 '*=' 2 +527 '*>' 2 +528 '*\\' 2 +529 '*_' 2 +530 '*}' 2 +531 '+"' 2 +532 '+$' 2 +533 "+'" 2 +534 '+(' 2 +535 '+)' 2 +536 '++' 2 +537 '+,' 2 +538 '+-' 2 +539 '+.' 2 +540 '+/' 2 +541 '+=' 2 +542 '+[' 2 +543 '+\\' 2 +544 ',"' 2 +545 ',#' 2 +546 ',$' 2 +547 ',%' 2 +548 ",'" 2 +549 ',(' 2 +550 ',)' 2 +551 ',*' 2 +552 ',,' 2 +553 ',-' 2 +554 ',.' 2 +555 ',[' 2 +556 ',\\' 2 +557 ',_' 2 +558 ',{' 2 +559 '-"' 2 +560 '-$' 2 +561 '-%' 2 +562 "-'" 2 +563 '-(' 2 +564 '-)' 2 +565 '-*' 2 +566 '-+' 2 +567 '-,' 2 +568 '--' 2 +569 '-.' 2 +570 '-=' 2 +571 '->' 2 +572 '-[' 2 +573 '-\\' 2 +574 '-{' 2 +575 '."' 2 +576 '.$' 2 +577 '.%' 2 +578 ".'" 2 +579 '.(' 2 +580 '.)' 2 +581 '.*' 2 +582 '.+' 2 +583 '.,' 2 +584 '.-' 2 +585 '..' 2 +586 './' 2 +587 '.:' 2 +588 '.;' 2 +589 '.<' 2 +590 '.=' 2 +591 '.?' 
2 +592 '.[' 2 +593 '.\\' 2 +594 '.]' 2 +595 '._' 2 +596 '.|' 2 +597 '/"' 2 +598 '/#' 2 +599 '/$' 2 +600 '/%' 2 +601 "/'" 2 +602 '/(' 2 +603 '/)' 2 +604 '/*' 2 +605 '/+' 2 +606 '/,' 2 +607 '/-' 2 +608 '/.' 2 +609 '//' 2 +610 '/:' 2 +611 '/<' 2 +612 '/>' 2 +613 '/?' 2 +614 '/@' 2 +615 '/[' 2 +616 '/\\' 2 +617 '/_' 2 +618 '/{' 2 +619 '/~' 2 +620 '00' 2 +621 '01' 2 +622 '02' 2 +623 '03' 2 +624 '04' 2 +625 '05' 2 +626 '06' 2 +627 '07' 2 +628 '08' 2 +629 '09' 2 +630 '10' 2 +631 '11' 2 +632 '12' 2 +633 '13' 2 +634 '14' 2 +635 '15' 2 +636 '16' 2 +637 '17' 2 +638 '18' 2 +639 '19' 2 +640 '20' 2 +641 '21' 2 +642 '22' 2 +643 '23' 2 +644 '24' 2 +645 '25' 2 +646 '26' 2 +647 '27' 2 +648 '28' 2 +649 '29' 2 +650 '30' 2 +651 '31' 2 +652 '32' 2 +653 '33' 2 +654 '34' 2 +655 '35' 2 +656 '36' 2 +657 '37' 2 +658 '38' 2 +659 '39' 2 +660 '40' 2 +661 '41' 2 +662 '42' 2 +663 '43' 2 +664 '44' 2 +665 '45' 2 +666 '46' 2 +667 '47' 2 +668 '48' 2 +669 '49' 2 +670 '50' 2 +671 '51' 2 +672 '52' 2 +673 '53' 2 +674 '54' 2 +675 '55' 2 +676 '56' 2 +677 '57' 2 +678 '58' 2 +679 '59' 2 +680 '60' 2 +681 '61' 2 +682 '62' 2 +683 '63' 2 +684 '64' 2 +685 '65' 2 +686 '66' 2 +687 '67' 2 +688 '68' 2 +689 '69' 2 +690 '70' 2 +691 '71' 2 +692 '72' 2 +693 '73' 2 +694 '74' 2 +695 '75' 2 +696 '76' 2 +697 '77' 2 +698 '78' 2 +699 '79' 2 +700 '80' 2 +701 '81' 2 +702 '82' 2 +703 '83' 2 +704 '84' 2 +705 '85' 2 +706 '86' 2 +707 '87' 2 +708 '88' 2 +709 '89' 2 +710 '90' 2 +711 '91' 2 +712 '92' 2 +713 '93' 2 +714 '94' 2 +715 '95' 2 +716 '96' 2 +717 '97' 2 +718 '98' 2 +719 '99' 2 +720 ':"' 2 +721 ':#' 2 +722 ':$' 2 +723 ':%' 2 +724 ":'" 2 +725 ':(' 2 +726 ':)' 2 +727 ':*' 2 +728 ':,' 2 +729 ':-' 2 +730 ':.' 2 +731 ':/' 2 +732 '::' 2 +733 ':=' 2 +734 ':@' 2 +735 ':[' 2 +736 ':\\' 2 +737 ':]' 2 +738 ':_' 2 +739 ':`' 2 +740 ':{' 2 +741 ';"' 2 +742 ';&' 2 +743 ";'" 2 +744 ';-' 2 +745 ';/' 2 +746 ';;' 2 +747 ';<' 2 +748 ';\\' 2 +749 ';}' 2 +750 '' 2 +758 '' 2 +774 '=[' 2 +775 '=\\' 2 +776 '=_' 2 +777 '=`' 2 +778 '={' 2 +779 '>"' 2 +780 '>&' 2 +781 ">'" 2 +782 '>(' 2 +783 '>)' 2 +784 '>,' 2 +785 '>-' 2 +786 '>.' 2 +787 '>/' 2 +788 '>:' 2 +789 '>;' 2 +790 '><' 2 +791 '>=' 2 +792 '>>' 2 +793 '>[' 2 +794 '>\\' 2 +795 '>]' 2 +796 '>`' 2 +797 '>{' 2 +798 '?!' 2 +799 '?"' 2 +800 "?'" 2 +801 '?(' 2 +802 '?)' 2 +803 '?,' 2 +804 '?.' 2 +805 '?:' 2 +806 '?>' 2 +807 '??' 
2 +808 '?\\' 2 +809 '@"' 2 +810 '@@' 2 +811 '@{' 2 +812 'AA' 2 +813 'AB' 2 +814 'AC' 2 +815 'AD' 2 +816 'AE' 2 +817 'AF' 2 +818 'AG' 2 +819 'AH' 2 +820 'AI' 2 +821 'AJ' 2 +822 'AK' 2 +823 'AL' 2 +824 'AM' 2 +825 'AN' 2 +826 'AO' 2 +827 'AP' 2 +828 'AQ' 2 +829 'AR' 2 +830 'AS' 2 +831 'AT' 2 +832 'AU' 2 +833 'AV' 2 +834 'AW' 2 +835 'AX' 2 +836 'AY' 2 +837 'AZ' 2 +838 'Ab' 2 +839 'Ac' 2 +840 'Ad' 2 +841 'Af' 2 +842 'Ag' 2 +843 'Ah' 2 +844 'Ai' 2 +845 'Aj' 2 +846 'Ak' 2 +847 'Al' 2 +848 'Am' 2 +849 'An' 2 +850 'Ao' 2 +851 'Ap' 2 +852 'Ar' 2 +853 'As' 2 +854 'At' 2 +855 'Au' 2 +856 'Av' 2 +857 'Aw' 2 +858 'Ax' 2 +859 'Ay' 2 +860 'Az' 2 +861 'BA' 2 +862 'BB' 2 +863 'BC' 2 +864 'BD' 2 +865 'BE' 2 +866 'BF' 2 +867 'BG' 2 +868 'BH' 2 +869 'BI' 2 +870 'BJ' 2 +871 'BK' 2 +872 'BL' 2 +873 'BM' 2 +874 'BN' 2 +875 'BO' 2 +876 'BP' 2 +877 'BR' 2 +878 'BS' 2 +879 'BT' 2 +880 'BU' 2 +881 'BV' 2 +882 'BW' 2 +883 'BY' 2 +884 'BZ' 2 +885 'Ba' 2 +886 'Be' 2 +887 'Bg' 2 +888 'Bi' 2 +889 'Bl' 2 +890 'Bo' 2 +891 'Br' 2 +892 'Bs' 2 +893 'Bu' 2 +894 'By' 2 +895 'CA' 2 +896 'CB' 2 +897 'CC' 2 +898 'CD' 2 +899 'CE' 2 +900 'CF' 2 +901 'CG' 2 +902 'CH' 2 +903 'CI' 2 +904 'CK' 2 +905 'CL' 2 +906 'CM' 2 +907 'CN' 2 +908 'CO' 2 +909 'CP' 2 +910 'CR' 2 +911 'CS' 2 +912 'CT' 2 +913 'CU' 2 +914 'CV' 2 +915 'CW' 2 +916 'CX' 2 +917 'CY' 2 +918 'Ca' 2 +919 'Cb' 2 +920 'Cd' 2 +921 'Ce' 2 +922 'Ch' 2 +923 'Ci' 2 +924 'Cl' 2 +925 'Co' 2 +926 'Cp' 2 +927 'Cr' 2 +928 'Cs' 2 +929 'Ct' 2 +930 'Cu' 2 +931 'Cy' 2 +932 'DA' 2 +933 'DB' 2 +934 'DC' 2 +935 'DD' 2 +936 'DE' 2 +937 'DF' 2 +938 'DG' 2 +939 'DH' 2 +940 'DI' 2 +941 'DJ' 2 +942 'DK' 2 +943 'DL' 2 +944 'DM' 2 +945 'DN' 2 +946 'DO' 2 +947 'DP' 2 +948 'DQ' 2 +949 'DR' 2 +950 'DS' 2 +951 'DT' 2 +952 'DU' 2 +953 'DV' 2 +954 'DW' 2 +955 'DX' 2 +956 'DY' 2 +957 'Da' 2 +958 'Db' 2 +959 'De' 2 +960 'Di' 2 +961 'Do' 2 +962 'Dr' 2 +963 'Ds' 2 +964 'Du' 2 +965 'Dy' 2 +966 'EA' 2 +967 'EB' 2 +968 'EC' 2 +969 'ED' 2 +970 'EE' 2 +971 'EF' 2 +972 'EG' 2 +973 'EH' 2 +974 'EI' 2 +975 'EK' 2 +976 'EL' 2 +977 'EM' 2 +978 'EN' 2 +979 'EO' 2 +980 'EP' 2 +981 'EQ' 2 +982 'ER' 2 +983 'ES' 2 +984 'ET' 2 +985 'EU' 2 +986 'EV' 2 +987 'EW' 2 +988 'EX' 2 +989 'EY' 2 +990 'Ec' 2 +991 'Ed' 2 +992 'Eg' 2 +993 'Eh' 2 +994 'El' 2 +995 'Em' 2 +996 'En' 2 +997 'Ep' 2 +998 'Eq' 2 +999 'Er' 2 +1000 'Es' 2 +1001 'Et' 2 +1002 'Eu' 2 +1003 'Ev' 2 +1004 'Ex' 2 +1005 'Ey' 2 +1006 'FA' 2 +1007 'FB' 2 +1008 'FC' 2 +1009 'FD' 2 +1010 'FE' 2 +1011 'FF' 2 +1012 'FG' 2 +1013 'FH' 2 +1014 'FI' 2 +1015 'FK' 2 +1016 'FL' 2 +1017 'FM' 2 +1018 'FN' 2 +1019 'FO' 2 +1020 'FP' 2 +1021 'FR' 2 +1022 'FS' 2 +1023 'FT' 2 +1024 'FU' 2 +1025 'FV' 2 +1026 'FW' 2 +1027 'FX' 2 +1028 'FY' 2 +1029 'Fa' 2 +1030 'Fc' 2 +1031 'Fe' 2 +1032 'Fi' 2 +1033 'Fl' 2 +1034 'Fn' 2 +1035 'Fo' 2 +1036 'Fr' 2 +1037 'Fs' 2 +1038 'Fu' 2 +1039 'GA' 2 +1040 'GB' 2 +1041 'GC' 2 +1042 'GD' 2 +1043 'GE' 2 +1044 'GF' 2 +1045 'GG' 2 +1046 'GH' 2 +1047 'GI' 2 +1048 'GL' 2 +1049 'GM' 2 +1050 'GN' 2 +1051 'GO' 2 +1052 'GP' 2 +1053 'GR' 2 +1054 'GS' 2 +1055 'GT' 2 +1056 'GU' 2 +1057 'GV' 2 +1058 'GW' 2 +1059 'GY' 2 +1060 'Ga' 2 +1061 'Gb' 2 +1062 'Ge' 2 +1063 'Gh' 2 +1064 'Gi' 2 +1065 'Gl' 2 +1066 'Go' 2 +1067 'Gr' 2 +1068 'Gs' 2 +1069 'Gu' 2 +1070 'Gy' 2 +1071 'HA' 2 +1072 'HB' 2 +1073 'HC' 2 +1074 'HD' 2 +1075 'HE' 2 +1076 'HF' 2 +1077 'HG' 2 +1078 'HH' 2 +1079 'HI' 2 +1080 'HK' 2 +1081 'HL' 2 +1082 'HM' 2 +1083 'HN' 2 +1084 'HO' 2 +1085 'HP' 2 +1086 'HQ' 2 +1087 'HR' 2 +1088 'HS' 2 +1089 'HT' 2 +1090 'HU' 2 +1091 'HV' 2 +1092 'HW' 2 +1093 'HY' 2 +1094 'Ha' 2 +1095 'He' 2 
+1096 'Hg' 2 +1097 'Hi' 2 +1098 'Ho' 2 +1099 'Hp' 2 +1100 'Hs' 2 +1101 'Hu' 2 +1102 'Hy' 2 +1103 'Hz' 2 +1104 'IA' 2 +1105 'IB' 2 +1106 'IC' 2 +1107 'ID' 2 +1108 'IE' 2 +1109 'IF' 2 +1110 'IG' 2 +1111 'IH' 2 +1112 'II' 2 +1113 'IJ' 2 +1114 'IK' 2 +1115 'IL' 2 +1116 'IM' 2 +1117 'IN' 2 +1118 'IO' 2 +1119 'IP' 2 +1120 'IQ' 2 +1121 'IR' 2 +1122 'IS' 2 +1123 'IT' 2 +1124 'IU' 2 +1125 'IV' 2 +1126 'IW' 2 +1127 'IX' 2 +1128 'IZ' 2 +1129 'Id' 2 +1130 'If' 2 +1131 'Ig' 2 +1132 'Ii' 2 +1133 'Ik' 2 +1134 'Il' 2 +1135 'Im' 2 +1136 'In' 2 +1137 'Io' 2 +1138 'Ip' 2 +1139 'Ir' 2 +1140 'Is' 2 +1141 'It' 2 +1142 'Iz' 2 +1143 'JA' 2 +1144 'JB' 2 +1145 'JC' 2 +1146 'JD' 2 +1147 'JE' 2 +1148 'JF' 2 +1149 'JI' 2 +1150 'JJ' 2 +1151 'JK' 2 +1152 'JM' 2 +1153 'JO' 2 +1154 'JP' 2 +1155 'JR' 2 +1156 'JS' 2 +1157 'JT' 2 +1158 'JU' 2 +1159 'Ja' 2 +1160 'Je' 2 +1161 'Ji' 2 +1162 'Jo' 2 +1163 'Js' 2 +1164 'Ju' 2 +1165 'Jy' 2 +1166 'KA' 2 +1167 'KB' 2 +1168 'KC' 2 +1169 'KD' 2 +1170 'KE' 2 +1171 'KF' 2 +1172 'KG' 2 +1173 'KH' 2 +1174 'KI' 2 +1175 'KK' 2 +1176 'KL' 2 +1177 'KM' 2 +1178 'KN' 2 +1179 'KO' 2 +1180 'KP' 2 +1181 'KR' 2 +1182 'KS' 2 +1183 'KT' 2 +1184 'KV' 2 +1185 'KW' 2 +1186 'KY' 2 +1187 'Ka' 2 +1188 'Ke' 2 +1189 'Kh' 2 +1190 'Ki' 2 +1191 'Kn' 2 +1192 'Ko' 2 +1193 'Kr' 2 +1194 'Ku' 2 +1195 'Ky' 2 +1196 'LA' 2 +1197 'LB' 2 +1198 'LC' 2 +1199 'LD' 2 +1200 'LE' 2 +1201 'LF' 2 +1202 'LG' 2 +1203 'LH' 2 +1204 'LI' 2 +1205 'LL' 2 +1206 'LM' 2 +1207 'LN' 2 +1208 'LO' 2 +1209 'LP' 2 +1210 'LR' 2 +1211 'LS' 2 +1212 'LT' 2 +1213 'LU' 2 +1214 'LV' 2 +1215 'LW' 2 +1216 'LY' 2 +1217 'La' 2 +1218 'Le' 2 +1219 'Li' 2 +1220 'Ll' 2 +1221 'Ln' 2 +1222 'Lo' 2 +1223 'Lt' 2 +1224 'Lu' 2 +1225 'Ly' 2 +1226 'MA' 2 +1227 'MB' 2 +1228 'MC' 2 +1229 'MD' 2 +1230 'ME' 2 +1231 'MF' 2 +1232 'MG' 2 +1233 'MH' 2 +1234 'MI' 2 +1235 'MK' 2 +1236 'ML' 2 +1237 'MM' 2 +1238 'MN' 2 +1239 'MO' 2 +1240 'MP' 2 +1241 'MQ' 2 +1242 'MR' 2 +1243 'MS' 2 +1244 'MT' 2 +1245 'MU' 2 +1246 'MV' 2 +1247 'MW' 2 +1248 'MX' 2 +1249 'MY' 2 +1250 'Ma' 2 +1251 'Mb' 2 +1252 'Mc' 2 +1253 'Me' 2 +1254 'Mg' 2 +1255 'Mi' 2 +1256 'Mj' 2 +1257 'Mn' 2 +1258 'Mo' 2 +1259 'Mp' 2 +1260 'Mr' 2 +1261 'Ms' 2 +1262 'Mt' 2 +1263 'Mu' 2 +1264 'My' 2 +1265 'Mz' 2 +1266 'NA' 2 +1267 'NB' 2 +1268 'NC' 2 +1269 'ND' 2 +1270 'NE' 2 +1271 'NF' 2 +1272 'NG' 2 +1273 'NH' 2 +1274 'NI' 2 +1275 'NJ' 2 +1276 'NK' 2 +1277 'NL' 2 +1278 'NM' 2 +1279 'NN' 2 +1280 'NO' 2 +1281 'NP' 2 +1282 'NR' 2 +1283 'NS' 2 +1284 'NT' 2 +1285 'NU' 2 +1286 'NV' 2 +1287 'NW' 2 +1288 'NX' 2 +1289 'NY' 2 +1290 'NZ' 2 +1291 'Na' 2 +1292 'Nb' 2 +1293 'Nd' 2 +1294 'Ne' 2 +1295 'Ng' 2 +1296 'Ni' 2 +1297 'No' 2 +1298 'Nr' 2 +1299 'Ns' 2 +1300 'Nu' 2 +1301 'Nx' 2 +1302 'Ny' 2 +1303 'Nz' 2 +1304 'OA' 2 +1305 'OB' 2 +1306 'OC' 2 +1307 'OD' 2 +1308 'OE' 2 +1309 'OF' 2 +1310 'OG' 2 +1311 'OH' 2 +1312 'OI' 2 +1313 'OK' 2 +1314 'OL' 2 +1315 'OM' 2 +1316 'ON' 2 +1317 'OO' 2 +1318 'OP' 2 +1319 'OR' 2 +1320 'OS' 2 +1321 'OT' 2 +1322 'OU' 2 +1323 'OV' 2 +1324 'OW' 2 +1325 'OX' 2 +1326 'OY' 2 +1327 'Ob' 2 +1328 'Oc' 2 +1329 'Od' 2 +1330 'Of' 2 +1331 'Oh' 2 +1332 'Oi' 2 +1333 'Ok' 2 +1334 'Ol' 2 +1335 'Om' 2 +1336 'On' 2 +1337 'Op' 2 +1338 'Or' 2 +1339 'Os' 2 +1340 'Ot' 2 +1341 'Ox' 2 +1342 'PA' 2 +1343 'PB' 2 +1344 'PC' 2 +1345 'PD' 2 +1346 'PE' 2 +1347 'PF' 2 +1348 'PG' 2 +1349 'PH' 2 +1350 'PI' 2 +1351 'PK' 2 +1352 'PL' 2 +1353 'PM' 2 +1354 'PN' 2 +1355 'PO' 2 +1356 'PP' 2 +1357 'PR' 2 +1358 'PS' 2 +1359 'PT' 2 +1360 'PU' 2 +1361 'PV' 2 +1362 'PW' 2 +1363 'PY' 2 +1364 'Pa' 2 +1365 'Pb' 2 +1366 'Pe' 2 +1367 'Ph' 2 +1368 'Pi' 2 +1369 
'Pl' 2 +1370 'Po' 2 +1371 'Pr' 2 +1372 'Ps' 2 +1373 'Pt' 2 +1374 'Pu' 2 +1375 'Px' 2 +1376 'Py' 2 +1377 'QA' 2 +1378 'QB' 2 +1379 'QC' 2 +1380 'QE' 2 +1381 'QI' 2 +1382 'QL' 2 +1383 'QM' 2 +1384 'QP' 2 +1385 'QQ' 2 +1386 'QR' 2 +1387 'QS' 2 +1388 'QT' 2 +1389 'QU' 2 +1390 'Qi' 2 +1391 'Qt' 2 +1392 'Qu' 2 +1393 'RA' 2 +1394 'RB' 2 +1395 'RC' 2 +1396 'RD' 2 +1397 'RE' 2 +1398 'RF' 2 +1399 'RG' 2 +1400 'RH' 2 +1401 'RI' 2 +1402 'RK' 2 +1403 'RL' 2 +1404 'RM' 2 +1405 'RN' 2 +1406 'RO' 2 +1407 'RP' 2 +1408 'RR' 2 +1409 'RS' 2 +1410 'RT' 2 +1411 'RU' 2 +1412 'RV' 2 +1413 'RW' 2 +1414 'RX' 2 +1415 'RY' 2 +1416 'Ra' 2 +1417 'Re' 2 +1418 'Rh' 2 +1419 'Ri' 2 +1420 'Ro' 2 +1421 'Rp' 2 +1422 'Rs' 2 +1423 'Ru' 2 +1424 'Rv' 2 +1425 'Rx' 2 +1426 'Ry' 2 +1427 'SA' 2 +1428 'SB' 2 +1429 'SC' 2 +1430 'SD' 2 +1431 'SE' 2 +1432 'SF' 2 +1433 'SG' 2 +1434 'SH' 2 +1435 'SI' 2 +1436 'SK' 2 +1437 'SL' 2 +1438 'SM' 2 +1439 'SN' 2 +1440 'SO' 2 +1441 'SP' 2 +1442 'SQ' 2 +1443 'SR' 2 +1444 'SS' 2 +1445 'ST' 2 +1446 'SU' 2 +1447 'SV' 2 +1448 'SW' 2 +1449 'SY' 2 +1450 'SZ' 2 +1451 'Sa' 2 +1452 'Sb' 2 +1453 'Sc' 2 +1454 'Se' 2 +1455 'Sh' 2 +1456 'Si' 2 +1457 'Sk' 2 +1458 'Sl' 2 +1459 'Sm' 2 +1460 'Sn' 2 +1461 'So' 2 +1462 'Sp' 2 +1463 'Sq' 2 +1464 'Sr' 2 +1465 'St' 2 +1466 'Su' 2 +1467 'Sw' 2 +1468 'Sy' 2 +1469 'Sz' 2 +1470 'TA' 2 +1471 'TB' 2 +1472 'TC' 2 +1473 'TD' 2 +1474 'TE' 2 +1475 'TF' 2 +1476 'TG' 2 +1477 'TH' 2 +1478 'TI' 2 +1479 'TK' 2 +1480 'TL' 2 +1481 'TM' 2 +1482 'TN' 2 +1483 'TO' 2 +1484 'TP' 2 +1485 'TR' 2 +1486 'TS' 2 +1487 'TT' 2 +1488 'TU' 2 +1489 'TV' 2 +1490 'TW' 2 +1491 'TX' 2 +1492 'TY' 2 +1493 'TZ' 2 +1494 'Ta' 2 +1495 'Tc' 2 +1496 'Te' 2 +1497 'Th' 2 +1498 'Ti' 2 +1499 'Tk' 2 +1500 'To' 2 +1501 'Tp' 2 +1502 'Tr' 2 +1503 'Ts' 2 +1504 'Tu' 2 +1505 'Tw' 2 +1506 'Tx' 2 +1507 'Ty' 2 +1508 'UA' 2 +1509 'UB' 2 +1510 'UC' 2 +1511 'UD' 2 +1512 'UE' 2 +1513 'UF' 2 +1514 'UG' 2 +1515 'UH' 2 +1516 'UI' 2 +1517 'UK' 2 +1518 'UL' 2 +1519 'UM' 2 +1520 'UN' 2 +1521 'UP' 2 +1522 'UR' 2 +1523 'US' 2 +1524 'UT' 2 +1525 'UU' 2 +1526 'UV' 2 +1527 'UX' 2 +1528 'UY' 2 +1529 'Ub' 2 +1530 'Uh' 2 +1531 'Ui' 2 +1532 'Uk' 2 +1533 'Ul' 2 +1534 'Um' 2 +1535 'Un' 2 +1536 'Up' 2 +1537 'Ur' 2 +1538 'Us' 2 +1539 'Ut' 2 +1540 'VA' 2 +1541 'VB' 2 +1542 'VC' 2 +1543 'VD' 2 +1544 'VE' 2 +1545 'VF' 2 +1546 'VG' 2 +1547 'VH' 2 +1548 'VI' 2 +1549 'VK' 2 +1550 'VL' 2 +1551 'VM' 2 +1552 'VN' 2 +1553 'VO' 2 +1554 'VP' 2 +1555 'VR' 2 +1556 'VS' 2 +1557 'VT' 2 +1558 'VV' 2 +1559 'Va' 2 +1560 'Ve' 2 +1561 'Vi' 2 +1562 'Vm' 2 +1563 'Vo' 2 +1564 'Vs' 2 +1565 'Vu' 2 +1566 'Vy' 2 +1567 'WA' 2 +1568 'WB' 2 +1569 'WC' 2 +1570 'WD' 2 +1571 'WE' 2 +1572 'WF' 2 +1573 'WG' 2 +1574 'WH' 2 +1575 'WI' 2 +1576 'WK' 2 +1577 'WL' 2 +1578 'WM' 2 +1579 'WN' 2 +1580 'WO' 2 +1581 'WP' 2 +1582 'WR' 2 +1583 'WS' 2 +1584 'WT' 2 +1585 'WV' 2 +1586 'WW' 2 +1587 'WX' 2 +1588 'Wa' 2 +1589 'We' 2 +1590 'Wh' 2 +1591 'Wi' 2 +1592 'Wo' 2 +1593 'Wr' 2 +1594 'Ws' 2 +1595 'Wy' 2 +1596 'XA' 2 +1597 'XB' 2 +1598 'XC' 2 +1599 'XD' 2 +1600 'XF' 2 +1601 'XG' 2 +1602 'XI' 2 +1603 'XL' 2 +1604 'XM' 2 +1605 'XP' 2 +1606 'XR' 2 +1607 'XS' 2 +1608 'XT' 2 +1609 'XV' 2 +1610 'XX' 2 +1611 'XY' 2 +1612 'Xi' 2 +1613 'YA' 2 +1614 'YC' 2 +1615 'YE' 2 +1616 'YL' 2 +1617 'YM' 2 +1618 'YN' 2 +1619 'YO' 2 +1620 'YP' 2 +1621 'YR' 2 +1622 'YS' 2 +1623 'YT' 2 +1624 'YW' 2 +1625 'YX' 2 +1626 'YY' 2 +1627 'Ya' 2 +1628 'Ye' 2 +1629 'Yo' 2 +1630 'Yu' 2 +1631 'ZA' 2 +1632 'ZE' 2 +1633 'ZH' 2 +1634 'ZO' 2 +1635 'ZT' 2 +1636 'ZW' 2 +1637 'ZX' 2 +1638 'ZY' 2 +1639 'ZZ' 2 +1640 'Za' 2 +1641 'Ze' 2 +1642 'Zh' 
2 +1643 'Zn' 2 +1644 '["' 2 +1645 '[$' 2 +1646 "['" 2 +1647 '[(' 2 +1648 '[*' 2 +1649 '[,' 2 +1650 '[-' 2 +1651 '[/' 2 +1652 '[:' 2 +1653 '[@' 2 +1654 '[[' 2 +1655 '[\\' 2 +1656 '[]' 2 +1657 '[^' 2 +1658 '[_' 2 +1659 '[{' 2 +1660 '\\"' 2 +1661 '\\$' 2 +1662 '\\%' 2 +1663 "\\'" 2 +1664 '\\(' 2 +1665 '\\)' 2 +1666 '\\,' 2 +1667 '\\-' 2 +1668 '\\.' 2 +1669 '\\/' 2 +1670 '\\;' 2 +1671 '\\<' 2 +1672 '\\[' 2 +1673 '\\\\' 2 +1674 '\\]' 2 +1675 '\\_' 2 +1676 '\\{' 2 +1677 '\\}' 2 +1678 ']"' 2 +1679 ']$' 2 +1680 "]'" 2 +1681 '](' 2 +1682 '])' 2 +1683 ']*' 2 +1684 ']+' 2 +1685 '],' 2 +1686 ']-' 2 +1687 '].' 2 +1688 ']/' 2 +1689 ']:' 2 +1690 '];' 2 +1691 ']<' 2 +1692 ']=' 2 +1693 ']>' 2 +1694 ']?' 2 +1695 '][' 2 +1696 ']\\' 2 +1697 ']]' 2 +1698 ']_' 2 +1699 ']{' 2 +1700 ']|' 2 +1701 ']}' 2 +1702 '^(' 2 +1703 '^*' 2 +1704 '^-' 2 +1705 '^\\' 2 +1706 '^^' 2 +1707 '^{' 2 +1708 '_"' 2 +1709 '_%' 2 +1710 "_'" 2 +1711 '_(' 2 +1712 '_)' 2 +1713 '_*' 2 +1714 '_,' 2 +1715 '_-' 2 +1716 '_.' 2 +1717 '_:' 2 +1718 '_;' 2 +1719 '_<' 2 +1720 '_>' 2 +1721 '_[' 2 +1722 '_\\' 2 +1723 '_]' 2 +1724 '__' 2 +1725 '_{' 2 +1726 '`)' 2 +1727 '`,' 2 +1728 '`.' 2 +1729 '`:' 2 +1730 '`;' 2 +1731 '`\\' 2 +1732 '``' 2 +1733 'aa' 2 +1734 'ab' 2 +1735 'ac' 2 +1736 'ad' 2 +1737 'ae' 2 +1738 'af' 2 +1739 'ag' 2 +1740 'ah' 2 +1741 'ai' 2 +1742 'aj' 2 +1743 'ak' 2 +1744 'al' 2 +1745 'am' 2 +1746 'an' 2 +1747 'ao' 2 +1748 'ap' 2 +1749 'aq' 2 +1750 'ar' 2 +1751 'as' 2 +1752 'at' 2 +1753 'au' 2 +1754 'av' 2 +1755 'aw' 2 +1756 'ax' 2 +1757 'ay' 2 +1758 'az' 2 +1759 'ba' 2 +1760 'bb' 2 +1761 'bc' 2 +1762 'bd' 2 +1763 'be' 2 +1764 'bf' 2 +1765 'bg' 2 +1766 'bh' 2 +1767 'bi' 2 +1768 'bj' 2 +1769 'bk' 2 +1770 'bl' 2 +1771 'bm' 2 +1772 'bn' 2 +1773 'bo' 2 +1774 'bp' 2 +1775 'br' 2 +1776 'bs' 2 +1777 'bt' 2 +1778 'bu' 2 +1779 'bv' 2 +1780 'bw' 2 +1781 'bx' 2 +1782 'by' 2 +1783 'bz' 2 +1784 'ca' 2 +1785 'cb' 2 +1786 'cc' 2 +1787 'cd' 2 +1788 'ce' 2 +1789 'cf' 2 +1790 'cg' 2 +1791 'ch' 2 +1792 'ci' 2 +1793 'cj' 2 +1794 'ck' 2 +1795 'cl' 2 +1796 'cm' 2 +1797 'cn' 2 +1798 'co' 2 +1799 'cp' 2 +1800 'cq' 2 +1801 'cr' 2 +1802 'cs' 2 +1803 'ct' 2 +1804 'cu' 2 +1805 'cv' 2 +1806 'cw' 2 +1807 'cx' 2 +1808 'cy' 2 +1809 'cz' 2 +1810 'dB' 2 +1811 'dL' 2 +1812 'dT' 2 +1813 'dX' 2 +1814 'da' 2 +1815 'db' 2 +1816 'dc' 2 +1817 'dd' 2 +1818 'de' 2 +1819 'df' 2 +1820 'dg' 2 +1821 'dh' 2 +1822 'di' 2 +1823 'dj' 2 +1824 'dk' 2 +1825 'dl' 2 +1826 'dm' 2 +1827 'dn' 2 +1828 'do' 2 +1829 'dp' 2 +1830 'dq' 2 +1831 'dr' 2 +1832 'ds' 2 +1833 'dt' 2 +1834 'du' 2 +1835 'dv' 2 +1836 'dw' 2 +1837 'dx' 2 +1838 'dy' 2 +1839 'dz' 2 +1840 'ea' 2 +1841 'eb' 2 +1842 'ec' 2 +1843 'ed' 2 +1844 'ee' 2 +1845 'ef' 2 +1846 'eg' 2 +1847 'eh' 2 +1848 'ei' 2 +1849 'ej' 2 +1850 'ek' 2 +1851 'el' 2 +1852 'em' 2 +1853 'en' 2 +1854 'eo' 2 +1855 'ep' 2 +1856 'eq' 2 +1857 'er' 2 +1858 'es' 2 +1859 'et' 2 +1860 'eu' 2 +1861 'ev' 2 +1862 'ew' 2 +1863 'ex' 2 +1864 'ey' 2 +1865 'ez' 2 +1866 'fa' 2 +1867 'fb' 2 +1868 'fc' 2 +1869 'fd' 2 +1870 'fe' 2 +1871 'ff' 2 +1872 'fg' 2 +1873 'fh' 2 +1874 'fi' 2 +1875 'fj' 2 +1876 'fk' 2 +1877 'fl' 2 +1878 'fm' 2 +1879 'fn' 2 +1880 'fo' 2 +1881 'fp' 2 +1882 'fq' 2 +1883 'fr' 2 +1884 'fs' 2 +1885 'ft' 2 +1886 'fu' 2 +1887 'fv' 2 +1888 'fw' 2 +1889 'fx' 2 +1890 'fy' 2 +1891 'ga' 2 +1892 'gb' 2 +1893 'gc' 2 +1894 'gd' 2 +1895 'ge' 2 +1896 'gf' 2 +1897 'gg' 2 +1898 'gh' 2 +1899 'gi' 2 +1900 'gl' 2 +1901 'gm' 2 +1902 'gn' 2 +1903 'go' 2 +1904 'gp' 2 +1905 'gr' 2 +1906 'gs' 2 +1907 'gt' 2 +1908 'gu' 2 +1909 'gv' 2 +1910 'gw' 2 +1911 'gx' 2 +1912 'gy' 2 +1913 'gz' 2 +1914 
'ha' 2 +1915 'hb' 2 +1916 'hc' 2 +1917 'hd' 2 +1918 'he' 2 +1919 'hf' 2 +1920 'hg' 2 +1921 'hh' 2 +1922 'hi' 2 +1923 'hj' 2 +1924 'hk' 2 +1925 'hl' 2 +1926 'hm' 2 +1927 'hn' 2 +1928 'ho' 2 +1929 'hp' 2 +1930 'hr' 2 +1931 'hs' 2 +1932 'ht' 2 +1933 'hu' 2 +1934 'hw' 2 +1935 'hy' 2 +1936 'hz' 2 +1937 'ia' 2 +1938 'ib' 2 +1939 'ic' 2 +1940 'id' 2 +1941 'ie' 2 +1942 'if' 2 +1943 'ig' 2 +1944 'ih' 2 +1945 'ii' 2 +1946 'ij' 2 +1947 'ik' 2 +1948 'il' 2 +1949 'im' 2 +1950 'in' 2 +1951 'io' 2 +1952 'ip' 2 +1953 'iq' 2 +1954 'ir' 2 +1955 'is' 2 +1956 'it' 2 +1957 'iu' 2 +1958 'iv' 2 +1959 'iw' 2 +1960 'ix' 2 +1961 'iy' 2 +1962 'iz' 2 +1963 'ja' 2 +1964 'jb' 2 +1965 'jc' 2 +1966 'jd' 2 +1967 'je' 2 +1968 'jh' 2 +1969 'ji' 2 +1970 'jj' 2 +1971 'jk' 2 +1972 'jl' 2 +1973 'jm' 2 +1974 'jn' 2 +1975 'jo' 2 +1976 'jp' 2 +1977 'jq' 2 +1978 'jr' 2 +1979 'js' 2 +1980 'jt' 2 +1981 'ju' 2 +1982 'jv' 2 +1983 'kB' 2 +1984 'ka' 2 +1985 'kb' 2 +1986 'kc' 2 +1987 'kd' 2 +1988 'ke' 2 +1989 'kg' 2 +1990 'kh' 2 +1991 'ki' 2 +1992 'kj' 2 +1993 'kk' 2 +1994 'kl' 2 +1995 'km' 2 +1996 'kn' 2 +1997 'ko' 2 +1998 'kp' 2 +1999 'kr' 2 +2000 'ks' 2 +2001 'kt' 2 +2002 'ku' 2 +2003 'kv' 2 +2004 'kw' 2 +2005 'kx' 2 +2006 'ky' 2 +2007 'la' 2 +2008 'lb' 2 +2009 'lc' 2 +2010 'ld' 2 +2011 'le' 2 +2012 'lf' 2 +2013 'lg' 2 +2014 'lh' 2 +2015 'li' 2 +2016 'lj' 2 +2017 'lk' 2 +2018 'll' 2 +2019 'lm' 2 +2020 'ln' 2 +2021 'lo' 2 +2022 'lp' 2 +2023 'lr' 2 +2024 'ls' 2 +2025 'lt' 2 +2026 'lu' 2 +2027 'lv' 2 +2028 'lw' 2 +2029 'lx' 2 +2030 'ly' 2 +2031 'lz' 2 +2032 'mL' 2 +2033 'mV' 2 +2034 'ma' 2 +2035 'mb' 2 +2036 'mc' 2 +2037 'md' 2 +2038 'me' 2 +2039 'mf' 2 +2040 'mg' 2 +2041 'mh' 2 +2042 'mi' 2 +2043 'mj' 2 +2044 'mk' 2 +2045 'ml' 2 +2046 'mm' 2 +2047 'mn' 2 +2048 'mo' 2 +2049 'mp' 2 +2050 'mq' 2 +2051 'mr' 2 +2052 'ms' 2 +2053 'mt' 2 +2054 'mu' 2 +2055 'mv' 2 +2056 'mw' 2 +2057 'mx' 2 +2058 'my' 2 +2059 'na' 2 +2060 'nb' 2 +2061 'nc' 2 +2062 'nd' 2 +2063 'ne' 2 +2064 'nf' 2 +2065 'ng' 2 +2066 'nh' 2 +2067 'ni' 2 +2068 'nj' 2 +2069 'nk' 2 +2070 'nl' 2 +2071 'nm' 2 +2072 'nn' 2 +2073 'no' 2 +2074 'np' 2 +2075 'nr' 2 +2076 'ns' 2 +2077 'nt' 2 +2078 'nu' 2 +2079 'nv' 2 +2080 'nw' 2 +2081 'nx' 2 +2082 'ny' 2 +2083 'nz' 2 +2084 'oS' 2 +2085 'oa' 2 +2086 'ob' 2 +2087 'oc' 2 +2088 'od' 2 +2089 'oe' 2 +2090 'of' 2 +2091 'og' 2 +2092 'oh' 2 +2093 'oi' 2 +2094 'oj' 2 +2095 'ok' 2 +2096 'ol' 2 +2097 'om' 2 +2098 'on' 2 +2099 'oo' 2 +2100 'op' 2 +2101 'or' 2 +2102 'os' 2 +2103 'ot' 2 +2104 'ou' 2 +2105 'ov' 2 +2106 'ow' 2 +2107 'ox' 2 +2108 'oy' 2 +2109 'oz' 2 +2110 'pH' 2 +2111 'pa' 2 +2112 'pb' 2 +2113 'pc' 2 +2114 'pd' 2 +2115 'pe' 2 +2116 'pf' 2 +2117 'pg' 2 +2118 'ph' 2 +2119 'pi' 2 +2120 'pk' 2 +2121 'pl' 2 +2122 'pm' 2 +2123 'pn' 2 +2124 'po' 2 +2125 'pp' 2 +2126 'pq' 2 +2127 'pr' 2 +2128 'ps' 2 +2129 'pt' 2 +2130 'pu' 2 +2131 'pv' 2 +2132 'pw' 2 +2133 'px' 2 +2134 'py' 2 +2135 'qa' 2 +2136 'qb' 2 +2137 'qc' 2 +2138 'qd' 2 +2139 'qh' 2 +2140 'qi' 2 +2141 'ql' 2 +2142 'qn' 2 +2143 'qp' 2 +2144 'qq' 2 +2145 'qr' 2 +2146 'qs' 2 +2147 'qt' 2 +2148 'qu' 2 +2149 'qv' 2 +2150 'qw' 2 +2151 'ra' 2 +2152 'rb' 2 +2153 'rc' 2 +2154 'rd' 2 +2155 're' 2 +2156 'rf' 2 +2157 'rg' 2 +2158 'rh' 2 +2159 'ri' 2 +2160 'rk' 2 +2161 'rl' 2 +2162 'rm' 2 +2163 'rn' 2 +2164 'ro' 2 +2165 'rp' 2 +2166 'rq' 2 +2167 'rr' 2 +2168 'rs' 2 +2169 'rt' 2 +2170 'ru' 2 +2171 'rv' 2 +2172 'rw' 2 +2173 'rx' 2 +2174 'ry' 2 +2175 'rz' 2 +2176 'sa' 2 +2177 'sb' 2 +2178 'sc' 2 +2179 'sd' 2 +2180 'se' 2 +2181 'sf' 2 +2182 'sg' 2 +2183 'sh' 2 +2184 'si' 2 +2185 'sj' 2 +2186 'sk' 2 +2187 'sl' 
2 +2188 'sm' 2 +2189 'sn' 2 +2190 'so' 2 +2191 'sp' 2 +2192 'sq' 2 +2193 'sr' 2 +2194 'ss' 2 +2195 'st' 2 +2196 'su' 2 +2197 'sv' 2 +2198 'sw' 2 +2199 'sx' 2 +2200 'sy' 2 +2201 'sz' 2 +2202 'ta' 2 +2203 'tb' 2 +2204 'tc' 2 +2205 'td' 2 +2206 'te' 2 +2207 'tf' 2 +2208 'tg' 2 +2209 'th' 2 +2210 'ti' 2 +2211 'tk' 2 +2212 'tl' 2 +2213 'tm' 2 +2214 'tn' 2 +2215 'to' 2 +2216 'tp' 2 +2217 'tr' 2 +2218 'ts' 2 +2219 'tt' 2 +2220 'tu' 2 +2221 'tv' 2 +2222 'tw' 2 +2223 'tx' 2 +2224 'ty' 2 +2225 'tz' 2 +2226 'ua' 2 +2227 'ub' 2 +2228 'uc' 2 +2229 'ud' 2 +2230 'ue' 2 +2231 'uf' 2 +2232 'ug' 2 +2233 'uh' 2 +2234 'ui' 2 +2235 'uj' 2 +2236 'uk' 2 +2237 'ul' 2 +2238 'um' 2 +2239 'un' 2 +2240 'uo' 2 +2241 'up' 2 +2242 'ur' 2 +2243 'us' 2 +2244 'ut' 2 +2245 'uu' 2 +2246 'uv' 2 +2247 'uw' 2 +2248 'ux' 2 +2249 'uy' 2 +2250 'uz' 2 +2251 'va' 2 +2252 'vb' 2 +2253 'vc' 2 +2254 'vd' 2 +2255 've' 2 +2256 'vf' 2 +2257 'vg' 2 +2258 'vh' 2 +2259 'vi' 2 +2260 'vk' 2 +2261 'vl' 2 +2262 'vm' 2 +2263 'vn' 2 +2264 'vo' 2 +2265 'vp' 2 +2266 'vr' 2 +2267 'vs' 2 +2268 'vt' 2 +2269 'vu' 2 +2270 'vv' 2 +2271 'vw' 2 +2272 'vx' 2 +2273 'vy' 2 +2274 'wa' 2 +2275 'wb' 2 +2276 'wc' 2 +2277 'wd' 2 +2278 'we' 2 +2279 'wf' 2 +2280 'wg' 2 +2281 'wh' 2 +2282 'wi' 2 +2283 'wk' 2 +2284 'wl' 2 +2285 'wm' 2 +2286 'wn' 2 +2287 'wo' 2 +2288 'wp' 2 +2289 'wr' 2 +2290 'ws' 2 +2291 'wt' 2 +2292 'wu' 2 +2293 'ww' 2 +2294 'wx' 2 +2295 'wy' 2 +2296 'xA' 2 +2297 'xB' 2 +2298 'xC' 2 +2299 'xD' 2 +2300 'xE' 2 +2301 'xF' 2 +2302 'xa' 2 +2303 'xb' 2 +2304 'xc' 2 +2305 'xd' 2 +2306 'xe' 2 +2307 'xf' 2 +2308 'xg' 2 +2309 'xh' 2 +2310 'xi' 2 +2311 'xl' 2 +2312 'xm' 2 +2313 'xn' 2 +2314 'xo' 2 +2315 'xp' 2 +2316 'xr' 2 +2317 'xs' 2 +2318 'xt' 2 +2319 'xu' 2 +2320 'xx' 2 +2321 'xy' 2 +2322 'xz' 2 +2323 'ya' 2 +2324 'yb' 2 +2325 'yc' 2 +2326 'yd' 2 +2327 'ye' 2 +2328 'yg' 2 +2329 'yi' 2 +2330 'yk' 2 +2331 'yl' 2 +2332 'ym' 2 +2333 'yn' 2 +2334 'yo' 2 +2335 'yp' 2 +2336 'yr' 2 +2337 'ys' 2 +2338 'yt' 2 +2339 'yu' 2 +2340 'yw' 2 +2341 'yx' 2 +2342 'yy' 2 +2343 'yz' 2 +2344 'zA' 2 +2345 'za' 2 +2346 'zb' 2 +2347 'zc' 2 +2348 'zd' 2 +2349 'ze' 2 +2350 'zh' 2 +2351 'zi' 2 +2352 'zk' 2 +2353 'zl' 2 +2354 'zm' 2 +2355 'zn' 2 +2356 'zo' 2 +2357 'zr' 2 +2358 'zs' 2 +2359 'zt' 2 +2360 'zu' 2 +2361 'zw' 2 +2362 'zy' 2 +2363 'zz' 2 +2364 '{"' 2 +2365 '{$' 2 +2366 '{%' 2 +2367 "{'" 2 +2368 '{(' 2 +2369 '{-' 2 +2370 '{:' 2 +2371 '{@' 2 +2372 '{\\' 2 +2373 '{{' 2 +2374 '{|' 2 +2375 '{}' 2 +2376 '|$' 2 +2377 '|(' 2 +2378 '|-' 2 +2379 '|.' 2 +2380 '|\\' 2 +2381 '|^' 2 +2382 '||' 2 +2383 '}"' 2 +2384 '}$' 2 +2385 '}%' 2 +2386 '}&' 2 +2387 "}'" 2 +2388 '}(' 2 +2389 '})' 2 +2390 '}+' 2 +2391 '},' 2 +2392 '}-' 2 +2393 '}.' 2 +2394 '}/' 2 +2395 '}:' 2 +2396 '};' 2 +2397 '}<' 2 +2398 '}=' 2 +2399 '}>' 2 +2400 '}?' 
2 +2401 '}[' 2 +2402 '}\\' 2 +2403 '}]' 2 +2404 '}_' 2 +2405 '}`' 2 +2406 '}{' 2 +2407 '}|' 2 +2408 '}}' 2 +2409 '~/' 2 +2410 '~\\' 2 +2411 '~~' 2 +2412 b'\x82\xac' 2 +2413 b'\x83\xbd' 2 +2414 b'\x86\x92' 2 +2415 b'\x88\x98' 2 +2416 b'\x8c\x80' 2 +2417 b'\x99\x82' 2 +2418 b'\x9d\xbc' 2 +2419 b'\xa3\xbc' 2 +2420 b'\xa6\x82' 2 +2421 b'\xb7\xb8' 2 +2422 b'\xbf\xbd' 2 +2423 '\x80' 2 +2424 '\x81' 2 +2425 '\x91' 2 +2426 '\x92' 2 +2427 '\x93' 2 +2428 '\x94' 2 +2429 '\x97' 2 +2430 '\xa0' 2 +2431 '¡' 2 +2432 '¢' 2 +2433 '£' 2 +2434 '¤' 2 +2435 '¥' 2 +2436 '¦' 2 +2437 '§' 2 +2438 '¨' 2 +2439 '©' 2 +2440 'ª' 2 +2441 '«' 2 +2442 '¬' 2 +2443 '\xad' 2 +2444 '®' 2 +2445 '¯' 2 +2446 '°' 2 +2447 '±' 2 +2448 '²' 2 +2449 '³' 2 +2450 '´' 2 +2451 'µ' 2 +2452 '¶' 2 +2453 '·' 2 +2454 '¸' 2 +2455 '¹' 2 +2456 'º' 2 +2457 '»' 2 +2458 '¼' 2 +2459 '½' 2 +2460 '¾' 2 +2461 '¿' 2 +2462 'À' 2 +2463 'Á' 2 +2464 'Â' 2 +2465 'Ã' 2 +2466 'Ä' 2 +2467 'Å' 2 +2468 'Æ' 2 +2469 'Ç' 2 +2470 'È' 2 +2471 'É' 2 +2472 'Ê' 2 +2473 'Ë' 2 +2474 'Ì' 2 +2475 'Í' 2 +2476 'Î' 2 +2477 'Ï' 2 +2478 'Ð' 2 +2479 'Ñ' 2 +2480 'Ò' 2 +2481 'Ó' 2 +2482 'Ô' 2 +2483 'Õ' 2 +2484 'Ö' 2 +2485 '×' 2 +2486 'Ø' 2 +2487 'Ù' 2 +2488 'Ú' 2 +2489 'Û' 2 +2490 'Ü' 2 +2491 'Ý' 2 +2492 'Þ' 2 +2493 'ß' 2 +2494 'à' 2 +2495 'á' 2 +2496 'â' 2 +2497 'ã' 2 +2498 'ä' 2 +2499 'å' 2 +2500 'æ' 2 +2501 'ç' 2 +2502 'è' 2 +2503 'é' 2 +2504 'ê' 2 +2505 'ë' 2 +2506 'ì' 2 +2507 'í' 2 +2508 'î' 2 +2509 'ï' 2 +2510 'ð' 2 +2511 'ñ' 2 +2512 'ò' 2 +2513 'ó' 2 +2514 'ô' 2 +2515 'õ' 2 +2516 'ö' 2 +2517 '÷' 2 +2518 'ø' 2 +2519 'ù' 2 +2520 'ú' 2 +2521 'û' 2 +2522 'ü' 2 +2523 'ý' 2 +2524 'þ' 2 +2525 'ÿ' 2 +2526 'Ā' 2 +2527 'ā' 2 +2528 'Ă' 2 +2529 'ă' 2 +2530 'ą' 2 +2531 'Ć' 2 +2532 'ć' 2 +2533 'ĉ' 2 +2534 'ċ' 2 +2535 'Č' 2 +2536 'č' 2 +2537 'Ď' 2 +2538 'ď' 2 +2539 'Đ' 2 +2540 'đ' 2 +2541 'Ē' 2 +2542 'ē' 2 +2543 'ĕ' 2 +2544 'Ė' 2 +2545 'ė' 2 +2546 'ę' 2 +2547 'Ě' 2 +2548 'ě' 2 +2549 'ĝ' 2 +2550 'ğ' 2 +2551 'Ġ' 2 +2552 'ġ' 2 +2553 'Ħ' 2 +2554 'ħ' 2 +2555 'ĩ' 2 +2556 'Ī' 2 +2557 'ī' 2 +2558 'ĭ' 2 +2559 'į' 2 +2560 'İ' 2 +2561 'ı' 2 +2562 'ķ' 2 +2563 'ļ' 2 +2564 'Ľ' 2 +2565 'ľ' 2 +2566 'Ł' 2 +2567 'ł' 2 +2568 'ń' 2 +2569 'ņ' 2 +2570 'ň' 2 +2571 'ŋ' 2 +2572 'Ō' 2 +2573 'ō' 2 +2574 'ŏ' 2 +2575 'Ő' 2 +2576 'ő' 2 +2577 'Œ' 2 +2578 'œ' 2 +2579 'Ř' 2 +2580 'ř' 2 +2581 'Ś' 2 +2582 'ś' 2 +2583 'ŝ' 2 +2584 'Ş' 2 +2585 'ş' 2 +2586 'Š' 2 +2587 'š' 2 +2588 'Ţ' 2 +2589 'ţ' 2 +2590 'Ť' 2 +2591 'ť' 2 +2592 'ũ' 2 +2593 'ū' 2 +2594 'ŭ' 2 +2595 'ů' 2 +2596 'Ű' 2 +2597 'ű' 2 +2598 'ų' 2 +2599 'Ÿ' 2 +2600 'Ź' 2 +2601 'ź' 2 +2602 'Ż' 2 +2603 'ż' 2 +2604 'Ž' 2 +2605 'ž' 2 +2606 'ſ' 2 +2607 'Ə' 2 +2608 'ƒ' 2 +2609 'ơ' 2 +2610 'ư' 2 +2611 'ǎ' 2 +2612 'ǐ' 2 +2613 'ǒ' 2 +2614 'ǔ' 2 +2615 'ǚ' 2 +2616 'ǧ' 2 +2617 'ǫ' 2 +2618 'Ș' 2 +2619 'ș' 2 +2620 'Ț' 2 +2621 'ț' 2 +2622 'ɐ' 2 +2623 'ɑ' 2 +2624 'ɒ' 2 +2625 'ɔ' 2 +2626 'ɕ' 2 +2627 'ə' 2 +2628 'ɛ' 2 +2629 'ɡ' 2 +2630 'ɣ' 2 +2631 'ɨ' 2 +2632 'ɪ' 2 +2633 'ɫ' 2 +2634 'ɯ' 2 +2635 'ɲ' 2 +2636 'ɵ' 2 +2637 'ɹ' 2 +2638 'ɾ' 2 +2639 'ʀ' 2 +2640 'ʁ' 2 +2641 'ʂ' 2 +2642 'ʃ' 2 +2643 'ʊ' 2 +2644 'ʋ' 2 +2645 'ʌ' 2 +2646 'ʎ' 2 +2647 'ʐ' 2 +2648 'ʑ' 2 +2649 'ʒ' 2 +2650 'ʔ' 2 +2651 'ʰ' 2 +2652 'ʲ' 2 +2653 'ʷ' 2 +2654 'ʹ' 2 +2655 'ʻ' 2 +2656 'ʼ' 2 +2657 'ʾ' 2 +2658 'ʿ' 2 +2659 'ˆ' 2 +2660 'ˇ' 2 +2661 'ˈ' 2 +2662 'ˉ' 2 +2663 'ˊ' 2 +2664 'ˋ' 2 +2665 'ˌ' 2 +2666 'ː' 2 +2667 '˙' 2 +2668 '˚' 2 +2669 '˜' 2 +2670 'ˠ' 2 +2671 '̀' 2 +2672 '́' 2 +2673 '̂' 2 +2674 '̃' 2 +2675 '̄' 2 +2676 '̈' 2 +2677 '̌' 2 +2678 '̍' 2 +2679 '̣' 2 +2680 '̥' 2 +2681 '̩' 2 +2682 '̪' 2 +2683 '̯' 2 +2684 '̱' 2 +2685 '̲' 2 
+2686 '̶' 2 +2687 '͒' 2 +2688 '͓' 2 +2689 '͘' 2 +2690 '͡' 2 +2691 'Ά' 2 +2692 'Έ' 2 +2693 'Α' 2 +2694 'Β' 2 +2695 'Γ' 2 +2696 'Δ' 2 +2697 'Ε' 2 +2698 'Ζ' 2 +2699 'Η' 2 +2700 'Θ' 2 +2701 'Ι' 2 +2702 'Κ' 2 +2703 'Λ' 2 +2704 'Μ' 2 +2705 'Ν' 2 +2706 'Ξ' 2 +2707 'Ο' 2 +2708 'Π' 2 +2709 'Ρ' 2 +2710 'Σ' 2 +2711 'Τ' 2 +2712 'Υ' 2 +2713 'Φ' 2 +2714 'Χ' 2 +2715 'Ψ' 2 +2716 'Ω' 2 +2717 'ά' 2 +2718 'έ' 2 +2719 'ή' 2 +2720 'ί' 2 +2721 'α' 2 +2722 'β' 2 +2723 'γ' 2 +2724 'δ' 2 +2725 'ε' 2 +2726 'ζ' 2 +2727 'η' 2 +2728 'θ' 2 +2729 'ι' 2 +2730 'κ' 2 +2731 'λ' 2 +2732 'μ' 2 +2733 'ν' 2 +2734 'ξ' 2 +2735 'ο' 2 +2736 'π' 2 +2737 'ρ' 2 +2738 'ς' 2 +2739 'σ' 2 +2740 'τ' 2 +2741 'υ' 2 +2742 'φ' 2 +2743 'χ' 2 +2744 'ψ' 2 +2745 'ω' 2 +2746 'ϊ' 2 +2747 'ό' 2 +2748 'ύ' 2 +2749 'ώ' 2 +2750 'ϕ' 2 +2751 'ϵ' 2 +2752 'Ё' 2 +2753 'Ђ' 2 +2754 'Є' 2 +2755 'І' 2 +2756 'Ї' 2 +2757 'Ј' 2 +2758 'Љ' 2 +2759 'Њ' 2 +2760 'Ћ' 2 +2761 'Џ' 2 +2762 'А' 2 +2763 'Б' 2 +2764 'В' 2 +2765 'Г' 2 +2766 'Д' 2 +2767 'Е' 2 +2768 'Ж' 2 +2769 'З' 2 +2770 'И' 2 +2771 'Й' 2 +2772 'К' 2 +2773 'Л' 2 +2774 'М' 2 +2775 'Н' 2 +2776 'О' 2 +2777 'П' 2 +2778 'Р' 2 +2779 'С' 2 +2780 'Т' 2 +2781 'У' 2 +2782 'Ф' 2 +2783 'Х' 2 +2784 'Ц' 2 +2785 'Ч' 2 +2786 'Ш' 2 +2787 'Щ' 2 +2788 'Ъ' 2 +2789 'Ы' 2 +2790 'Ь' 2 +2791 'Э' 2 +2792 'Ю' 2 +2793 'Я' 2 +2794 'а' 2 +2795 'б' 2 +2796 'в' 2 +2797 'г' 2 +2798 'д' 2 +2799 'е' 2 +2800 'ж' 2 +2801 'з' 2 +2802 'и' 2 +2803 'й' 2 +2804 'к' 2 +2805 'л' 2 +2806 'м' 2 +2807 'н' 2 +2808 'о' 2 +2809 'п' 2 +2810 'р' 2 +2811 'с' 2 +2812 'т' 2 +2813 'у' 2 +2814 'ф' 2 +2815 'х' 2 +2816 'ц' 2 +2817 'ч' 2 +2818 'ш' 2 +2819 'щ' 2 +2820 'ъ' 2 +2821 'ы' 2 +2822 'ь' 2 +2823 'э' 2 +2824 'ю' 2 +2825 'я' 2 +2826 'ѐ' 2 +2827 'ё' 2 +2828 'ђ' 2 +2829 'є' 2 +2830 'і' 2 +2831 'ї' 2 +2832 'ј' 2 +2833 'љ' 2 +2834 'њ' 2 +2835 'ћ' 2 +2836 'ѝ' 2 +2837 'ў' 2 +2838 'џ' 2 +2839 'ѣ' 2 +2840 'ѫ' 2 +2841 'Ґ' 2 +2842 'ґ' 2 +2843 'ғ' 2 +2844 'Қ' 2 +2845 'қ' 2 +2846 'ҡ' 2 +2847 'ң' 2 +2848 'ү' 2 +2849 'ұ' 2 +2850 'ӏ' 2 +2851 'ә' 2 +2852 'ө' 2 +2853 'Ա' 2 +2854 'Հ' 2 +2855 'Մ' 2 +2856 'Ս' 2 +2857 'ա' 2 +2858 'բ' 2 +2859 'գ' 2 +2860 'դ' 2 +2861 'ե' 2 +2862 'զ' 2 +2863 'թ' 2 +2864 'ի' 2 +2865 'լ' 2 +2866 'կ' 2 +2867 'հ' 2 +2868 'ղ' 2 +2869 'մ' 2 +2870 'յ' 2 +2871 'ն' 2 +2872 'շ' 2 +2873 'ո' 2 +2874 'պ' 2 +2875 'ս' 2 +2876 'վ' 2 +2877 'տ' 2 +2878 'ր' 2 +2879 'ց' 2 +2880 'ւ' 2 +2881 'ք' 2 +2882 'ְ' 2 +2883 'ִ' 2 +2884 'ֵ' 2 +2885 'ֶ' 2 +2886 'ַ' 2 +2887 'ָ' 2 +2888 'ֹ' 2 +2889 'ּ' 2 +2890 'ׁ' 2 +2891 'א' 2 +2892 'ב' 2 +2893 'ג' 2 +2894 'ד' 2 +2895 'ה' 2 +2896 'ו' 2 +2897 'ז' 2 +2898 'ח' 2 +2899 'ט' 2 +2900 'י' 2 +2901 'ך' 2 +2902 'כ' 2 +2903 'ל' 2 +2904 'ם' 2 +2905 'מ' 2 +2906 'ן' 2 +2907 'נ' 2 +2908 'ס' 2 +2909 'ע' 2 +2910 'ף' 2 +2911 'פ' 2 +2912 'ץ' 2 +2913 'צ' 2 +2914 'ק' 2 +2915 'ר' 2 +2916 'ש' 2 +2917 'ת' 2 +2918 '،' 2 +2919 'ء' 2 +2920 'آ' 2 +2921 'أ' 2 +2922 'إ' 2 +2923 'ئ' 2 +2924 'ا' 2 +2925 'ب' 2 +2926 'ة' 2 +2927 'ت' 2 +2928 'ث' 2 +2929 'ج' 2 +2930 'ح' 2 +2931 'خ' 2 +2932 'د' 2 +2933 'ذ' 2 +2934 'ر' 2 +2935 'ز' 2 +2936 'س' 2 +2937 'ش' 2 +2938 'ص' 2 +2939 'ض' 2 +2940 'ط' 2 +2941 'ظ' 2 +2942 'ع' 2 +2943 'غ' 2 +2944 'ـ' 2 +2945 'ف' 2 +2946 'ق' 2 +2947 'ك' 2 +2948 'ل' 2 +2949 'م' 2 +2950 'ن' 2 +2951 'ه' 2 +2952 'و' 2 +2953 'ى' 2 +2954 'ي' 2 +2955 'ً' 2 +2956 'َ' 2 +2957 'ُ' 2 +2958 'ِ' 2 +2959 'ّ' 2 +2960 'ْ' 2 +2961 'پ' 2 +2962 'چ' 2 +2963 'ک' 2 +2964 'گ' 2 +2965 'ھ' 2 +2966 'ہ' 2 +2967 'ی' 2 +2968 'ے' 2 +2969 'ە' 2 +2970 'ܐ' 2 +2971 'ܝ' 2 +2972 '߬' 2 +2973 b'\xe0\xa4' 2 +2974 b'\xe0\xa5' 2 +2975 b'\xe0\xa6' 2 +2976 b'\xe0\xa7' 2 +2977 b'\xe0\xa8' 2 +2978 
b'\xe0\xa9' 2 +2979 b'\xe0\xaa' 2 +2980 b'\xe0\xab' 2 +2981 b'\xe0\xac' 2 +2982 b'\xe0\xae' 2 +2983 b'\xe0\xaf' 2 +2984 b'\xe0\xb0' 2 +2985 b'\xe0\xb1' 2 +2986 b'\xe0\xb2' 2 +2987 b'\xe0\xb3' 2 +2988 b'\xe0\xb4' 2 +2989 b'\xe0\xb5' 2 +2990 b'\xe0\xb6' 2 +2991 b'\xe0\xb7' 2 +2992 b'\xe0\xb8' 2 +2993 b'\xe0\xb9' 2 +2994 b'\xe0\xba' 2 +2995 b'\xe0\xbc' 2 +2996 b'\xe0\xbd' 2 +2997 b'\xe1\x80' 2 +2998 b'\xe1\x83' 2 +2999 b'\xe1\x9e' 2 +3000 b'\xe1\x9f' 2 +3001 b'\xe1\xb8' 2 +3002 b'\xe1\xb9' 2 +3003 b'\xe1\xba' 2 +3004 b'\xe1\xbb' 2 +3005 b'\xe1\xbd' 2 +3006 b'\xe2\x80' 2 +3007 b'\xe2\x81' 2 +3008 b'\xe2\x82' 2 +3009 b'\xe2\x84' 2 +3010 b'\xe2\x86' 2 +3011 b'\xe2\x88' 2 +3012 b'\xe2\x89' 2 +3013 b'\xe2\x94' 2 +3014 b'\xe2\x95' 2 +3015 b'\xe2\x96' 2 +3016 b'\xe2\x97' 2 +3017 b'\xe2\x98' 2 +3018 b'\xe2\x99' 2 +3019 b'\xe2\x9c' 2 +3020 b'\xe2\x9d' 2 +3021 b'\xe3\x80' 2 +3022 b'\xe3\x81' 2 +3023 b'\xe3\x82' 2 +3024 b'\xe3\x83' 2 +3025 b'\xe4\xb8' 2 +3026 b'\xe4\xb9' 2 +3027 b'\xe4\xba' 2 +3028 b'\xe4\xbb' 2 +3029 b'\xe4\xbc' 2 +3030 b'\xe4\xbd' 2 +3031 b'\xe4\xbe' 2 +3032 b'\xe4\xbf' 2 +3033 b'\xe5\x80' 2 +3034 b'\xe5\x81' 2 +3035 b'\xe5\x82' 2 +3036 b'\xe5\x83' 2 +3037 b'\xe5\x84' 2 +3038 b'\xe5\x85' 2 +3039 b'\xe5\x86' 2 +3040 b'\xe5\x87' 2 +3041 b'\xe5\x88' 2 +3042 b'\xe5\x89' 2 +3043 b'\xe5\x8a' 2 +3044 b'\xe5\x8b' 2 +3045 b'\xe5\x8c' 2 +3046 b'\xe5\x8d' 2 +3047 b'\xe5\x8e' 2 +3048 b'\xe5\x8f' 2 +3049 b'\xe5\x90' 2 +3050 b'\xe5\x91' 2 +3051 b'\xe5\x92' 2 +3052 b'\xe5\x93' 2 +3053 b'\xe5\x94' 2 +3054 b'\xe5\x95' 2 +3055 b'\xe5\x96' 2 +3056 b'\xe5\x99' 2 +3057 b'\xe5\x9b' 2 +3058 b'\xe5\x9c' 2 +3059 b'\xe5\x9d' 2 +3060 b'\xe5\x9e' 2 +3061 b'\xe5\x9f' 2 +3062 b'\xe5\xa0' 2 +3063 b'\xe5\xa1' 2 +3064 b'\xe5\xa2' 2 +3065 b'\xe5\xa3' 2 +3066 b'\xe5\xa4' 2 +3067 b'\xe5\xa5' 2 +3068 b'\xe5\xa6' 2 +3069 b'\xe5\xa7' 2 +3070 b'\xe5\xad' 2 +3071 b'\xe5\xae' 2 +3072 b'\xe5\xaf' 2 +3073 b'\xe5\xb0' 2 +3074 b'\xe5\xb1' 2 +3075 b'\xe5\xb2' 2 +3076 b'\xe5\xb7' 2 +3077 b'\xe5\xb8' 2 +3078 b'\xe5\xb9' 2 +3079 b'\xe5\xba' 2 +3080 b'\xe5\xbb' 2 +3081 b'\xe5\xbc' 2 +3082 b'\xe5\xbd' 2 +3083 b'\xe5\xbe' 2 +3084 b'\xe5\xbf' 2 +3085 b'\xe6\x80' 2 +3086 b'\xe6\x81' 2 +3087 b'\xe6\x82' 2 +3088 b'\xe6\x83' 2 +3089 b'\xe6\x84' 2 +3090 b'\xe6\x85' 2 +3091 b'\xe6\x88' 2 +3092 b'\xe6\x89' 2 +3093 b'\xe6\x8a' 2 +3094 b'\xe6\x8b' 2 +3095 b'\xe6\x8c' 2 +3096 b'\xe6\x8d' 2 +3097 b'\xe6\x8e' 2 +3098 b'\xe6\x8f' 2 +3099 b'\xe6\x91' 2 +3100 b'\xe6\x92' 2 +3101 b'\xe6\x93' 2 +3102 b'\xe6\x94' 2 +3103 b'\xe6\x95' 2 +3104 b'\xe6\x96' 2 +3105 b'\xe6\x97' 2 +3106 b'\xe6\x98' 2 +3107 b'\xe6\x99' 2 +3108 b'\xe6\x9a' 2 +3109 b'\xe6\x9b' 2 +3110 b'\xe6\x9c' 2 +3111 b'\xe6\x9d' 2 +3112 b'\xe6\x9e' 2 +3113 b'\xe6\x9f' 2 +3114 b'\xe6\xa0' 2 +3115 b'\xe6\xa1' 2 +3116 b'\xe6\xa2' 2 +3117 b'\xe6\xa3' 2 +3118 b'\xe6\xa5' 2 +3119 b'\xe6\xa7' 2 +3120 b'\xe6\xa8' 2 +3121 b'\xe6\xa9' 2 +3122 b'\xe6\xac' 2 +3123 b'\xe6\xad' 2 +3124 b'\xe6\xae' 2 +3125 b'\xe6\xaf' 2 +3126 b'\xe6\xb0' 2 +3127 b'\xe6\xb1' 2 +3128 b'\xe6\xb2' 2 +3129 b'\xe6\xb3' 2 +3130 b'\xe6\xb4' 2 +3131 b'\xe6\xb5' 2 +3132 b'\xe6\xb6' 2 +3133 b'\xe6\xb7' 2 +3134 b'\xe6\xb8' 2 +3135 b'\xe6\xb9' 2 +3136 b'\xe6\xba' 2 +3137 b'\xe6\xbb' 2 +3138 b'\xe6\xbc' 2 +3139 b'\xe7\x81' 2 +3140 b'\xe7\x82' 2 +3141 b'\xe7\x84' 2 +3142 b'\xe7\x88' 2 +3143 b'\xe7\x89' 2 +3144 b'\xe7\x8a' 2 +3145 b'\xe7\x8e' 2 +3146 b'\xe7\x8f' 2 +3147 b'\xe7\x90' 2 +3148 b'\xe7\x94' 2 +3149 b'\xe7\x95' 2 +3150 b'\xe7\x97' 2 +3151 b'\xe7\x99' 2 +3152 b'\xe7\x9a' 2 +3153 b'\xe7\x9b' 2 +3154 b'\xe7\x9c' 2 +3155 b'\xe7\x9d' 2 
+3156 b'\xe7\x9f' 2 +3157 b'\xe7\xa0' 2 +3158 b'\xe7\xa1' 2 +3159 b'\xe7\xa2' 2 +3160 b'\xe7\xa4' 2 +3161 b'\xe7\xa5' 2 +3162 b'\xe7\xa6' 2 +3163 b'\xe7\xa7' 2 +3164 b'\xe7\xa8' 2 +3165 b'\xe7\xa9' 2 +3166 b'\xe7\xaa' 2 +3167 b'\xe7\xab' 2 +3168 b'\xe7\xac' 2 +3169 b'\xe7\xad' 2 +3170 b'\xe7\xae' 2 +3171 b'\xe7\xaf' 2 +3172 b'\xe7\xb1' 2 +3173 b'\xe7\xb2' 2 +3174 b'\xe7\xb3' 2 +3175 b'\xe7\xb4' 2 +3176 b'\xe7\xb5' 2 +3177 b'\xe7\xb6' 2 +3178 b'\xe7\xb7' 2 +3179 b'\xe7\xba' 2 +3180 b'\xe7\xbb' 2 +3181 b'\xe7\xbc' 2 +3182 b'\xe7\xbd' 2 +3183 b'\xe7\xbe' 2 +3184 b'\xe7\xbf' 2 +3185 b'\xe8\x80' 2 +3186 b'\xe8\x81' 2 +3187 b'\xe8\x82' 2 +3188 b'\xe8\x83' 2 +3189 b'\xe8\x84' 2 +3190 b'\xe8\x87' 2 +3191 b'\xe8\x88' 2 +3192 b'\xe8\x89' 2 +3193 b'\xe8\x8a' 2 +3194 b'\xe8\x8b' 2 +3195 b'\xe8\x8c' 2 +3196 b'\xe8\x8d' 2 +3197 b'\xe8\x8e' 2 +3198 b'\xe8\x90' 2 +3199 b'\xe8\x99' 2 +3200 b'\xe8\xa1' 2 +3201 b'\xe8\xa2' 2 +3202 b'\xe8\xa3' 2 +3203 b'\xe8\xa6' 2 +3204 b'\xe8\xa7' 2 +3205 b'\xe8\xa8' 2 +3206 b'\xe8\xa9' 2 +3207 b'\xe8\xaa' 2 +3208 b'\xe8\xab' 2 +3209 b'\xe8\xad' 2 +3210 b'\xe8\xae' 2 +3211 b'\xe8\xaf' 2 +3212 b'\xe8\xb0' 2 +3213 b'\xe8\xb1' 2 +3214 b'\xe8\xb2' 2 +3215 b'\xe8\xb3' 2 +3216 b'\xe8\xb4' 2 +3217 b'\xe8\xb5' 2 +3218 b'\xe8\xb6' 2 +3219 b'\xe8\xb7' 2 +3220 b'\xe8\xba' 2 +3221 b'\xe8\xbb' 2 +3222 b'\xe8\xbc' 2 +3223 b'\xe8\xbd' 2 +3224 b'\xe8\xbe' 2 +3225 b'\xe8\xbf' 2 +3226 b'\xe9\x80' 2 +3227 b'\xe9\x81' 2 +3228 b'\xe9\x82' 2 +3229 b'\xe9\x83' 2 +3230 b'\xe9\x85' 2 +3231 b'\xe9\x87' 2 +3232 b'\xe9\x8c' 2 +3233 b'\xe9\x92' 2 +3234 b'\xe9\x93' 2 +3235 b'\xe9\x94' 2 +3236 b'\xe9\x95' 2 +3237 b'\xe9\x96' 2 +3238 b'\xe9\x97' 2 +3239 b'\xe9\x98' 2 +3240 b'\xe9\x99' 2 +3241 b'\xe9\x9a' 2 +3242 b'\xe9\x9b' 2 +3243 b'\xe9\x9c' 2 +3244 b'\xe9\x9d' 2 +3245 b'\xe9\x9f' 2 +3246 b'\xe9\xa0' 2 +3247 b'\xe9\xa1' 2 +3248 b'\xe9\xa2' 2 +3249 b'\xe9\xa3' 2 +3250 b'\xe9\xa6' 2 +3251 b'\xe9\xa9' 2 +3252 b'\xe9\xaa' 2 +3253 b'\xe9\xab' 2 +3254 b'\xe9\xbb' 2 +3255 b'\xe9\xbe' 2 +3256 b'\xea\xb0' 2 +3257 b'\xea\xb1' 2 +3258 b'\xea\xb2' 2 +3259 b'\xea\xb3' 2 +3260 b'\xea\xb5' 2 +3261 b'\xea\xb7' 2 +3262 b'\xea\xb8' 2 +3263 b'\xea\xb9' 2 +3264 b'\xeb\x82' 2 +3265 b'\xeb\x84' 2 +3266 b'\xeb\x85' 2 +3267 b'\xeb\x8a' 2 +3268 b'\xeb\x8b' 2 +3269 b'\xeb\x8d' 2 +3270 b'\xeb\x8f' 2 +3271 b'\xeb\x90' 2 +3272 b'\xeb\x93' 2 +3273 b'\xeb\x9e' 2 +3274 b'\xeb\x9f' 2 +3275 b'\xeb\xa0' 2 +3276 b'\xeb\xa1' 2 +3277 b'\xeb\xa3' 2 +3278 b'\xeb\xa5' 2 +3279 b'\xeb\xa6' 2 +3280 b'\xeb\xa7' 2 +3281 b'\xeb\xa9' 2 +3282 b'\xeb\xaa' 2 +3283 b'\xeb\xb0' 2 +3284 b'\xeb\xb2' 2 +3285 b'\xeb\xb3' 2 +3286 b'\xeb\xb6' 2 +3287 b'\xec\x83' 2 +3288 b'\xec\x84' 2 +3289 b'\xec\x85' 2 +3290 b'\xec\x86' 2 +3291 b'\xec\x8a' 2 +3292 b'\xec\x8b' 2 +3293 b'\xec\x95' 2 +3294 b'\xec\x96' 2 +3295 b'\xec\x97' 2 +3296 b'\xec\x98' 2 +3297 b'\xec\x99' 2 +3298 b'\xec\x9a' 2 +3299 b'\xec\x9b' 2 +3300 b'\xec\x9c' 2 +3301 b'\xec\x9d' 2 +3302 b'\xec\x9e' 2 +3303 b'\xec\xa0' 2 +3304 b'\xec\xa7' 2 +3305 b'\xec\xb0' 2 +3306 b'\xec\xb2' 2 +3307 b'\xec\xb9' 2 +3308 b'\xed\x83' 2 +3309 b'\xed\x8a' 2 +3310 b'\xed\x8c' 2 +3311 b'\xed\x95' 2 +3312 b'\xed\x98' 2 +3313 b'\xed\x99' 2 +3314 b'\xef\xb8' 2 +3315 b'\xef\xbc' 2 +3316 b'\xef\xbd' 2 +3317 b'\xef\xbf' 2 +3318 b'\xf0\x9d' 2 +3319 b'\xf0\x9f' 2 +3320 '\t\t\t' 3 +3321 '\t\t\n' 3 +3322 '\t\t ' 3 +3323 '\t ' 3 +3324 '\n\t\t' 3 +3325 '\n\t\n' 3 +3326 '\n\t ' 3 +3327 '\n\n\t' 3 +3328 '\n\n\n' 3 +3329 '\n\n ' 3 +3330 '\n \n' 3 +3331 '\n ' 3 +3332 '\r\n\t' 3 +3333 '\r\n ' 3 +3334 ' \t\t' 3 +3335 ' \n\t' 3 +3336 ' \n\n' 3 
+3337 ' \n ' 3 +3338 ' \r\n' 3 +3339 ' \n' 3 +3340 ' ' 3 +3341 ' !!' 3 +3342 ' !(' 3 +3343 ' !=' 3 +3344 ' ""' 3 +3345 ' "#' 3 +3346 ' "$' 3 +3347 ' "%' 3 +3348 ' "&' 3 +3349 ' "\'' 3 +3350 ' "(' 3 +3351 ' ")' 3 +3352 ' "*' 3 +3353 ' "+' 3 +3354 ' ",' 3 +3355 ' "-' 3 +3356 ' ".' 3 +3357 ' "/' 3 +3358 ' ":' 3 +3359 ' ";' 3 +3360 ' "<' 3 +3361 ' ">' 3 +3362 ' "@' 3 +3363 ' "[' 3 +3364 ' "\\' 3 +3365 ' "]' 3 +3366 ' "^' 3 +3367 ' "_' 3 +3368 ' "`' 3 +3369 ' "{' 3 +3370 ' "~' 3 +3371 ' #"' 3 +3372 ' ##' 3 +3373 ' #(' 3 +3374 ' #:' 3 +3375 ' #[' 3 +3376 ' #{' 3 +3377 ' $$' 3 +3378 ' $(' 3 +3379 ' $,' 3 +3380 ' $.' 3 +3381 ' $\\' 3 +3382 ' $_' 3 +3383 ' ${' 3 +3384 ' %%' 3 +3385 ' %.' 3 +3386 ' %>' 3 +3387 ' %{' 3 +3388 ' %}' 3 +3389 ' &#' 3 +3390 ' &$' 3 +3391 ' &&' 3 +3392 ' &=' 3 +3393 ' \'"' 3 +3394 " '#" 3 +3395 " '$" 3 +3396 " '%" 3 +3397 " '&" 3 +3398 " ''" 3 +3399 " ')" 3 +3400 " '*" 3 +3401 " '+" 3 +3402 " '," 3 +3403 " '-" 3 +3404 " '." 3 +3405 " '/" 3 +3406 " ':" 3 +3407 " ';" 3 +3408 " '<" 3 +3409 " '@" 3 +3410 " '[" 3 +3411 " '\\" 3 +3412 " '_" 3 +3413 " '{" 3 +3414 ' (!' 3 +3415 ' ("' 3 +3416 ' (#' 3 +3417 ' ($' 3 +3418 ' (%' 3 +3419 ' (&' 3 +3420 " ('" 3 +3421 ' ((' 3 +3422 ' ()' 3 +3423 ' (*' 3 +3424 ' (+' 3 +3425 ' (-' 3 +3426 ' (.' 3 +3427 ' (/' 3 +3428 ' (:' 3 +3429 ' (;' 3 +3430 ' (<' 3 +3431 ' (=' 3 +3432 ' (>' 3 +3433 ' (?' 3 +3434 ' (@' 3 +3435 ' ([' 3 +3436 ' (\\' 3 +3437 ' (_' 3 +3438 ' (`' 3 +3439 ' ({' 3 +3440 ' ))' 3 +3441 ' ),' 3 +3442 ' ).' 3 +3443 ' ):' 3 +3444 ' );' 3 +3445 ' ){' 3 +3446 ' *(' 3 +3447 ' *)' 3 +3448 ' **' 3 +3449 ' *,' 3 +3450 ' *.' 3 +3451 ' */' 3 +3452 ' *=' 3 +3453 ' *[' 3 +3454 ' +"' 3 +3455 ' ++' 3 +3456 ' +=' 3 +3457 ' +\\' 3 +3458 ' ,"' 3 +3459 ' -(' 3 +3460 ' --' 3 +3461 ' -.' 3 +3462 ' -=' 3 +3463 ' ->' 3 +3464 ' ."' 3 +3465 ' ..' 3 +3466 ' ./' 3 +3467 ' .=' 3 +3468 ' /*' 3 +3469 ' //' 3 +3470 ' /=' 3 +3471 ' />' 3 +3472 ' /\\' 3 +3473 ' 00' 3 +3474 ' 01' 3 +3475 ' 02' 3 +3476 ' 03' 3 +3477 ' 04' 3 +3478 ' 05' 3 +3479 ' 06' 3 +3480 ' 07' 3 +3481 ' 08' 3 +3482 ' 09' 3 +3483 ' 10' 3 +3484 ' 11' 3 +3485 ' 12' 3 +3486 ' 13' 3 +3487 ' 14' 3 +3488 ' 15' 3 +3489 ' 16' 3 +3490 ' 17' 3 +3491 ' 18' 3 +3492 ' 19' 3 +3493 ' 20' 3 +3494 ' 21' 3 +3495 ' 22' 3 +3496 ' 23' 3 +3497 ' 24' 3 +3498 ' 25' 3 +3499 ' 26' 3 +3500 ' 27' 3 +3501 ' 28' 3 +3502 ' 29' 3 +3503 ' 30' 3 +3504 ' 31' 3 +3505 ' 32' 3 +3506 ' 33' 3 +3507 ' 34' 3 +3508 ' 35' 3 +3509 ' 36' 3 +3510 ' 37' 3 +3511 ' 38' 3 +3512 ' 39' 3 +3513 ' 40' 3 +3514 ' 41' 3 +3515 ' 42' 3 +3516 ' 43' 3 +3517 ' 44' 3 +3518 ' 45' 3 +3519 ' 46' 3 +3520 ' 47' 3 +3521 ' 48' 3 +3522 ' 49' 3 +3523 ' 50' 3 +3524 ' 51' 3 +3525 ' 52' 3 +3526 ' 53' 3 +3527 ' 54' 3 +3528 ' 55' 3 +3529 ' 56' 3 +3530 ' 57' 3 +3531 ' 58' 3 +3532 ' 59' 3 +3533 ' 60' 3 +3534 ' 61' 3 +3535 ' 62' 3 +3536 ' 63' 3 +3537 ' 64' 3 +3538 ' 65' 3 +3539 ' 66' 3 +3540 ' 67' 3 +3541 ' 68' 3 +3542 ' 69' 3 +3543 ' 70' 3 +3544 ' 71' 3 +3545 ' 72' 3 +3546 ' 73' 3 +3547 ' 74' 3 +3548 ' 75' 3 +3549 ' 76' 3 +3550 ' 77' 3 +3551 ' 78' 3 +3552 ' 79' 3 +3553 ' 80' 3 +3554 ' 81' 3 +3555 ' 82' 3 +3556 ' 83' 3 +3557 ' 84' 3 +3558 ' 85' 3 +3559 ' 86' 3 +3560 ' 87' 3 +3561 ' 88' 3 +3562 ' 89' 3 +3563 ' 90' 3 +3564 ' 91' 3 +3565 ' 92' 3 +3566 ' 93' 3 +3567 ' 94' 3 +3568 ' 95' 3 +3569 ' 96' 3 +3570 ' 97' 3 +3571 ' 98' 3 +3572 ' 99' 3 +3573 ' :"' 3 +3574 ' :(' 3 +3575 ' :)' 3 +3576 ' :-' 3 +3577 ' ::' 3 +3578 ' :=' 3 +3579 ' ;)' 3 +3580 ' ;;' 3 +3581 ' ' 3 +3588 ' ' 3 +3592 ' =\\' 3 +3593 ' =~' 3 +3594 ' >=' 3 +3595 ' >>' 3 +3596 ' ?,' 3 +3597 ' ?>' 3 +3598 ' ??' 
3 +3599 ' @"' 3 +3600 ' @@' 3 +3601 ' AA' 3 +3602 ' AB' 3 +3603 ' AC' 3 +3604 ' AD' 3 +3605 ' AE' 3 +3606 ' AF' 3 +3607 ' AG' 3 +3608 ' AH' 3 +3609 ' AI' 3 +3610 ' AJ' 3 +3611 ' AK' 3 +3612 ' AL' 3 +3613 ' AM' 3 +3614 ' AN' 3 +3615 ' AO' 3 +3616 ' AP' 3 +3617 ' AQ' 3 +3618 ' AR' 3 +3619 ' AS' 3 +3620 ' AT' 3 +3621 ' AU' 3 +3622 ' AV' 3 +3623 ' AW' 3 +3624 ' AX' 3 +3625 ' AZ' 3 +3626 ' Ab' 3 +3627 ' Ac' 3 +3628 ' Ad' 3 +3629 ' Af' 3 +3630 ' Ag' 3 +3631 ' Ah' 3 +3632 ' Ai' 3 +3633 ' Aj' 3 +3634 ' Ak' 3 +3635 ' Al' 3 +3636 ' Am' 3 +3637 ' An' 3 +3638 ' Ao' 3 +3639 ' Ap' 3 +3640 ' Ar' 3 +3641 ' As' 3 +3642 ' At' 3 +3643 ' Au' 3 +3644 ' Av' 3 +3645 ' Aw' 3 +3646 ' Ax' 3 +3647 ' Ay' 3 +3648 ' Az' 3 +3649 ' BA' 3 +3650 ' BB' 3 +3651 ' BC' 3 +3652 ' BD' 3 +3653 ' BE' 3 +3654 ' BF' 3 +3655 ' BG' 3 +3656 ' BH' 3 +3657 ' BI' 3 +3658 ' BJ' 3 +3659 ' BL' 3 +3660 ' BM' 3 +3661 ' BN' 3 +3662 ' BO' 3 +3663 ' BP' 3 +3664 ' BR' 3 +3665 ' BS' 3 +3666 ' BT' 3 +3667 ' BU' 3 +3668 ' BV' 3 +3669 ' BW' 3 +3670 ' BY' 3 +3671 ' Ba' 3 +3672 ' Bd' 3 +3673 ' Be' 3 +3674 ' Bh' 3 +3675 ' Bi' 3 +3676 ' Bj' 3 +3677 ' Bl' 3 +3678 ' Bo' 3 +3679 ' Br' 3 +3680 ' Bu' 3 +3681 ' By' 3 +3682 ' CA' 3 +3683 ' CB' 3 +3684 ' CC' 3 +3685 ' CD' 3 +3686 ' CE' 3 +3687 ' CF' 3 +3688 ' CG' 3 +3689 ' CH' 3 +3690 ' CI' 3 +3691 ' CJ' 3 +3692 ' CK' 3 +3693 ' CL' 3 +3694 ' CM' 3 +3695 ' CN' 3 +3696 ' CO' 3 +3697 ' CP' 3 +3698 ' CR' 3 +3699 ' CS' 3 +3700 ' CT' 3 +3701 ' CU' 3 +3702 ' CV' 3 +3703 ' CW' 3 +3704 ' CX' 3 +3705 ' CY' 3 +3706 ' Ca' 3 +3707 ' Cd' 3 +3708 ' Ce' 3 +3709 ' Cf' 3 +3710 ' Ch' 3 +3711 ' Ci' 3 +3712 ' Cl' 3 +3713 ' Co' 3 +3714 ' Cp' 3 +3715 ' Cr' 3 +3716 ' Cs' 3 +3717 ' Ct' 3 +3718 ' Cu' 3 +3719 ' Cy' 3 +3720 ' Cz' 3 +3721 ' DA' 3 +3722 ' DB' 3 +3723 ' DC' 3 +3724 ' DD' 3 +3725 ' DE' 3 +3726 ' DF' 3 +3727 ' DG' 3 +3728 ' DH' 3 +3729 ' DI' 3 +3730 ' DJ' 3 +3731 ' DK' 3 +3732 ' DL' 3 +3733 ' DM' 3 +3734 ' DN' 3 +3735 ' DO' 3 +3736 ' DP' 3 +3737 ' DR' 3 +3738 ' DS' 3 +3739 ' DT' 3 +3740 ' DU' 3 +3741 ' DV' 3 +3742 ' DW' 3 +3743 ' DX' 3 +3744 ' Da' 3 +3745 ' Db' 3 +3746 ' De' 3 +3747 ' Dh' 3 +3748 ' Di' 3 +3749 ' Dj' 3 +3750 ' Do' 3 +3751 ' Dr' 3 +3752 ' Du' 3 +3753 ' Dw' 3 +3754 ' Dy' 3 +3755 ' EA' 3 +3756 ' EB' 3 +3757 ' EC' 3 +3758 ' ED' 3 +3759 ' EE' 3 +3760 ' EF' 3 +3761 ' EG' 3 +3762 ' EH' 3 +3763 ' EL' 3 +3764 ' EM' 3 +3765 ' EN' 3 +3766 ' EO' 3 +3767 ' EP' 3 +3768 ' EQ' 3 +3769 ' ER' 3 +3770 ' ES' 3 +3771 ' ET' 3 +3772 ' EU' 3 +3773 ' EV' 3 +3774 ' EW' 3 +3775 ' EX' 3 +3776 ' Eb' 3 +3777 ' Ec' 3 +3778 ' Ed' 3 +3779 ' Eg' 3 +3780 ' Eh' 3 +3781 ' Ej' 3 +3782 ' Ek' 3 +3783 ' El' 3 +3784 ' Em' 3 +3785 ' En' 3 +3786 ' Ep' 3 +3787 ' Eq' 3 +3788 ' Er' 3 +3789 ' Es' 3 +3790 ' Et' 3 +3791 ' Eu' 3 +3792 ' Ev' 3 +3793 ' Ex' 3 +3794 ' Ey' 3 +3795 ' Ez' 3 +3796 ' FA' 3 +3797 ' FB' 3 +3798 ' FC' 3 +3799 ' FD' 3 +3800 ' FE' 3 +3801 ' FF' 3 +3802 ' FG' 3 +3803 ' FH' 3 +3804 ' FI' 3 +3805 ' FK' 3 +3806 ' FL' 3 +3807 ' FM' 3 +3808 ' FN' 3 +3809 ' FO' 3 +3810 ' FP' 3 +3811 ' FR' 3 +3812 ' FS' 3 +3813 ' FT' 3 +3814 ' FW' 3 +3815 ' FX' 3 +3816 ' FY' 3 +3817 ' Fa' 3 +3818 ' Fe' 3 +3819 ' Fi' 3 +3820 ' Fl' 3 +3821 ' Fo' 3 +3822 ' Fr' 3 +3823 ' Ft' 3 +3824 ' Fu' 3 +3825 ' GA' 3 +3826 ' GB' 3 +3827 ' GC' 3 +3828 ' GD' 3 +3829 ' GE' 3 +3830 ' GF' 3 +3831 ' GG' 3 +3832 ' GH' 3 +3833 ' GI' 3 +3834 ' GL' 3 +3835 ' GM' 3 +3836 ' GN' 3 +3837 ' GO' 3 +3838 ' GP' 3 +3839 ' GR' 3 +3840 ' GS' 3 +3841 ' GT' 3 +3842 ' GU' 3 +3843 ' GV' 3 +3844 ' GW' 3 +3845 ' Ga' 3 +3846 ' Ge' 3 +3847 ' Gh' 3 +3848 ' Gi' 3 +3849 ' Gl' 3 +3850 ' Gn' 3 +3851 ' Go' 3 +3852 ' 
Gr' 3 +3853 ' Gu' 3 +3854 ' Gy' 3 +3855 ' HA' 3 +3856 ' HB' 3 +3857 ' HC' 3 +3858 ' HD' 3 +3859 ' HE' 3 +3860 ' HF' 3 +3861 ' HG' 3 +3862 ' HH' 3 +3863 ' HI' 3 +3864 ' HK' 3 +3865 ' HL' 3 +3866 ' HM' 3 +3867 ' HO' 3 +3868 ' HP' 3 +3869 ' HQ' 3 +3870 ' HR' 3 +3871 ' HS' 3 +3872 ' HT' 3 +3873 ' HU' 3 +3874 ' HV' 3 +3875 ' HW' 3 +3876 ' HY' 3 +3877 ' Ha' 3 +3878 ' He' 3 +3879 ' Hg' 3 +3880 ' Hi' 3 +3881 ' Ho' 3 +3882 ' Hu' 3 +3883 ' Hy' 3 +3884 ' Hz' 3 +3885 ' IA' 3 +3886 ' IB' 3 +3887 ' IC' 3 +3888 ' ID' 3 +3889 ' IE' 3 +3890 ' IF' 3 +3891 ' IG' 3 +3892 ' II' 3 +3893 ' IK' 3 +3894 ' IL' 3 +3895 ' IM' 3 +3896 ' IN' 3 +3897 ' IO' 3 +3898 ' IP' 3 +3899 ' IQ' 3 +3900 ' IR' 3 +3901 ' IS' 3 +3902 ' IT' 3 +3903 ' IU' 3 +3904 ' IV' 3 +3905 ' IX' 3 +3906 ' Ib' 3 +3907 ' Id' 3 +3908 ' If' 3 +3909 ' Ig' 3 +3910 ' Ik' 3 +3911 ' Il' 3 +3912 ' Im' 3 +3913 ' In' 3 +3914 ' Io' 3 +3915 ' Ip' 3 +3916 ' Ir' 3 +3917 ' Is' 3 +3918 ' It' 3 +3919 ' Iv' 3 +3920 ' Iz' 3 +3921 ' JA' 3 +3922 ' JC' 3 +3923 ' JD' 3 +3924 ' JE' 3 +3925 ' JJ' 3 +3926 ' JM' 3 +3927 ' JO' 3 +3928 ' JP' 3 +3929 ' JR' 3 +3930 ' JS' 3 +3931 ' JU' 3 +3932 ' Ja' 3 +3933 ' Je' 3 +3934 ' Ji' 3 +3935 ' Jo' 3 +3936 ' Jr' 3 +3937 ' Ju' 3 +3938 ' KA' 3 +3939 ' KB' 3 +3940 ' KC' 3 +3941 ' KD' 3 +3942 ' KE' 3 +3943 ' KG' 3 +3944 ' KH' 3 +3945 ' KK' 3 +3946 ' KL' 3 +3947 ' KM' 3 +3948 ' KN' 3 +3949 ' KO' 3 +3950 ' KP' 3 +3951 ' KR' 3 +3952 ' KS' 3 +3953 ' KT' 3 +3954 ' KY' 3 +3955 ' Ka' 3 +3956 ' Ke' 3 +3957 ' Kh' 3 +3958 ' Ki' 3 +3959 ' Kl' 3 +3960 ' Kn' 3 +3961 ' Ko' 3 +3962 ' Kr' 3 +3963 ' Ku' 3 +3964 ' Kw' 3 +3965 ' Ky' 3 +3966 ' LA' 3 +3967 ' LB' 3 +3968 ' LC' 3 +3969 ' LD' 3 +3970 ' LE' 3 +3971 ' LF' 3 +3972 ' LG' 3 +3973 ' LH' 3 +3974 ' LI' 3 +3975 ' LL' 3 +3976 ' LM' 3 +3977 ' LN' 3 +3978 ' LO' 3 +3979 ' LP' 3 +3980 ' LR' 3 +3981 ' LS' 3 +3982 ' LT' 3 +3983 ' LU' 3 +3984 ' LV' 3 +3985 ' LW' 3 +3986 ' LX' 3 +3987 ' La' 3 +3988 ' Le' 3 +3989 ' Li' 3 +3990 ' Ll' 3 +3991 ' Lo' 3 +3992 ' Lt' 3 +3993 ' Lu' 3 +3994 ' Lv' 3 +3995 ' Ly' 3 +3996 ' MA' 3 +3997 ' MB' 3 +3998 ' MC' 3 +3999 ' MD' 3 +4000 ' ME' 3 +4001 ' MF' 3 +4002 ' MG' 3 +4003 ' MH' 3 +4004 ' MI' 3 +4005 ' MJ' 3 +4006 ' MK' 3 +4007 ' ML' 3 +4008 ' MM' 3 +4009 ' MN' 3 +4010 ' MO' 3 +4011 ' MP' 3 +4012 ' MQ' 3 +4013 ' MR' 3 +4014 ' MS' 3 +4015 ' MT' 3 +4016 ' MU' 3 +4017 ' MV' 3 +4018 ' MW' 3 +4019 ' MX' 3 +4020 ' MY' 3 +4021 ' Ma' 3 +4022 ' Mb' 3 +4023 ' Mc' 3 +4024 ' Md' 3 +4025 ' Me' 3 +4026 ' Mg' 3 +4027 ' Mi' 3 +4028 ' Mk' 3 +4029 ' Mn' 3 +4030 ' Mo' 3 +4031 ' Mr' 3 +4032 ' Ms' 3 +4033 ' Mt' 3 +4034 ' Mu' 3 +4035 ' My' 3 +4036 ' NA' 3 +4037 ' NB' 3 +4038 ' NC' 3 +4039 ' ND' 3 +4040 ' NE' 3 +4041 ' NF' 3 +4042 ' NG' 3 +4043 ' NH' 3 +4044 ' NI' 3 +4045 ' NJ' 3 +4046 ' NK' 3 +4047 ' NL' 3 +4048 ' NM' 3 +4049 ' NN' 3 +4050 ' NO' 3 +4051 ' NP' 3 +4052 ' NR' 3 +4053 ' NS' 3 +4054 ' NT' 3 +4055 ' NU' 3 +4056 ' NV' 3 +4057 ' NW' 3 +4058 ' NY' 3 +4059 ' NZ' 3 +4060 ' Na' 3 +4061 ' Nb' 3 +4062 ' Nd' 3 +4063 ' Ne' 3 +4064 ' Ng' 3 +4065 ' Ni' 3 +4066 ' No' 3 +4067 ' Nr' 3 +4068 ' Nu' 3 +4069 ' Ny' 3 +4070 ' OA' 3 +4071 ' OB' 3 +4072 ' OC' 3 +4073 ' OD' 3 +4074 ' OF' 3 +4075 ' OH' 3 +4076 ' OK' 3 +4077 ' OL' 3 +4078 ' OM' 3 +4079 ' ON' 3 +4080 ' OP' 3 +4081 ' OR' 3 +4082 ' OS' 3 +4083 ' OT' 3 +4084 ' OU' 3 +4085 ' OV' 3 +4086 ' Ob' 3 +4087 ' Oc' 3 +4088 ' Od' 3 +4089 ' Of' 3 +4090 ' Og' 3 +4091 ' Oh' 3 +4092 ' Ok' 3 +4093 ' Ol' 3 +4094 ' Om' 3 +4095 ' On' 3 +4096 ' Op' 3 +4097 ' Or' 3 +4098 ' Os' 3 +4099 ' Ot' 3 +4100 ' Ou' 3 +4101 ' Ow' 3 +4102 ' Ox' 3 +4103 ' Oz' 3 +4104 ' PA' 3 +4105 ' PB' 3 +4106 
' PC' 3 +4107 ' PD' 3 +4108 ' PE' 3 +4109 ' PF' 3 +4110 ' PG' 3 +4111 ' PH' 3 +4112 ' PI' 3 +4113 ' PJ' 3 +4114 ' PK' 3 +4115 ' PL' 3 +4116 ' PM' 3 +4117 ' PN' 3 +4118 ' PO' 3 +4119 ' PP' 3 +4120 ' PR' 3 +4121 ' PS' 3 +4122 ' PT' 3 +4123 ' PU' 3 +4124 ' PV' 3 +4125 ' PW' 3 +4126 ' PY' 3 +4127 ' Pa' 3 +4128 ' Pb' 3 +4129 ' Pe' 3 +4130 ' Pf' 3 +4131 ' Ph' 3 +4132 ' Pi' 3 +4133 ' Pl' 3 +4134 ' Po' 3 +4135 ' Pr' 3 +4136 ' Ps' 3 +4137 ' Pt' 3 +4138 ' Pu' 3 +4139 ' Py' 3 +4140 ' QA' 3 +4141 ' QB' 3 +4142 ' QC' 3 +4143 ' QQ' 3 +4144 ' QR' 3 +4145 ' QS' 3 +4146 ' QT' 3 +4147 ' QU' 3 +4148 ' Qi' 3 +4149 ' Qt' 3 +4150 ' Qu' 3 +4151 ' RA' 3 +4152 ' RB' 3 +4153 ' RC' 3 +4154 ' RD' 3 +4155 ' RE' 3 +4156 ' RF' 3 +4157 ' RG' 3 +4158 ' RH' 3 +4159 ' RI' 3 +4160 ' RJ' 3 +4161 ' RL' 3 +4162 ' RM' 3 +4163 ' RN' 3 +4164 ' RO' 3 +4165 ' RP' 3 +4166 ' RR' 3 +4167 ' RS' 3 +4168 ' RT' 3 +4169 ' RU' 3 +4170 ' RV' 3 +4171 ' RW' 3 +4172 ' RX' 3 +4173 ' Ra' 3 +4174 ' Rd' 3 +4175 ' Re' 3 +4176 ' Rh' 3 +4177 ' Ri' 3 +4178 ' Ro' 3 +4179 ' Rs' 3 +4180 ' Ru' 3 +4181 ' Rx' 3 +4182 ' Ry' 3 +4183 ' SA' 3 +4184 ' SB' 3 +4185 ' SC' 3 +4186 ' SD' 3 +4187 ' SE' 3 +4188 ' SF' 3 +4189 ' SG' 3 +4190 ' SH' 3 +4191 ' SI' 3 +4192 ' SJ' 3 +4193 ' SK' 3 +4194 ' SL' 3 +4195 ' SM' 3 +4196 ' SN' 3 +4197 ' SO' 3 +4198 ' SP' 3 +4199 ' SQ' 3 +4200 ' SR' 3 +4201 ' SS' 3 +4202 ' ST' 3 +4203 ' SU' 3 +4204 ' SV' 3 +4205 ' SW' 3 +4206 ' SY' 3 +4207 ' SZ' 3 +4208 ' Sa' 3 +4209 ' Sc' 3 +4210 ' Se' 3 +4211 ' Sh' 3 +4212 ' Si' 3 +4213 ' Sk' 3 +4214 ' Sl' 3 +4215 ' Sm' 3 +4216 ' Sn' 3 +4217 ' So' 3 +4218 ' Sp' 3 +4219 ' Sr' 3 +4220 ' St' 3 +4221 ' Su' 3 +4222 ' Sv' 3 +4223 ' Sw' 3 +4224 ' Sy' 3 +4225 ' Sz' 3 +4226 ' TA' 3 +4227 ' TB' 3 +4228 ' TC' 3 +4229 ' TD' 3 +4230 ' TE' 3 +4231 ' TF' 3 +4232 ' TG' 3 +4233 ' TH' 3 +4234 ' TI' 3 +4235 ' TK' 3 +4236 ' TL' 3 +4237 ' TM' 3 +4238 ' TN' 3 +4239 ' TO' 3 +4240 ' TP' 3 +4241 ' TR' 3 +4242 ' TS' 3 +4243 ' TT' 3 +4244 ' TU' 3 +4245 ' TV' 3 +4246 ' TW' 3 +4247 ' TX' 3 +4248 ' TY' 3 +4249 ' Ta' 3 +4250 ' Tb' 3 +4251 ' Te' 3 +4252 ' Th' 3 +4253 ' Ti' 3 +4254 ' Tk' 3 +4255 ' To' 3 +4256 ' Tr' 3 +4257 ' Ts' 3 +4258 ' Tu' 3 +4259 ' Tw' 3 +4260 ' Tx' 3 +4261 ' Ty' 3 +4262 ' UA' 3 +4263 ' UC' 3 +4264 ' UD' 3 +4265 ' UE' 3 +4266 ' UI' 3 +4267 ' UK' 3 +4268 ' UL' 3 +4269 ' UM' 3 +4270 ' UN' 3 +4271 ' UP' 3 +4272 ' UR' 3 +4273 ' US' 3 +4274 ' UT' 3 +4275 ' UV' 3 +4276 ' UW' 3 +4277 ' UX' 3 +4278 ' Ub' 3 +4279 ' Ud' 3 +4280 ' Ug' 3 +4281 ' Uh' 3 +4282 ' Ui' 3 +4283 ' Uk' 3 +4284 ' Ul' 3 +4285 ' Um' 3 +4286 ' Un' 3 +4287 ' Up' 3 +4288 ' Ur' 3 +4289 ' Us' 3 +4290 ' Ut' 3 +4291 ' VA' 3 +4292 ' VB' 3 +4293 ' VC' 3 +4294 ' VE' 3 +4295 ' VG' 3 +4296 ' VI' 3 +4297 ' VK' 3 +4298 ' VL' 3 +4299 ' VM' 3 +4300 ' VO' 3 +4301 ' VP' 3 +4302 ' VR' 3 +4303 ' VS' 3 +4304 ' VT' 3 +4305 ' VW' 3 +4306 ' Va' 3 +4307 ' Ve' 3 +4308 ' Vi' 3 +4309 ' Vo' 3 +4310 ' Vs' 3 +4311 ' Vu' 3 +4312 ' Vy' 3 +4313 ' WA' 3 +4314 ' WB' 3 +4315 ' WC' 3 +4316 ' WD' 3 +4317 ' WE' 3 +4318 ' WF' 3 +4319 ' WG' 3 +4320 ' WH' 3 +4321 ' WI' 3 +4322 ' WL' 3 +4323 ' WM' 3 +4324 ' WO' 3 +4325 ' WP' 3 +4326 ' WR' 3 +4327 ' WS' 3 +4328 ' WT' 3 +4329 ' WW' 3 +4330 ' Wa' 3 +4331 ' We' 3 +4332 ' Wh' 3 +4333 ' Wi' 3 +4334 ' Wo' 3 +4335 ' Wr' 3 +4336 ' Wu' 3 +4337 ' Wy' 3 +4338 ' XI' 3 +4339 ' XL' 3 +4340 ' XP' 3 +4341 ' XS' 3 +4342 ' XV' 3 +4343 ' XX' 3 +4344 ' XY' 3 +4345 ' Xi' 3 +4346 ' Xu' 3 +4347 ' YA' 3 +4348 ' YE' 3 +4349 ' Ya' 3 +4350 ' Ye' 3 +4351 ' Yi' 3 +4352 ' Yo' 3 +4353 ' Yu' 3 +4354 ' ZZ' 3 +4355 ' Za' 3 +4356 ' Ze' 3 +4357 ' Zh' 3 +4358 ' Zi' 3 +4359 ' Zn' 3 
+4360 ' Zo' 3 +4361 ' Zu' 3 +4362 ' Zw' 3 +4363 ' ["' 3 +4364 ' [$' 3 +4365 " ['" 3 +4366 ' [(' 3 +4367 ' [*' 3 +4368 ' [-' 3 +4369 ' [:' 3 +4370 ' [<' 3 +4371 ' [[' 3 +4372 ' [\\' 3 +4373 ' []' 3 +4374 ' [_' 3 +4375 ' [`' 3 +4376 ' [{' 3 +4377 ' \\"' 3 +4378 ' \\$' 3 +4379 " \\'" 3 +4380 ' \\(' 3 +4381 ' \\;' 3 +4382 ' \\<' 3 +4383 ' \\\\' 3 +4384 ' \\{' 3 +4385 ' \\|' 3 +4386 ' ],' 3 +4387 ' ];' 3 +4388 ' ]]' 3 +4389 ' ^{' 3 +4390 ' _(' 3 +4391 ' _)' 3 +4392 ' _,' 3 +4393 ' _.' 3 +4394 ' __' 3 +4395 ' _{' 3 +4396 ' `%' 3 +4397 " `'" 3 +4398 ' `(' 3 +4399 ' `-' 3 +4400 ' `.' 3 +4401 ' `[' 3 +4402 ' `\\' 3 +4403 ' `_' 3 +4404 ' ``' 3 +4405 ' `{' 3 +4406 ' aa' 3 +4407 ' ab' 3 +4408 ' ac' 3 +4409 ' ad' 3 +4410 ' ae' 3 +4411 ' af' 3 +4412 ' ag' 3 +4413 ' ah' 3 +4414 ' ai' 3 +4415 ' aj' 3 +4416 ' ak' 3 +4417 ' al' 3 +4418 ' am' 3 +4419 ' an' 3 +4420 ' ao' 3 +4421 ' ap' 3 +4422 ' ar' 3 +4423 ' as' 3 +4424 ' at' 3 +4425 ' au' 3 +4426 ' av' 3 +4427 ' aw' 3 +4428 ' ax' 3 +4429 ' ay' 3 +4430 ' az' 3 +4431 ' ba' 3 +4432 ' bb' 3 +4433 ' bc' 3 +4434 ' bd' 3 +4435 ' be' 3 +4436 ' bf' 3 +4437 ' bg' 3 +4438 ' bh' 3 +4439 ' bi' 3 +4440 ' bl' 3 +4441 ' bm' 3 +4442 ' bn' 3 +4443 ' bo' 3 +4444 ' bp' 3 +4445 ' br' 3 +4446 ' bs' 3 +4447 ' bt' 3 +4448 ' bu' 3 +4449 ' bw' 3 +4450 ' by' 3 +4451 ' bz' 3 +4452 ' ca' 3 +4453 ' cb' 3 +4454 ' cc' 3 +4455 ' cd' 3 +4456 ' ce' 3 +4457 ' cf' 3 +4458 ' cg' 3 +4459 ' ch' 3 +4460 ' ci' 3 +4461 ' ck' 3 +4462 ' cl' 3 +4463 ' cm' 3 +4464 ' cn' 3 +4465 ' co' 3 +4466 ' cp' 3 +4467 ' cr' 3 +4468 ' cs' 3 +4469 ' ct' 3 +4470 ' cu' 3 +4471 ' cv' 3 +4472 ' cw' 3 +4473 ' cx' 3 +4474 ' cy' 3 +4475 ' cz' 3 +4476 ' dB' 3 +4477 ' da' 3 +4478 ' db' 3 +4479 ' dc' 3 +4480 ' dd' 3 +4481 ' de' 3 +4482 ' df' 3 +4483 ' dg' 3 +4484 ' dh' 3 +4485 ' di' 3 +4486 ' dj' 3 +4487 ' dk' 3 +4488 ' dl' 3 +4489 ' dm' 3 +4490 ' dn' 3 +4491 ' do' 3 +4492 ' dp' 3 +4493 ' dq' 3 +4494 ' dr' 3 +4495 ' ds' 3 +4496 ' dt' 3 +4497 ' du' 3 +4498 ' dv' 3 +4499 ' dw' 3 +4500 ' dx' 3 +4501 ' dy' 3 +4502 ' dz' 3 +4503 ' eV' 3 +4504 ' ea' 3 +4505 ' eb' 3 +4506 ' ec' 3 +4507 ' ed' 3 +4508 ' ee' 3 +4509 ' ef' 3 +4510 ' eg' 3 +4511 ' eh' 3 +4512 ' ei' 3 +4513 ' ej' 3 +4514 ' ek' 3 +4515 ' el' 3 +4516 ' em' 3 +4517 ' en' 3 +4518 ' ep' 3 +4519 ' eq' 3 +4520 ' er' 3 +4521 ' es' 3 +4522 ' et' 3 +4523 ' eu' 3 +4524 ' ev' 3 +4525 ' ew' 3 +4526 ' ex' 3 +4527 ' ey' 3 +4528 ' ez' 3 +4529 ' fa' 3 +4530 ' fb' 3 +4531 ' fc' 3 +4532 ' fd' 3 +4533 ' fe' 3 +4534 ' ff' 3 +4535 ' fi' 3 +4536 ' fj' 3 +4537 ' fl' 3 +4538 ' fm' 3 +4539 ' fn' 3 +4540 ' fo' 3 +4541 ' fp' 3 +4542 ' fr' 3 +4543 ' fs' 3 +4544 ' ft' 3 +4545 ' fu' 3 +4546 ' fx' 3 +4547 ' fy' 3 +4548 ' ga' 3 +4549 ' gb' 3 +4550 ' gc' 3 +4551 ' ge' 3 +4552 ' gg' 3 +4553 ' gh' 3 +4554 ' gi' 3 +4555 ' gj' 3 +4556 ' gl' 3 +4557 ' gm' 3 +4558 ' gn' 3 +4559 ' go' 3 +4560 ' gp' 3 +4561 ' gr' 3 +4562 ' gs' 3 +4563 ' gt' 3 +4564 ' gu' 3 +4565 ' gw' 3 +4566 ' gy' 3 +4567 ' ha' 3 +4568 ' hd' 3 +4569 ' he' 3 +4570 ' hf' 3 +4571 ' hi' 3 +4572 ' hl' 3 +4573 ' ho' 3 +4574 ' hp' 3 +4575 ' hr' 3 +4576 ' hs' 3 +4577 ' ht' 3 +4578 ' hu' 3 +4579 ' hv' 3 +4580 ' hw' 3 +4581 ' hy' 3 +4582 ' iT' 3 +4583 ' ia' 3 +4584 ' ib' 3 +4585 ' ic' 3 +4586 ' id' 3 +4587 ' ie' 3 +4588 ' if' 3 +4589 ' ig' 3 +4590 ' ih' 3 +4591 ' ii' 3 +4592 ' ij' 3 +4593 ' ik' 3 +4594 ' il' 3 +4595 ' im' 3 +4596 ' in' 3 +4597 ' io' 3 +4598 ' ip' 3 +4599 ' ir' 3 +4600 ' is' 3 +4601 ' it' 3 +4602 ' iv' 3 +4603 ' ix' 3 +4604 ' iy' 3 +4605 ' iz' 3 +4606 ' ja' 3 +4607 ' je' 3 +4608 ' ji' 3 +4609 ' jj' 3 +4610 ' jo' 3 +4611 ' js' 3 +4612 ' ju' 3 
+4613 ' kB' 3 +4614 ' kW' 3 +4615 ' ka' 3 +4616 ' kb' 3 +4617 ' ke' 3 +4618 ' kg' 3 +4619 ' kh' 3 +4620 ' ki' 3 +4621 ' kj' 3 +4622 ' kk' 3 +4623 ' kl' 3 +4624 ' km' 3 +4625 ' kn' 3 +4626 ' ko' 3 +4627 ' kp' 3 +4628 ' kr' 3 +4629 ' ks' 3 +4630 ' kt' 3 +4631 ' ku' 3 +4632 ' kv' 3 +4633 ' kw' 3 +4634 ' ky' 3 +4635 ' la' 3 +4636 ' lb' 3 +4637 ' lc' 3 +4638 ' ld' 3 +4639 ' le' 3 +4640 ' lg' 3 +4641 ' li' 3 +4642 ' ll' 3 +4643 ' lm' 3 +4644 ' ln' 3 +4645 ' lo' 3 +4646 ' lp' 3 +4647 ' lr' 3 +4648 ' ls' 3 +4649 ' lt' 3 +4650 ' lu' 3 +4651 ' lv' 3 +4652 ' lw' 3 +4653 ' ly' 3 +4654 ' mL' 3 +4655 ' mM' 3 +4656 ' ma' 3 +4657 ' mb' 3 +4658 ' mc' 3 +4659 ' md' 3 +4660 ' me' 3 +4661 ' mf' 3 +4662 ' mg' 3 +4663 ' mi' 3 +4664 ' mk' 3 +4665 ' ml' 3 +4666 ' mm' 3 +4667 ' mn' 3 +4668 ' mo' 3 +4669 ' mp' 3 +4670 ' mr' 3 +4671 ' ms' 3 +4672 ' mt' 3 +4673 ' mu' 3 +4674 ' mv' 3 +4675 ' mw' 3 +4676 ' mx' 3 +4677 ' my' 3 +4678 ' na' 3 +4679 ' nb' 3 +4680 ' nc' 3 +4681 ' nd' 3 +4682 ' ne' 3 +4683 ' nf' 3 +4684 ' ng' 3 +4685 ' nh' 3 +4686 ' ni' 3 +4687 ' nj' 3 +4688 ' nk' 3 +4689 ' nl' 3 +4690 ' nm' 3 +4691 ' nn' 3 +4692 ' no' 3 +4693 ' np' 3 +4694 ' nr' 3 +4695 ' ns' 3 +4696 ' nt' 3 +4697 ' nu' 3 +4698 ' nv' 3 +4699 ' nw' 3 +4700 ' nx' 3 +4701 ' ny' 3 +4702 ' nz' 3 +4703 ' ob' 3 +4704 ' oc' 3 +4705 ' od' 3 +4706 ' of' 3 +4707 ' og' 3 +4708 ' oh' 3 +4709 ' ok' 3 +4710 ' ol' 3 +4711 ' om' 3 +4712 ' on' 3 +4713 ' oo' 3 +4714 ' op' 3 +4715 ' or' 3 +4716 ' os' 3 +4717 ' ot' 3 +4718 ' ou' 3 +4719 ' ov' 3 +4720 ' ow' 3 +4721 ' ox' 3 +4722 ' oy' 3 +4723 ' oz' 3 +4724 ' pH' 3 +4725 ' pa' 3 +4726 ' pb' 3 +4727 ' pc' 3 +4728 ' pd' 3 +4729 ' pe' 3 +4730 ' pf' 3 +4731 ' pg' 3 +4732 ' ph' 3 +4733 ' pi' 3 +4734 ' pk' 3 +4735 ' pl' 3 +4736 ' pm' 3 +4737 ' pn' 3 +4738 ' po' 3 +4739 ' pp' 3 +4740 ' pq' 3 +4741 ' pr' 3 +4742 ' ps' 3 +4743 ' pt' 3 +4744 ' pu' 3 +4745 ' pv' 3 +4746 ' pw' 3 +4747 ' px' 3 +4748 ' py' 3 +4749 ' qi' 3 +4750 ' qq' 3 +4751 ' qt' 3 +4752 ' qu' 3 +4753 ' ra' 3 +4754 ' rb' 3 +4755 ' rc' 3 +4756 ' rd' 3 +4757 ' re' 3 +4758 ' rf' 3 +4759 ' rg' 3 +4760 ' rh' 3 +4761 ' ri' 3 +4762 ' rm' 3 +4763 ' rn' 3 +4764 ' ro' 3 +4765 ' rp' 3 +4766 ' rr' 3 +4767 ' rs' 3 +4768 ' rt' 3 +4769 ' ru' 3 +4770 ' rv' 3 +4771 ' rw' 3 +4772 ' rx' 3 +4773 ' ry' 3 +4774 ' sa' 3 +4775 ' sb' 3 +4776 ' sc' 3 +4777 ' sd' 3 +4778 ' se' 3 +4779 ' sf' 3 +4780 ' sg' 3 +4781 ' sh' 3 +4782 ' si' 3 +4783 ' sj' 3 +4784 ' sk' 3 +4785 ' sl' 3 +4786 ' sm' 3 +4787 ' sn' 3 +4788 ' so' 3 +4789 ' sp' 3 +4790 ' sq' 3 +4791 ' sr' 3 +4792 ' ss' 3 +4793 ' st' 3 +4794 ' su' 3 +4795 ' sv' 3 +4796 ' sw' 3 +4797 ' sy' 3 +4798 ' sz' 3 +4799 ' ta' 3 +4800 ' tb' 3 +4801 ' tc' 3 +4802 ' td' 3 +4803 ' te' 3 +4804 ' tf' 3 +4805 ' th' 3 +4806 ' ti' 3 +4807 ' tk' 3 +4808 ' tl' 3 +4809 ' tm' 3 +4810 ' tn' 3 +4811 ' to' 3 +4812 ' tp' 3 +4813 ' tr' 3 +4814 ' ts' 3 +4815 ' tt' 3 +4816 ' tu' 3 +4817 ' tv' 3 +4818 ' tw' 3 +4819 ' tx' 3 +4820 ' ty' 3 +4821 ' tz' 3 +4822 ' ua' 3 +4823 ' ub' 3 +4824 ' uc' 3 +4825 ' ud' 3 +4826 ' ug' 3 +4827 ' uh' 3 +4828 ' ui' 3 +4829 ' uk' 3 +4830 ' ul' 3 +4831 ' um' 3 +4832 ' un' 3 +4833 ' up' 3 +4834 ' ur' 3 +4835 ' us' 3 +4836 ' ut' 3 +4837 ' uv' 3 +4838 ' uw' 3 +4839 ' uz' 3 +4840 ' va' 3 +4841 ' vb' 3 +4842 ' vc' 3 +4843 ' ve' 3 +4844 ' vi' 3 +4845 ' vl' 3 +4846 ' vm' 3 +4847 ' vn' 3 +4848 ' vo' 3 +4849 ' vp' 3 +4850 ' vr' 3 +4851 ' vs' 3 +4852 ' vt' 3 +4853 ' vu' 3 +4854 ' vy' 3 +4855 ' wa' 3 +4856 ' wb' 3 +4857 ' wc' 3 +4858 ' we' 3 +4859 ' wf' 3 +4860 ' wh' 3 +4861 ' wi' 3 +4862 ' wk' 3 +4863 ' wo' 3 +4864 ' wp' 3 +4865 ' wr' 3 +4866 ' ws' 
3 +4867 ' wt' 3 +4868 ' ww' 3 +4869 ' wx' 3 +4870 ' wy' 3 +4871 ' xe' 3 +4872 ' xi' 3 +4873 ' xl' 3 +4874 ' xs' 3 +4875 ' xt' 3 +4876 ' xx' 3 +4877 ' xy' 3 +4878 ' ya' 3 +4879 ' ye' 3 +4880 ' yi' 3 +4881 ' yo' 3 +4882 ' yr' 3 +4883 ' ys' 3 +4884 ' yy' 3 +4885 ' za' 3 +4886 ' ze' 3 +4887 ' zh' 3 +4888 ' zi' 3 +4889 ' zo' 3 +4890 ' zu' 3 +4891 ' zw' 3 +4892 ' zz' 3 +4893 ' {"' 3 +4894 ' {$' 3 +4895 ' {%' 3 +4896 " {'" 3 +4897 ' {(' 3 +4898 ' {-' 3 +4899 ' {:' 3 +4900 ' {@' 3 +4901 ' {\\' 3 +4902 ' {{' 3 +4903 ' {}' 3 +4904 ' |=' 3 +4905 ' |\\' 3 +4906 ' ||' 3 +4907 ' })' 3 +4908 ' },' 3 +4909 ' };' 3 +4910 ' }\\' 3 +4911 ' }]' 3 +4912 ' }{' 3 +4913 ' }}' 3 +4914 ' ~/' 3 +4915 ' \xa0' 3 +4916 ' ¡' 3 +4917 ' ¢' 3 +4918 ' £' 3 +4919 ' ¤' 3 +4920 ' ¥' 3 +4921 ' ¦' 3 +4922 ' §' 3 +4923 ' ©' 3 +4924 ' «' 3 +4925 ' ¬' 3 +4926 ' \xad' 3 +4927 ' ®' 3 +4928 ' °' 3 +4929 ' ±' 3 +4930 ' µ' 3 +4931 ' ¶' 3 +4932 ' ·' 3 +4933 ' »' 3 +4934 ' ¼' 3 +4935 ' ½' 3 +4936 ' ¿' 3 +4937 ' À' 3 +4938 ' Á' 3 +4939 ' Â' 3 +4940 ' Ã' 3 +4941 ' Ä' 3 +4942 ' Å' 3 +4943 ' Ç' 3 +4944 ' È' 3 +4945 ' É' 3 +4946 ' Ê' 3 +4947 ' Í' 3 +4948 ' Î' 3 +4949 ' Ð' 3 +4950 ' Ñ' 3 +4951 ' Ò' 3 +4952 ' Ó' 3 +4953 ' Ô' 3 +4954 ' Ö' 3 +4955 ' ×' 3 +4956 ' Ø' 3 +4957 ' Ú' 3 +4958 ' Ü' 3 +4959 ' Þ' 3 +4960 ' ß' 3 +4961 ' à' 3 +4962 ' á' 3 +4963 ' â' 3 +4964 ' ã' 3 +4965 ' ä' 3 +4966 ' å' 3 +4967 ' æ' 3 +4968 ' ç' 3 +4969 ' è' 3 +4970 ' é' 3 +4971 ' ê' 3 +4972 ' ë' 3 +4973 ' ì' 3 +4974 ' í' 3 +4975 ' î' 3 +4976 ' ð' 3 +4977 ' ñ' 3 +4978 ' ó' 3 +4979 ' ô' 3 +4980 ' ö' 3 +4981 ' ÷' 3 +4982 ' ø' 3 +4983 ' ú' 3 +4984 ' ü' 3 +4985 ' þ' 3 +4986 ' Ā' 3 +4987 ' ā' 3 +4988 ' ĉ' 3 +4989 ' Č' 3 +4990 ' č' 3 +4991 ' Đ' 3 +4992 ' đ' 3 +4993 ' İ' 3 +4994 ' Ł' 3 +4995 ' ł' 3 +4996 ' ő' 3 +4997 ' œ' 3 +4998 ' ř' 3 +4999 ' Ś' 3 +5000 ' ś' 3 +5001 ' ŝ' 3 +5002 ' Ş' 3 +5003 ' ş' 3 +5004 ' Š' 3 +5005 ' š' 3 +5006 ' ū' 3 +5007 ' Ż' 3 +5008 ' ż' 3 +5009 ' Ž' 3 +5010 ' ž' 3 +5011 ' ǫ' 3 +5012 ' ́' 3 +5013 ' ̃' 3 +5014 ' ̄' 3 +5015 ' ̇' 3 +5016 ' ̈' 3 +5017 ' ̊' 3 +5018 ' ̧' 3 +5019 ' Α' 3 +5020 ' Γ' 3 +5021 ' Δ' 3 +5022 ' Ε' 3 +5023 ' Θ' 3 +5024 ' Κ' 3 +5025 ' Λ' 3 +5026 ' Μ' 3 +5027 ' Π' 3 +5028 ' Σ' 3 +5029 ' Τ' 3 +5030 ' Φ' 3 +5031 ' Ψ' 3 +5032 ' Ω' 3 +5033 ' έ' 3 +5034 ' α' 3 +5035 ' β' 3 +5036 ' γ' 3 +5037 ' δ' 3 +5038 ' ε' 3 +5039 ' ζ' 3 +5040 ' η' 3 +5041 ' θ' 3 +5042 ' ι' 3 +5043 ' κ' 3 +5044 ' λ' 3 +5045 ' μ' 3 +5046 ' ν' 3 +5047 ' ξ' 3 +5048 ' ο' 3 +5049 ' π' 3 +5050 ' ρ' 3 +5051 ' σ' 3 +5052 ' τ' 3 +5053 ' υ' 3 +5054 ' φ' 3 +5055 ' χ' 3 +5056 ' ψ' 3 +5057 ' ω' 3 +5058 ' ό' 3 +5059 ' Є' 3 +5060 ' І' 3 +5061 ' Ј' 3 +5062 ' А' 3 +5063 ' Б' 3 +5064 ' В' 3 +5065 ' Г' 3 +5066 ' Д' 3 +5067 ' Е' 3 +5068 ' Ж' 3 +5069 ' З' 3 +5070 ' И' 3 +5071 ' Й' 3 +5072 ' К' 3 +5073 ' Л' 3 +5074 ' М' 3 +5075 ' Н' 3 +5076 ' О' 3 +5077 ' П' 3 +5078 ' Р' 3 +5079 ' С' 3 +5080 ' Т' 3 +5081 ' У' 3 +5082 ' Ф' 3 +5083 ' Х' 3 +5084 ' Ц' 3 +5085 ' Ч' 3 +5086 ' Ш' 3 +5087 ' Щ' 3 +5088 ' Э' 3 +5089 ' Ю' 3 +5090 ' Я' 3 +5091 ' а' 3 +5092 ' б' 3 +5093 ' в' 3 +5094 ' г' 3 +5095 ' д' 3 +5096 ' е' 3 +5097 ' ж' 3 +5098 ' з' 3 +5099 ' и' 3 +5100 ' й' 3 +5101 ' к' 3 +5102 ' л' 3 +5103 ' м' 3 +5104 ' н' 3 +5105 ' о' 3 +5106 ' п' 3 +5107 ' р' 3 +5108 ' с' 3 +5109 ' т' 3 +5110 ' у' 3 +5111 ' ф' 3 +5112 ' х' 3 +5113 ' ц' 3 +5114 ' ч' 3 +5115 ' ш' 3 +5116 ' щ' 3 +5117 ' э' 3 +5118 ' ю' 3 +5119 ' я' 3 +5120 ' є' 3 +5121 ' і' 3 +5122 ' ї' 3 +5123 ' ј' 3 +5124 ' א' 3 +5125 ' ב' 3 +5126 ' ה' 3 +5127 ' ו' 3 +5128 ' י' 3 +5129 ' כ' 3 +5130 ' ל' 3 +5131 ' מ' 3 +5132 ' נ' 3 +5133 ' ע' 3 +5134 ' ש' 3 +5135 ' آ' 
3 +5136 ' أ' 3 +5137 ' إ' 3 +5138 ' ا' 3 +5139 ' ب' 3 +5140 ' ت' 3 +5141 ' ج' 3 +5142 ' ح' 3 +5143 ' خ' 3 +5144 ' د' 3 +5145 ' ر' 3 +5146 ' س' 3 +5147 ' ش' 3 +5148 ' ص' 3 +5149 ' ع' 3 +5150 ' ف' 3 +5151 ' ق' 3 +5152 ' ك' 3 +5153 ' ل' 3 +5154 ' م' 3 +5155 ' ن' 3 +5156 ' ه' 3 +5157 ' و' 3 +5158 ' ي' 3 +5159 ' پ' 3 +5160 ' ک' 3 +5161 ' گ' 3 +5162 ' ی' 3 +5163 '!!!' 3 +5164 '!")' 3 +5165 '!",' 3 +5166 "!'," 3 +5167 '!),' 3 +5168 '!).' 3 +5169 '!--' 3 +5170 '"""' 3 +5171 '"",' 3 +5172 '"))' 3 +5173 '"),' 3 +5174 '").' 3 +5175 '"):' 3 +5176 '");' 3 +5177 '")]' 3 +5178 '","' 3 +5179 '"--' 3 +5180 '"/>' 3 +5181 '":"' 3 +5182 '":[' 3 +5183 '":{' 3 +5184 '"' 3 +5186 '">&' 3 +5187 '">\'' 3 +5188 '"><' 3 +5189 '"?>' 3 +5190 '"])' 3 +5191 '"],' 3 +5192 '"].' 3 +5193 '"]:' 3 +5194 '"];' 3 +5195 '"][' 3 +5196 '"]]' 3 +5197 '"]}' 3 +5198 '"})' 3 +5199 '"},' 3 +5200 '"}}' 3 +5201 '###' 3 +5202 '%),' 3 +5203 '%).' 3 +5204 '\'",' 3 +5205 "'''" 3 +5206 "')(" 3 +5207 "'))" 3 +5208 "')," 3 +5209 "')." 3 +5210 "'):" 3 +5211 "');" 3 +5212 "')[" 3 +5213 "')]" 3 +5214 "','" 3 +5215 "':'" 3 +5216 "'" 3 +5218 "'><" 3 +5219 "'])" 3 +5220 "']*" 3 +5221 "']," 3 +5222 "']." 3 +5223 "']:" 3 +5224 "'];" 3 +5225 "']=" 3 +5226 "'][" 3 +5227 "']]" 3 +5228 "']}" 3 +5229 "'ll" 3 +5230 "'re" 3 +5231 "'ve" 3 +5232 "'})" 3 +5233 "'}," 3 +5234 '(""' 3 +5235 '("#' 3 +5236 '("%' 3 +5237 '("+' 3 +5238 '(",' 3 +5239 '("-' 3 +5240 '(".' 3 +5241 '("/' 3 +5242 '(":' 3 +5243 '("<' 3 +5244 '("@' 3 +5245 '("\\' 3 +5246 '($_' 3 +5247 "('#" 3 +5248 "('$" 3 +5249 "('," 3 +5250 "('-" 3 +5251 "('." 3 +5252 "('/" 3 +5253 "(':" 3 +5254 "('<" 3 +5255 "('@" 3 +5256 "('[" 3 +5257 "('\\" 3 +5258 "('^" 3 +5259 "('_" 3 +5260 "(('" 3 +5261 '(((' 3 +5262 '(()' 3 +5263 '()"' 3 +5264 '()(' 3 +5265 '())' 3 +5266 '(),' 3 +5267 '().' 3 +5268 '():' 3 +5269 '();' 3 +5270 '()[' 3 +5271 '()]' 3 +5272 '()`' 3 +5273 '(){' 3 +5274 '()}' 3 +5275 '(*)' 3 +5276 '(**' 3 +5277 '(?:' 3 +5278 '(@"' 3 +5279 '(["' 3 +5280 "(['" 3 +5281 '([[' 3 +5282 '([\\' 3 +5283 '([^' 3 +5284 '(__' 3 +5285 "({'" 3 +5286 ')")' 3 +5287 ')",' 3 +5288 ')".' 3 +5289 ')">' 3 +5290 ")'," 3 +5291 ')(?' 3 +5292 ')))' 3 +5293 '))*' 3 +5294 ')),' 3 +5295 ')).' 3 +5296 '))/' 3 +5297 ')):' 3 +5298 '));' 3 +5299 '))?' 3 +5300 '))\\' 3 +5301 '))]' 3 +5302 ')){' 3 +5303 ')*(' 3 +5304 ')**' 3 +5305 ')+(' 3 +5306 '),(' 3 +5307 '),\\' 3 +5308 ')--' 3 +5309 ')->' 3 +5310 ')."' 3 +5311 ')..' 3 +5312 ').[' 3 +5313 ').\\' 3 +5314 ')/(' 3 +5315 ');\\' 3 +5316 ')' 3 +5349 '->_' 3 +5350 '.""' 3 +5351 '.")' 3 +5352 '.",' 3 +5353 '."[' 3 +5354 '.$$' 3 +5355 '.\'"' 3 +5356 ".''" 3 +5357 ".')" 3 +5358 ".'," 3 +5359 '.),' 3 +5360 '.).' 3 +5361 '.--' 3 +5362 '...' 3 +5363 '../' 3 +5364 '.' 3 +5371 "/')" 3 +5372 "/'," 3 +5373 '/*!' 3 +5374 '/**' 3 +5375 '/*.' 3 +5376 '///' 3 +5377 '/>.' 3 +5378 '/__' 3 +5379 ':")' 3 +5380 ':",' 3 +5381 ":')" 3 +5382 ":'," 3 +5383 ':**' 3 +5384 ':--' 3 +5385 '://' 3 +5386 ':' 3 +5393 ';")' 3 +5431 '>",' 3 +5432 '>";' 3 +5433 ">')" 3 +5434 ">'," 3 +5435 ">';" 3 +5436 '>()' 3 +5437 '>).' 3 +5438 '>::' 3 +5439 '>>>' 3 +5441 '>{{' 3 +5442 '?",' 3 +5443 "?'," 3 +5444 '?),' 3 +5445 '?).' 3 +5446 '???' 
3 +5447 'AAA' 3 +5448 'ABA' 3 +5449 'ABC' 3 +5450 'ABI' 3 +5451 'ABS' 3 +5452 'ACA' 3 +5453 'ACC' 3 +5454 'ACE' 3 +5455 'ACH' 3 +5456 'ACK' 3 +5457 'ACP' 3 +5458 'ACS' 3 +5459 'ACT' 3 +5460 'ADA' 3 +5461 'ADC' 3 +5462 'ADD' 3 +5463 'ADE' 3 +5464 'ADO' 3 +5465 'ADS' 3 +5466 'AES' 3 +5467 'AFF' 3 +5468 'AFP' 3 +5469 'AGE' 3 +5470 'AGG' 3 +5471 'AIL' 3 +5472 'AIN' 3 +5473 'AIR' 3 +5474 'ALA' 3 +5475 'ALE' 3 +5476 'ALK' 3 +5477 'ALL' 3 +5478 'ALS' 3 +5479 'ALT' 3 +5480 'AMA' 3 +5481 'AMB' 3 +5482 'AMD' 3 +5483 'AME' 3 +5484 'AMI' 3 +5485 'AML' 3 +5486 'AMP' 3 +5487 'AMS' 3 +5488 'ANA' 3 +5489 'ANC' 3 +5490 'AND' 3 +5491 'ANE' 3 +5492 'ANG' 3 +5493 'ANI' 3 +5494 'ANK' 3 +5495 'ANN' 3 +5496 'ANO' 3 +5497 'ANS' 3 +5498 'ANT' 3 +5499 'ANY' 3 +5500 'APE' 3 +5501 'APH' 3 +5502 'API' 3 +5503 'APP' 3 +5504 'APS' 3 +5505 'ARA' 3 +5506 'ARB' 3 +5507 'ARC' 3 +5508 'ARD' 3 +5509 'ARE' 3 +5510 'ARG' 3 +5511 'ARI' 3 +5512 'ARK' 3 +5513 'ARM' 3 +5514 'ARN' 3 +5515 'ARP' 3 +5516 'ARR' 3 +5517 'ARS' 3 +5518 'ART' 3 +5519 'ARY' 3 +5520 'ASA' 3 +5521 'ASC' 3 +5522 'ASE' 3 +5523 'ASH' 3 +5524 'ASK' 3 +5525 'ASM' 3 +5526 'ASP' 3 +5527 'ASS' 3 +5528 'AST' 3 +5529 'ASY' 3 +5530 'ATA' 3 +5531 'ATE' 3 +5532 'ATH' 3 +5533 'ATI' 3 +5534 'ATO' 3 +5535 'ATS' 3 +5536 'ATT' 3 +5537 'AUD' 3 +5538 'AUT' 3 +5539 'AVA' 3 +5540 'AVE' 3 +5541 'AWS' 3 +5542 'Abs' 3 +5543 'Acc' 3 +5544 'Ack' 3 +5545 'Act' 3 +5546 'Ada' 3 +5547 'Add' 3 +5548 'Adj' 3 +5549 'Adv' 3 +5550 'Aff' 3 +5551 'Age' 3 +5552 'Agg' 3 +5553 'Air' 3 +5554 'Akt' 3 +5555 'Ald' 3 +5556 'Ale' 3 +5557 'Alg' 3 +5558 'Ali' 3 +5559 'All' 3 +5560 'Alt' 3 +5561 'Amb' 3 +5562 'Amy' 3 +5563 'And' 3 +5564 'Ang' 3 +5565 'Ann' 3 +5566 'Ans' 3 +5567 'Ant' 3 +5568 'Any' 3 +5569 'Api' 3 +5570 'App' 3 +5571 'Apr' 3 +5572 'Aqu' 3 +5573 'Arc' 3 +5574 'Are' 3 +5575 'Arg' 3 +5576 'Ari' 3 +5577 'Arm' 3 +5578 'Arn' 3 +5579 'Arr' 3 +5580 'Art' 3 +5581 'Asc' 3 +5582 'Ash' 3 +5583 'Ask' 3 +5584 'Asp' 3 +5585 'Ass' 3 +5586 'Ast' 3 +5587 'Ath' 3 +5588 'Atl' 3 +5589 'Att' 3 +5590 'Aud' 3 +5591 'Aug' 3 +5592 'Aut' 3 +5593 'Aux' 3 +5594 'Avg' 3 +5595 'Aws' 3 +5596 'BAD' 3 +5597 'BAL' 3 +5598 'BAR' 3 +5599 'BAS' 3 +5600 'BAT' 3 +5601 'BBC' 3 +5602 'BER' 3 +5603 'BIG' 3 +5604 'BIN' 3 +5605 'BIT' 3 +5606 'BLE' 3 +5607 'BMI' 3 +5608 'BOT' 3 +5609 'BOX' 3 +5610 'BRE' 3 +5611 'BSD' 3 +5612 'BUF' 3 +5613 'BUG' 3 +5614 'BUR' 3 +5615 'BUS' 3 +5616 'Bab' 3 +5617 'Bad' 3 +5618 'Bag' 3 +5619 'Bah' 3 +5620 'Bal' 3 +5621 'Ban' 3 +5622 'Bar' 3 +5623 'Bas' 3 +5624 'Bat' 3 +5625 'Bay' 3 +5626 'Bbb' 3 +5627 'Bed' 3 +5628 'Bel' 3 +5629 'Ben' 3 +5630 'Ber' 3 +5631 'Bes' 3 +5632 'Bet' 3 +5633 'Bib' 3 +5634 'Bid' 3 +5635 'Big' 3 +5636 'Bin' 3 +5637 'Bio' 3 +5638 'Bir' 3 +5639 'Bit' 3 +5640 'Blo' 3 +5641 'Bob' 3 +5642 'Bol' 3 +5643 'Bon' 3 +5644 'Bor' 3 +5645 'Bot' 3 +5646 'Bow' 3 +5647 'Box' 3 +5648 'Boy' 3 +5649 'Bra' 3 +5650 'Bre' 3 +5651 'Bro' 3 +5652 'Btn' 3 +5653 'Buf' 3 +5654 'Bug' 3 +5655 'Bul' 3 +5656 'Bur' 3 +5657 'Bus' 3 +5658 'But' 3 +5659 'Buy' 3 +5660 'CAC' 3 +5661 'CAD' 3 +5662 'CAL' 3 +5663 'CAM' 3 +5664 'CAN' 3 +5665 'CAP' 3 +5666 'CAR' 3 +5667 'CAS' 3 +5668 'CAT' 3 +5669 'CBC' 3 +5670 'CBS' 3 +5671 'CCA' 3 +5672 'CCC' 3 +5673 'CDC' 3 +5674 'CDF' 3 +5675 'CEL' 3 +5676 'CEO' 3 +5677 'CEP' 3 +5678 'CER' 3 +5679 'CES' 3 +5680 'CFG' 3 +5681 'CHA' 3 +5682 'CHE' 3 +5683 'CHO' 3 +5684 'CHR' 3 +5685 'CID' 3 +5686 'CLA' 3 +5687 'CLC' 3 +5688 'CLE' 3 +5689 'CLI' 3 +5690 'CLK' 3 +5691 'CLS' 3 +5692 'CLU' 3 +5693 'CMD' 3 +5694 'CMS' 3 +5695 'CNN' 3 +5696 'CNT' 3 +5697 'COD' 3 +5698 'COL' 3 +5699 'COM' 3 +5700 
'CON' 3 +5701 'COR' 3 +5702 'COS' 3 +5703 'CPP' 3 +5704 'CPU' 3 +5705 'CRC' 3 +5706 'CRE' 3 +5707 'CSI' 3 +5708 'CSS' 3 +5709 'CSV' 3 +5710 'CTC' 3 +5711 'CTL' 3 +5712 'CTT' 3 +5713 'CTX' 3 +5714 'CUR' 3 +5715 'Cab' 3 +5716 'Cad' 3 +5717 'Cal' 3 +5718 'Cam' 3 +5719 'Can' 3 +5720 'Cap' 3 +5721 'Car' 3 +5722 'Cas' 3 +5723 'Cat' 3 +5724 'Cel' 3 +5725 'Cfg' 3 +5726 'Cha' 3 +5727 'Che' 3 +5728 'Chi' 3 +5729 'Cho' 3 +5730 'Cir' 3 +5731 'Cit' 3 +5732 'Cla' 3 +5733 'Cle' 3 +5734 'Cli' 3 +5735 'Clo' 3 +5736 'Cmd' 3 +5737 'Cnt' 3 +5738 'CoV' 3 +5739 'Cod' 3 +5740 'Cog' 3 +5741 'Col' 3 +5742 'Com' 3 +5743 'Con' 3 +5744 'Cop' 3 +5745 'Cor' 3 +5746 'Cos' 3 +5747 'Cov' 3 +5748 'Cre' 3 +5749 'Cro' 3 +5750 'Css' 3 +5751 'Csv' 3 +5752 'Ctr' 3 +5753 'Ctx' 3 +5754 'Cur' 3 +5755 'Cut' 3 +5756 'DAC' 3 +5757 'DAG' 3 +5758 'DAO' 3 +5759 'DAT' 3 +5760 'DAY' 3 +5761 'DBC' 3 +5762 'DEC' 3 +5763 'DED' 3 +5764 'DEF' 3 +5765 'DEL' 3 +5766 'DEM' 3 +5767 'DEN' 3 +5768 'DEP' 3 +5769 'DER' 3 +5770 'DES' 3 +5771 'DET' 3 +5772 'DEV' 3 +5773 'DEX' 3 +5774 'DIC' 3 +5775 'DIG' 3 +5776 'DIM' 3 +5777 'DIR' 3 +5778 'DIS' 3 +5779 'DIV' 3 +5780 'DLL' 3 +5781 'DNA' 3 +5782 'DNS' 3 +5783 'DOC' 3 +5784 'DOM' 3 +5785 'DON' 3 +5786 'DOT' 3 +5787 'DTD' 3 +5788 'DVD' 3 +5789 'Dal' 3 +5790 'Dam' 3 +5791 'Dan' 3 +5792 'Dao' 3 +5793 'Dar' 3 +5794 'Das' 3 +5795 'Dat' 3 +5796 'Dav' 3 +5797 'Day' 3 +5798 'Deb' 3 +5799 'Dec' 3 +5800 'Def' 3 +5801 'Deg' 3 +5802 'Del' 3 +5803 'Dem' 3 +5804 'Den' 3 +5805 'Dep' 3 +5806 'Der' 3 +5807 'Des' 3 +5808 'Det' 3 +5809 'Dev' 3 +5810 'Dic' 3 +5811 'Did' 3 +5812 'Die' 3 +5813 'Dig' 3 +5814 'Dim' 3 +5815 'Dir' 3 +5816 'Dis' 3 +5817 'Div' 3 +5818 'Dlg' 3 +5819 'Doc' 3 +5820 'Dog' 3 +5821 'Dom' 3 +5822 'Don' 3 +5823 'Dot' 3 +5824 'Dou' 3 +5825 'Dry' 3 +5826 'Dub' 3 +5827 'Due' 3 +5828 'Dup' 3 +5829 'Dur' 3 +5830 'Dyn' 3 +5831 'Dé' 3 +5832 'EAR' 3 +5833 'ECD' 3 +5834 'ECK' 3 +5835 'ECT' 3 +5836 'EEE' 3 +5837 'EEK' 3 +5838 'EFF' 3 +5839 'ELD' 3 +5840 'ELE' 3 +5841 'ELL' 3 +5842 'ELS' 3 +5843 'ELY' 3 +5844 'EMA' 3 +5845 'EMP' 3 +5846 'ENA' 3 +5847 'ENC' 3 +5848 'END' 3 +5849 'ENE' 3 +5850 'ENG' 3 +5851 'ENO' 3 +5852 'ENS' 3 +5853 'ENT' 3 +5854 'ENV' 3 +5855 'EOF' 3 +5856 'EPS' 3 +5857 'ERA' 3 +5858 'ERC' 3 +5859 'ERE' 3 +5860 'ERN' 3 +5861 'ERO' 3 +5862 'ERR' 3 +5863 'ERS' 3 +5864 'ERT' 3 +5865 'ERV' 3 +5866 'ERY' 3 +5867 'ESA' 3 +5868 'ESC' 3 +5869 'ESH' 3 +5870 'ESP' 3 +5871 'ESS' 3 +5872 'EST' 3 +5873 'ETA' 3 +5874 'ETH' 3 +5875 'ETS' 3 +5876 'EUR' 3 +5877 'EXP' 3 +5878 'EXT' 3 +5879 'Ear' 3 +5880 'Eff' 3 +5881 'Ele' 3 +5882 'Ell' 3 +5883 'Emb' 3 +5884 'Emp' 3 +5885 'Enc' 3 +5886 'End' 3 +5887 'Eng' 3 +5888 'Enh' 3 +5889 'Ent' 3 +5890 'Env' 3 +5891 'Equ' 3 +5892 'Err' 3 +5893 'Esc' 3 +5894 'Esp' 3 +5895 'Ess' 3 +5896 'Est' 3 +5897 'Eth' 3 +5898 'Exc' 3 +5899 'Exp' 3 +5900 'Ext' 3 +5901 'Eye' 3 +5902 'FER' 3 +5903 'FET' 3 +5904 'FFF' 3 +5905 'FFT' 3 +5906 'FIG' 3 +5907 'FIL' 3 +5908 'FIN' 3 +5909 'FIR' 3 +5910 'FIT' 3 +5911 'FIX' 3 +5912 'FLO' 3 +5913 'FOR' 3 +5914 'FUN' 3 +5915 'Fab' 3 +5916 'Fac' 3 +5917 'Fal' 3 +5918 'Fan' 3 +5919 'Far' 3 +5920 'Fat' 3 +5921 'Feb' 3 +5922 'Fed' 3 +5923 'Fel' 3 +5924 'Fer' 3 +5925 'Few' 3 +5926 'Fig' 3 +5927 'Fil' 3 +5928 'Fin' 3 +5929 'Fit' 3 +5930 'Fix' 3 +5931 'Flo' 3 +5932 'Flu' 3 +5933 'Fly' 3 +5934 'Fmt' 3 +5935 'Foo' 3 +5936 'For' 3 +5937 'Fox' 3 +5938 'Fra' 3 +5939 'Fre' 3 +5940 'Fri' 3 +5941 'Fun' 3 +5942 'GAL' 3 +5943 'GAN' 3 +5944 'GAT' 3 +5945 'GBT' 3 +5946 'GCC' 3 +5947 'GEN' 3 +5948 'GER' 3 +5949 'GES' 3 +5950 'GET' 3 +5951 'GHz' 3 +5952 'GIN' 3 +5953 'GIS' 3 +5954 
'GIT' 3 +5955 'GLE' 3 +5956 'GMT' 3 +5957 'GNU' 3 +5958 'GPL' 3 +5959 'GPS' 3 +5960 'GPU' 3 +5961 'GRA' 3 +5962 'GRE' 3 +5963 'GRO' 3 +5964 'GRP' 3 +5965 'GUI' 3 +5966 'Gab' 3 +5967 'Gal' 3 +5968 'Gap' 3 +5969 'Gar' 3 +5970 'Gas' 3 +5971 'GeV' 3 +5972 'Gem' 3 +5973 'Gen' 3 +5974 'Geo' 3 +5975 'Ger' 3 +5976 'Get' 3 +5977 'Gib' 3 +5978 'Gil' 3 +5979 'Git' 3 +5980 'God' 3 +5981 'Got' 3 +5982 'Gra' 3 +5983 'Gre' 3 +5984 'Gro' 3 +5985 'Gui' 3 +5986 'Gun' 3 +5987 'Guy' 3 +5988 'HAL' 3 +5989 'HAS' 3 +5990 'HEL' 3 +5991 'HER' 3 +5992 'HIV' 3 +5993 'HOW' 3 +5994 'Had' 3 +5995 'Hal' 3 +5996 'Ham' 3 +5997 'Han' 3 +5998 'Har' 3 +5999 'Has' 3 +6000 'Haw' 3 +6001 'Hay' 3 +6002 'Haz' 3 +6003 'Hel' 3 +6004 'Hen' 3 +6005 'Her' 3 +6006 'Hex' 3 +6007 'Hey' 3 +6008 'Hig' 3 +6009 'Hip' 3 +6010 'His' 3 +6011 'Hit' 3 +6012 'Hol' 3 +6013 'Hom' 3 +6014 'Hon' 3 +6015 'Hop' 3 +6016 'Hor' 3 +6017 'Hot' 3 +6018 'How' 3 +6019 'Hub' 3 +6020 'Hum' 3 +6021 'IAL' 3 +6022 'IAN' 3 +6023 'IAS' 3 +6024 'IBM' 3 +6025 'ICA' 3 +6026 'ICC' 3 +6027 'ICE' 3 +6028 'ICH' 3 +6029 'ICI' 3 +6030 'ICK' 3 +6031 'ICO' 3 +6032 'ICS' 3 +6033 'ICT' 3 +6034 'IDA' 3 +6035 'IDD' 3 +6036 'IDE' 3 +6037 'IDI' 3 +6038 'IDS' 3 +6039 'IDs' 3 +6040 'IED' 3 +6041 'IER' 3 +6042 'IES' 3 +6043 'IEW' 3 +6044 'IFE' 3 +6045 'IFF' 3 +6046 'IFI' 3 +6047 'IFT' 3 +6048 'IFY' 3 +6049 'IGH' 3 +6050 'IGN' 3 +6051 'III' 3 +6052 'ILD' 3 +6053 'ILE' 3 +6054 'ILL' 3 +6055 'ILS' 3 +6056 'ILY' 3 +6057 'IMA' 3 +6058 'IME' 3 +6059 'IMG' 3 +6060 'IMO' 3 +6061 'IMP' 3 +6062 'IMS' 3 +6063 'INA' 3 +6064 'INC' 3 +6065 'IND' 3 +6066 'INE' 3 +6067 'INF' 3 +6068 'ING' 3 +6069 'INI' 3 +6070 'INK' 3 +6071 'INO' 3 +6072 'INS' 3 +6073 'INT' 3 +6074 'ION' 3 +6075 'IOR' 3 +6076 'IOS' 3 +6077 'IPA' 3 +6078 'IPP' 3 +6079 'IPS' 3 +6080 'IPT' 3 +6081 'IPV' 3 +6082 'IPv' 3 +6083 'IRA' 3 +6084 'IRC' 3 +6085 'IRD' 3 +6086 'IRE' 3 +6087 'IRS' 3 +6088 'IRT' 3 +6089 'ISA' 3 +6090 'ISC' 3 +6091 'ISE' 3 +6092 'ISH' 3 +6093 'ISM' 3 +6094 'ISO' 3 +6095 'ISP' 3 +6096 'ISS' 3 +6097 'IST' 3 +6098 'ITA' 3 +6099 'ITE' 3 +6100 'ITH' 3 +6101 'ITS' 3 +6102 'ITT' 3 +6103 'ITY' 3 +6104 'IUM' 3 +6105 'IVE' 3 +6106 'IZE' 3 +6107 'Ice' 3 +6108 'Ich' 3 +6109 'Ide' 3 +6110 'Ids' 3 +6111 'Idx' 3 +6112 'Ign' 3 +6113 'Ill' 3 +6114 'Img' 3 +6115 'Imm' 3 +6116 'Imp' 3 +6117 'Inc' 3 +6118 'Ind' 3 +6119 'Inf' 3 +6120 'Ing' 3 +6121 'Ini' 3 +6122 'Ins' 3 +6123 'Int' 3 +6124 'Inv' 3 +6125 'Ion' 3 +6126 'Isa' 3 +6127 'Isn' 3 +6128 'Iso' 3 +6129 'Iss' 3 +6130 'Its' 3 +6131 'JOB' 3 +6132 'JPG' 3 +6133 'Jac' 3 +6134 'Jam' 3 +6135 'Jan' 3 +6136 'Jar' 3 +6137 'Jay' 3 +6138 'Jen' 3 +6139 'Jer' 3 +6140 'Jet' 3 +6141 'Jim' 3 +6142 'Job' 3 +6143 'Joe' 3 +6144 'Joh' 3 +6145 'Jon' 3 +6146 'Jos' 3 +6147 'Joy' 3 +6148 'Jud' 3 +6149 'Jul' 3 +6150 'Jun' 3 +6151 'KEN' 3 +6152 'KER' 3 +6153 'KEY' 3 +6154 'Kal' 3 +6155 'Kam' 3 +6156 'Kar' 3 +6157 'Kat' 3 +6158 'Kay' 3 +6159 'Ken' 3 +6160 'Ker' 3 +6161 'Key' 3 +6162 'Kim' 3 +6163 'Kin' 3 +6164 'Kir' 3 +6165 'Kit' 3 +6166 'Kon' 3 +6167 'LAB' 3 +6168 'LAN' 3 +6169 'LAR' 3 +6170 'LAS' 3 +6171 'LAT' 3 +6172 'LAY' 3 +6173 'LED' 3 +6174 'LEN' 3 +6175 'LER' 3 +6176 'LES' 3 +6177 'LET' 3 +6178 'LEV' 3 +6179 'LEX' 3 +6180 'LEY' 3 +6181 'LIB' 3 +6182 'LIN' 3 +6183 'LOB' 3 +6184 'LOC' 3 +6185 'LOG' 3 +6186 'LOS' 3 +6187 'LOW' 3 +6188 'Lab' 3 +6189 'Lag' 3 +6190 'Lam' 3 +6191 'Lap' 3 +6192 'Lar' 3 +6193 'Las' 3 +6194 'Lat' 3 +6195 'Law' 3 +6196 'Lay' 3 +6197 'Lbl' 3 +6198 'Lee' 3 +6199 'Leg' 3 +6200 'Len' 3 +6201 'Les' 3 +6202 'Let' 3 +6203 'Lev' 3 +6204 'Lew' 3 +6205 'Lex' 3 +6206 'Lib' 3 +6207 'Lic' 3 
+6208 'Lie' 3 +6209 'Lif' 3 +6210 'Lik' 3 +6211 'Lim' 3 +6212 'Lin' 3 +6213 'Lip' 3 +6214 'Lit' 3 +6215 'Lng' 3 +6216 'Loc' 3 +6217 'Log' 3 +6218 'Lon' 3 +6219 'Los' 3 +6220 'Lot' 3 +6221 'Lou' 3 +6222 'Low' 3 +6223 'Lua' 3 +6224 'Luc' 3 +6225 'Lux' 3 +6226 'MAC' 3 +6227 'MAG' 3 +6228 'MAL' 3 +6229 'MAN' 3 +6230 'MAP' 3 +6231 'MAR' 3 +6232 'MAS' 3 +6233 'MAT' 3 +6234 'MAX' 3 +6235 'MED' 3 +6236 'MEM' 3 +6237 'MEN' 3 +6238 'MER' 3 +6239 'MES' 3 +6240 'MET' 3 +6241 'MHz' 3 +6242 'MIC' 3 +6243 'MIN' 3 +6244 'MIS' 3 +6245 'MIT' 3 +6246 'MIX' 3 +6247 'MLE' 3 +6248 'MLP' 3 +6249 'MOD' 3 +6250 'MON' 3 +6251 'MOS' 3 +6252 'MOV' 3 +6253 'MPI' 3 +6254 'MPL' 3 +6255 'MRI' 3 +6256 'MSC' 3 +6257 'MSE' 3 +6258 'MSG' 3 +6259 'Mac' 3 +6260 'Mad' 3 +6261 'Mag' 3 +6262 'Mah' 3 +6263 'Mal' 3 +6264 'Man' 3 +6265 'Map' 3 +6266 'Mar' 3 +6267 'Mas' 3 +6268 'Mat' 3 +6269 'Max' 3 +6270 'May' 3 +6271 'McC' 3 +6272 'Med' 3 +6273 'Meg' 3 +6274 'Mel' 3 +6275 'Mem' 3 +6276 'Men' 3 +6277 'Mer' 3 +6278 'Mes' 3 +6279 'Met' 3 +6280 'Mex' 3 +6281 'Mgr' 3 +6282 'Mic' 3 +6283 'Mid' 3 +6284 'Mil' 3 +6285 'Min' 3 +6286 'Mir' 3 +6287 'Mis' 3 +6288 'Mit' 3 +6289 'Mix' 3 +6290 'Mob' 3 +6291 'Mod' 3 +6292 'Moh' 3 +6293 'Mol' 3 +6294 'Mom' 3 +6295 'Mon' 3 +6296 'Mor' 3 +6297 'Mot' 3 +6298 'Mov' 3 +6299 'Mrs' 3 +6300 'Msg' 3 +6301 'Mul' 3 +6302 'Mur' 3 +6303 'Mus' 3 +6304 'Mut' 3 +6305 'Mvc' 3 +6306 'NAL' 3 +6307 'NAM' 3 +6308 'NAS' 3 +6309 'NAT' 3 +6310 'NBC' 3 +6311 'NEL' 3 +6312 'NER' 3 +6313 'NES' 3 +6314 'NET' 3 +6315 'NEW' 3 +6316 'NON' 3 +6317 'NOR' 3 +6318 'NOT' 3 +6319 'NOW' 3 +6320 'NPC' 3 +6321 'NUM' 3 +6322 'NaN' 3 +6323 'Nam' 3 +6324 'Nan' 3 +6325 'Nat' 3 +6326 'Nav' 3 +6327 'Neg' 3 +6328 'Net' 3 +6329 'New' 3 +6330 'Nic' 3 +6331 'Nik' 3 +6332 'Nil' 3 +6333 'Nit' 3 +6334 'Nom' 3 +6335 'Non' 3 +6336 'Nor' 3 +6337 'Nos' 3 +6338 'Not' 3 +6339 'Nov' 3 +6340 'Now' 3 +6341 'Num' 3 +6342 'OBJ' 3 +6343 'OCI' 3 +6344 'OCK' 3 +6345 'OCT' 3 +6346 'ODE' 3 +6347 'ODO' 3 +6348 'ODY' 3 +6349 'OFF' 3 +6350 'OID' 3 +6351 'OLD' 3 +6352 'OME' 3 +6353 'ONA' 3 +6354 'OND' 3 +6355 'ONE' 3 +6356 'ONG' 3 +6357 'ONS' 3 +6358 'ONT' 3 +6359 'OPS' 3 +6360 'OPT' 3 +6361 'ORA' 3 +6362 'ORD' 3 +6363 'ORE' 3 +6364 'ORG' 3 +6365 'ORK' 3 +6366 'ORM' 3 +6367 'ORN' 3 +6368 'ORS' 3 +6369 'ORT' 3 +6370 'ORY' 3 +6371 'OSE' 3 +6372 'OSS' 3 +6373 'OST' 3 +6374 'OTA' 3 +6375 'OTE' 3 +6376 'OTH' 3 +6377 'OTO' 3 +6378 'OTP' 3 +6379 'OTS' 3 +6380 'OTT' 3 +6381 'OUR' 3 +6382 'OUS' 3 +6383 'OUT' 3 +6384 'OVA' 3 +6385 'OVE' 3 +6386 'OWN' 3 +6387 'Obj' 3 +6388 'Obs' 3 +6389 'Occ' 3 +6390 'Oct' 3 +6391 'Off' 3 +6392 'Old' 3 +6393 'One' 3 +6394 'Ont' 3 +6395 'Opp' 3 +6396 'Ops' 3 +6397 'Opt' 3 +6398 'Ord' 3 +6399 'Org' 3 +6400 'Ori' 3 +6401 'Our' 3 +6402 'Out' 3 +6403 'Own' 3 +6404 'PAD' 3 +6405 'PAN' 3 +6406 'PAR' 3 +6407 'PAS' 3 +6408 'PAT' 3 +6409 'PBS' 3 +6410 'PCA' 3 +6411 'PCI' 3 +6412 'PCM' 3 +6413 'PCR' 3 +6414 'PDF' 3 +6415 'PED' 3 +6416 'PEG' 3 +6417 'PER' 3 +6418 'PET' 3 +6419 'PHA' 3 +6420 'PHP' 3 +6421 'PIC' 3 +6422 'PID' 3 +6423 'PIN' 3 +6424 'PIO' 3 +6425 'PIP' 3 +6426 'PLA' 3 +6427 'PLC' 3 +6428 'PLE' 3 +6429 'PNG' 3 +6430 'POL' 3 +6431 'POP' 3 +6432 'POR' 3 +6433 'POS' 3 +6434 'PRE' 3 +6435 'PRI' 3 +6436 'PRO' 3 +6437 'PTR' 3 +6438 'PUT' 3 +6439 'PWM' 3 +6440 'Pac' 3 +6441 'Pad' 3 +6442 'Pag' 3 +6443 'Pak' 3 +6444 'Pal' 3 +6445 'Pan' 3 +6446 'Pap' 3 +6447 'Par' 3 +6448 'Pas' 3 +6449 'Pat' 3 +6450 'Pay' 3 +6451 'Pdf' 3 +6452 'Ped' 3 +6453 'Pen' 3 +6454 'Per' 3 +6455 'Pet' 3 +6456 'Phi' 3 +6457 'Pic' 3 +6458 'Pie' 3 +6459 'Pin' 3 +6460 'Pix' 3 +6461 'Pod' 
3 +6462 'Pol' 3 +6463 'Pop' 3 +6464 'Por' 3 +6465 'Pos' 3 +6466 'Pot' 3 +6467 'Pow' 3 +6468 'Pre' 3 +6469 'Pri' 3 +6470 'Pro' 3 +6471 'Psi' 3 +6472 'Ptr' 3 +6473 'Pub' 3 +6474 'Pur' 3 +6475 'Put' 3 +6476 'QUE' 3 +6477 'Qty' 3 +6478 'Que' 3 +6479 'Qui' 3 +6480 'RAD' 3 +6481 'RAL' 3 +6482 'RAM' 3 +6483 'RAN' 3 +6484 'RAW' 3 +6485 'RAY' 3 +6486 'REC' 3 +6487 'RED' 3 +6488 'REE' 3 +6489 'REF' 3 +6490 'REG' 3 +6491 'REL' 3 +6492 'REM' 3 +6493 'REN' 3 +6494 'REP' 3 +6495 'REQ' 3 +6496 'RES' 3 +6497 'RET' 3 +6498 'RFC' 3 +6499 'RGB' 3 +6500 'RIC' 3 +6501 'RIX' 3 +6502 'RMS' 3 +6503 'RNA' 3 +6504 'RNN' 3 +6505 'ROC' 3 +6506 'ROI' 3 +6507 'ROL' 3 +6508 'ROM' 3 +6509 'RON' 3 +6510 'ROP' 3 +6511 'ROS' 3 +6512 'ROT' 3 +6513 'ROW' 3 +6514 'RPC' 3 +6515 'RSA' 3 +6516 'RSS' 3 +6517 'RTC' 3 +6518 'RUN' 3 +6519 'Rab' 3 +6520 'Rad' 3 +6521 'Ram' 3 +6522 'Rat' 3 +6523 'Raw' 3 +6524 'Ray' 3 +6525 'Rec' 3 +6526 'Red' 3 +6527 'Ref' 3 +6528 'Reg' 3 +6529 'Rel' 3 +6530 'Rem' 3 +6531 'Ren' 3 +6532 'Rep' 3 +6533 'Req' 3 +6534 'Res' 3 +6535 'Ret' 3 +6536 'Rev' 3 +6537 'Rew' 3 +6538 'Ric' 3 +6539 'Rob' 3 +6540 'Rod' 3 +6541 'Rol' 3 +6542 'Rom' 3 +6543 'Ron' 3 +6544 'Ros' 3 +6545 'Rot' 3 +6546 'Row' 3 +6547 'Roy' 3 +6548 'Rub' 3 +6549 'Run' 3 +6550 'Ré' 3 +6551 'SAM' 3 +6552 'SAN' 3 +6553 'SAT' 3 +6554 'SCH' 3 +6555 'SCI' 3 +6556 'SCO' 3 +6557 'SCR' 3 +6558 'SDK' 3 +6559 'SDL' 3 +6560 'SEC' 3 +6561 'SED' 3 +6562 'SEE' 3 +6563 'SEG' 3 +6564 'SEL' 3 +6565 'SEM' 3 +6566 'SEP' 3 +6567 'SEQ' 3 +6568 'SER' 3 +6569 'SET' 3 +6570 'SHA' 3 +6571 'SID' 3 +6572 'SIG' 3 +6573 'SIM' 3 +6574 'SMS' 3 +6575 'SNP' 3 +6576 'SOC' 3 +6577 'SOL' 3 +6578 'SON' 3 +6579 'SPE' 3 +6580 'SPI' 3 +6581 'SQL' 3 +6582 'SRC' 3 +6583 'SSH' 3 +6584 'SSL' 3 +6585 'STA' 3 +6586 'STD' 3 +6587 'STE' 3 +6588 'STM' 3 +6589 'STR' 3 +6590 'STS' 3 +6591 'SUB' 3 +6592 'SUM' 3 +6593 'SUP' 3 +6594 'SUR' 3 +6595 'SVG' 3 +6596 'SYS' 3 +6597 'Sab' 3 +6598 'Sac' 3 +6599 'Sad' 3 +6600 'Saf' 3 +6601 'Sal' 3 +6602 'Sam' 3 +6603 'San' 3 +6604 'Sar' 3 +6605 'Sat' 3 +6606 'Sav' 3 +6607 'Say' 3 +6608 'Sch' 3 +6609 'Sci' 3 +6610 'Sdk' 3 +6611 'Sea' 3 +6612 'Sec' 3 +6613 'See' 3 +6614 'Seg' 3 +6615 'Sel' 3 +6616 'Sem' 3 +6617 'Sen' 3 +6618 'Sep' 3 +6619 'Seq' 3 +6620 'Ser' 3 +6621 'Set' 3 +6622 'Sex' 3 +6623 'Sha' 3 +6624 'She' 3 +6625 'Sid' 3 +6626 'Sig' 3 +6627 'Sil' 3 +6628 'Sim' 3 +6629 'Sin' 3 +6630 'Sir' 3 +6631 'Sit' 3 +6632 'Six' 3 +6633 'Sky' 3 +6634 'Soc' 3 +6635 'Sol' 3 +6636 'Son' 3 +6637 'Sou' 3 +6638 'Spe' 3 +6639 'Spl' 3 +6640 'Spr' 3 +6641 'Spy' 3 +6642 'Sql' 3 +6643 'Squ' 3 +6644 'Src' 3 +6645 'Sta' 3 +6646 'Std' 3 +6647 'Ste' 3 +6648 'Sto' 3 +6649 'Str' 3 +6650 'Sty' 3 +6651 'Sub' 3 +6652 'Suc' 3 +6653 'Sud' 3 +6654 'Sum' 3 +6655 'Sun' 3 +6656 'Sup' 3 +6657 'Sur' 3 +6658 'Sus' 3 +6659 'Sym' 3 +6660 'Syn' 3 +6661 'Sys' 3 +6662 'TAB' 3 +6663 'TAG' 3 +6664 'TCP' 3 +6665 'TED' 3 +6666 'TEM' 3 +6667 'TER' 3 +6668 'TES' 3 +6669 'TEX' 3 +6670 'THE' 3 +6671 'TIM' 3 +6672 'TLS' 3 +6673 'TMP' 3 +6674 'TON' 3 +6675 'TOP' 3 +6676 'TOR' 3 +6677 'TRA' 3 +6678 'TRY' 3 +6679 'Tab' 3 +6680 'Tag' 3 +6681 'Tai' 3 +6682 'Tak' 3 +6683 'Tal' 3 +6684 'Tam' 3 +6685 'Tan' 3 +6686 'Tap' 3 +6687 'Tar' 3 +6688 'Tax' 3 +6689 'TeV' 3 +6690 'TeX' 3 +6691 'Ted' 3 +6692 'Tek' 3 +6693 'Tel' 3 +6694 'Tem' 3 +6695 'Ten' 3 +6696 'Ter' 3 +6697 'Tes' 3 +6698 'Tex' 3 +6699 'The' 3 +6700 'Thu' 3 +6701 'Tim' 3 +6702 'Tip' 3 +6703 'Tit' 3 +6704 'Tmp' 3 +6705 'Tok' 3 +6706 'Tom' 3 +6707 'Ton' 3 +6708 'Too' 3 +6709 'Top' 3 +6710 'Tor' 3 +6711 'Tot' 3 +6712 'Toy' 3 +6713 'Tra' 3 +6714 'Tre' 3 +6715 'Tri' 
3 +6716 'Tro' 3 +6717 'Try' 3 +6718 'Tue' 3 +6719 'Tur' 3 +6720 'Two' 3 +6721 'Txt' 3 +6722 'Typ' 3 +6723 'UAL' 3 +6724 'UCK' 3 +6725 'UCT' 3 +6726 'UDP' 3 +6727 'UES' 3 +6728 'UFF' 3 +6729 'UGH' 3 +6730 'UID' 3 +6731 'UIT' 3 +6732 'ULD' 3 +6733 'ULE' 3 +6734 'ULL' 3 +6735 'ULT' 3 +6736 'UME' 3 +6737 'UMN' 3 +6738 'UMP' 3 +6739 'UNC' 3 +6740 'UND' 3 +6741 'UNE' 3 +6742 'UNK' 3 +6743 'UNT' 3 +6744 'URA' 3 +6745 'URE' 3 +6746 'URI' 3 +6747 'URL' 3 +6748 'URN' 3 +6749 'URS' 3 +6750 'USA' 3 +6751 'USB' 3 +6752 'USD' 3 +6753 'USE' 3 +6754 'USH' 3 +6755 'USS' 3 +6756 'UST' 3 +6757 'UTC' 3 +6758 'UTE' 3 +6759 'UTF' 3 +6760 'UTH' 3 +6761 'Uid' 3 +6762 'Ult' 3 +6763 'Und' 3 +6764 'Uni' 3 +6765 'Uns' 3 +6766 'Uri' 3 +6767 'Url' 3 +6768 'Use' 3 +6769 'Usu' 3 +6770 'VAL' 3 +6771 'VAR' 3 +6772 'VED' 3 +6773 'VEL' 3 +6774 'VEN' 3 +6775 'VER' 3 +6776 'VES' 3 +6777 'VIC' 3 +6778 'VID' 3 +6779 'VIE' 3 +6780 'VII' 3 +6781 'VIS' 3 +6782 'VOL' 3 +6783 'VPN' 3 +6784 'Vac' 3 +6785 'Val' 3 +6786 'Van' 3 +6787 'Var' 3 +6788 'Vec' 3 +6789 'Vel' 3 +6790 'Ven' 3 +6791 'Ver' 3 +6792 'Via' 3 +6793 'Vin' 3 +6794 'Vir' 3 +6795 'Vis' 3 +6796 'Vol' 3 +6797 'WAR' 3 +6798 'WAY' 3 +6799 'WEB' 3 +6800 'WER' 3 +6801 'WHO' 3 +6802 'WID' 3 +6803 'WIN' 3 +6804 'WOR' 3 +6805 'Wal' 3 +6806 'War' 3 +6807 'Was' 3 +6808 'Wat' 3 +6809 'Way' 3 +6810 'Web' 3 +6811 'Wed' 3 +6812 'Wel' 3 +6813 'Who' 3 +6814 'Why' 3 +6815 'Wik' 3 +6816 'Wil' 3 +6817 'Win' 3 +6818 'Wol' 3 +6819 'Won' 3 +6820 'Wow' 3 +6821 'XML' 3 +6822 'XXX' 3 +6823 'XYZ' 3 +6824 'Xiv' 3 +6825 'Xml' 3 +6826 'YES' 3 +6827 'YLE' 3 +6828 'YOU' 3 +6829 'YPE' 3 +6830 'YYY' 3 +6831 'Yes' 3 +6832 'Yet' 3 +6833 'You' 3 +6834 'ZIP' 3 +6835 'Zen' 3 +6836 'Zip' 3 +6837 "['_" 3 +6838 '[:,' 3 +6839 '[:-' 3 +6840 '[:]' 3 +6841 "[['" 3 +6842 '[])' 3 +6843 '[],' 3 +6844 '[]{' 3 +6845 '\\""' 3 +6846 '\\",' 3 +6847 '\\":' 3 +6848 '\\">' 3 +6849 '\\\\\\' 3 +6850 '\\}$' 3 +6851 ']")' 3 +6852 ']",' 3 +6853 "]'," 3 +6854 '](#' 3 +6855 ']))' 3 +6856 ']),' 3 +6857 ']).' 3 +6858 ']):' 3 +6859 ']);' 3 +6860 '],[' 3 +6861 ']->' 3 +6862 '].[' 3 +6863 ']="' 3 +6864 ']["' 3 +6865 "]['" 3 +6866 ']\\\\' 3 +6867 ']])' 3 +6868 ']],' 3 +6869 ']];' 3 +6870 ']},' 3 +6871 '^{+' 3 +6872 '^{-' 3 +6873 '^{\\' 3 +6874 '_("' 3 +6875 "_('" 3 +6876 '_->' 3 +6877 '__(' 3 +6878 '__)' 3 +6879 '__,' 3 +6880 '__.' 3 +6881 '___' 3 +6882 '_{\\' 3 +6883 '`).' 
3 +6884 '```' 3 +6885 'aaa' 3 +6886 'aab' 3 +6887 'aan' 3 +6888 'aar' 3 +6889 'aba' 3 +6890 'abb' 3 +6891 'abc' 3 +6892 'abd' 3 +6893 'abe' 3 +6894 'abi' 3 +6895 'abl' 3 +6896 'abo' 3 +6897 'abr' 3 +6898 'abs' 3 +6899 'aby' 3 +6900 'aca' 3 +6901 'acc' 3 +6902 'ace' 3 +6903 'ach' 3 +6904 'aci' 3 +6905 'ack' 3 +6906 'acl' 3 +6907 'aco' 3 +6908 'acs' 3 +6909 'act' 3 +6910 'acy' 3 +6911 'ada' 3 +6912 'adb' 3 +6913 'add' 3 +6914 'ade' 3 +6915 'adh' 3 +6916 'adi' 3 +6917 'adj' 3 +6918 'adm' 3 +6919 'ado' 3 +6920 'adr' 3 +6921 'ads' 3 +6922 'adt' 3 +6923 'adu' 3 +6924 'adv' 3 +6925 'ady' 3 +6926 'aea' 3 +6927 'ael' 3 +6928 'aes' 3 +6929 'afa' 3 +6930 'afe' 3 +6931 'aff' 3 +6932 'afi' 3 +6933 'aft' 3 +6934 'aga' 3 +6935 'age' 3 +6936 'agg' 3 +6937 'agh' 3 +6938 'agi' 3 +6939 'agn' 3 +6940 'ago' 3 +6941 'agr' 3 +6942 'ags' 3 +6943 'agt' 3 +6944 'agu' 3 +6945 'agy' 3 +6946 'aha' 3 +6947 'ahi' 3 +6948 'ahl' 3 +6949 'ahn' 3 +6950 'aho' 3 +6951 'ahr' 3 +6952 'ahu' 3 +6953 'aic' 3 +6954 'aid' 3 +6955 'ail' 3 +6956 'aim' 3 +6957 'ain' 3 +6958 'air' 3 +6959 'ais' 3 +6960 'ait' 3 +6961 'aja' 3 +6962 'aje' 3 +6963 'aji' 3 +6964 'ajo' 3 +6965 'aju' 3 +6966 'aka' 3 +6967 'ake' 3 +6968 'akh' 3 +6969 'aki' 3 +6970 'akk' 3 +6971 'ako' 3 +6972 'aks' 3 +6973 'akt' 3 +6974 'aku' 3 +6975 'aky' 3 +6976 'ala' 3 +6977 'alc' 3 +6978 'ald' 3 +6979 'ale' 3 +6980 'alf' 3 +6981 'alg' 3 +6982 'ali' 3 +6983 'alk' 3 +6984 'all' 3 +6985 'alm' 3 +6986 'alo' 3 +6987 'als' 3 +6988 'alt' 3 +6989 'alu' 3 +6990 'aly' 3 +6991 'ama' 3 +6992 'amb' 3 +6993 'amd' 3 +6994 'ame' 3 +6995 'ami' 3 +6996 'aml' 3 +6997 'amm' 3 +6998 'amo' 3 +6999 'amp' 3 +7000 'ams' 3 +7001 'amt' 3 +7002 'amy' 3 +7003 'ana' 3 +7004 'anc' 3 +7005 'and' 3 +7006 'ane' 3 +7007 'ang' 3 +7008 'anh' 3 +7009 'ani' 3 +7010 'anj' 3 +7011 'ank' 3 +7012 'ann' 3 +7013 'ano' 3 +7014 'ans' 3 +7015 'ant' 3 +7016 'anu' 3 +7017 'any' 3 +7018 'anz' 3 +7019 'aos' 3 +7020 'apa' 3 +7021 'ape' 3 +7022 'aph' 3 +7023 'api' 3 +7024 'apk' 3 +7025 'apo' 3 +7026 'app' 3 +7027 'apr' 3 +7028 'aps' 3 +7029 'apt' 3 +7030 'apy' 3 +7031 'aqu' 3 +7032 'ara' 3 +7033 'arb' 3 +7034 'arc' 3 +7035 'ard' 3 +7036 'are' 3 +7037 'arf' 3 +7038 'arg' 3 +7039 'ari' 3 +7040 'ark' 3 +7041 'arl' 3 +7042 'arm' 3 +7043 'arn' 3 +7044 'aro' 3 +7045 'arp' 3 +7046 'arr' 3 +7047 'ars' 3 +7048 'art' 3 +7049 'aru' 3 +7050 'ary' 3 +7051 'asa' 3 +7052 'asc' 3 +7053 'ase' 3 +7054 'ash' 3 +7055 'asi' 3 +7056 'ask' 3 +7057 'asm' 3 +7058 'aso' 3 +7059 'asp' 3 +7060 'ass' 3 +7061 'ast' 3 +7062 'asu' 3 +7063 'asy' 3 +7064 'asz' 3 +7065 'ata' 3 +7066 'ate' 3 +7067 'ath' 3 +7068 'ati' 3 +7069 'atl' 3 +7070 'ato' 3 +7071 'atr' 3 +7072 'ats' 3 +7073 'att' 3 +7074 'atu' 3 +7075 'aty' 3 +7076 'atz' 3 +7077 'auc' 3 +7078 'aud' 3 +7079 'auf' 3 +7080 'aug' 3 +7081 'aul' 3 +7082 'aur' 3 +7083 'aus' 3 +7084 'aut' 3 +7085 'aux' 3 +7086 'ava' 3 +7087 'ave' 3 +7088 'avg' 3 +7089 'avi' 3 +7090 'avo' 3 +7091 'avy' 3 +7092 'awa' 3 +7093 'awi' 3 +7094 'awk' 3 +7095 'awn' 3 +7096 'aws' 3 +7097 'awt' 3 +7098 'axe' 3 +7099 'axy' 3 +7100 'aya' 3 +7101 'aye' 3 +7102 'ays' 3 +7103 'aza' 3 +7104 'aze' 3 +7105 'azi' 3 +7106 'azo' 3 +7107 'azu' 3 +7108 'azy' 3 +7109 'azz' 3 +7110 'añ' 3 +7111 'ać' 3 +7112 'ał' 3 +7113 'aż' 3 +7114 'bab' 3 +7115 'bac' 3 +7116 'bad' 3 +7117 'bag' 3 +7118 'bah' 3 +7119 'bai' 3 +7120 'bak' 3 +7121 'bal' 3 +7122 'bam' 3 +7123 'ban' 3 +7124 'bar' 3 +7125 'bas' 3 +7126 'bat' 3 +7127 'bau' 3 +7128 'bay' 3 +7129 'baz' 3 +7130 'bbc' 3 +7131 'bbe' 3 +7132 'bdd' 3 +7133 'bec' 3 +7134 'bed' 3 +7135 'bee' 3 +7136 'bef' 3 +7137 'beg' 3 
+7138 'beh' 3 +7139 'bei' 3 +7140 'bek' 3 +7141 'bel' 3 +7142 'ben' 3 +7143 'ber' 3 +7144 'bes' 3 +7145 'bet' 3 +7146 'bey' 3 +7147 'bfd' 3 +7148 'bia' 3 +7149 'bib' 3 +7150 'bic' 3 +7151 'bid' 3 +7152 'bie' 3 +7153 'big' 3 +7154 'bil' 3 +7155 'bin' 3 +7156 'bio' 3 +7157 'bir' 3 +7158 'bis' 3 +7159 'bit' 3 +7160 'biz' 3 +7161 'bla' 3 +7162 'ble' 3 +7163 'blk' 3 +7164 'blo' 3 +7165 'blr' 3 +7166 'bly' 3 +7167 'bmp' 3 +7168 'bnb' 3 +7169 'boa' 3 +7170 'bob' 3 +7171 'bol' 3 +7172 'bon' 3 +7173 'boo' 3 +7174 'bor' 3 +7175 'bos' 3 +7176 'bot' 3 +7177 'bow' 3 +7178 'box' 3 +7179 'boy' 3 +7180 'bps' 3 +7181 'bra' 3 +7182 'bre' 3 +7183 'bro' 3 +7184 'bru' 3 +7185 'bsd' 3 +7186 'bst' 3 +7187 'btn' 3 +7188 'bud' 3 +7189 'buf' 3 +7190 'bug' 3 +7191 'bul' 3 +7192 'bum' 3 +7193 'bur' 3 +7194 'bus' 3 +7195 'but' 3 +7196 'buy' 3 +7197 'bye' 3 +7198 'bys' 3 +7199 'bé' 3 +7200 'bü' 3 +7201 'bě' 3 +7202 'cab' 3 +7203 'cac' 3 +7204 'cad' 3 +7205 'cal' 3 +7206 'cam' 3 +7207 'can' 3 +7208 'cap' 3 +7209 'car' 3 +7210 'cas' 3 +7211 'cat' 3 +7212 'cca' 3 +7213 'ccc' 3 +7214 'cci' 3 +7215 'cco' 3 +7216 'cdf' 3 +7217 'cdn' 3 +7218 'cea' 3 +7219 'ced' 3 +7220 'cel' 3 +7221 'cem' 3 +7222 'cen' 3 +7223 'cep' 3 +7224 'cer' 3 +7225 'ces' 3 +7226 'ceu' 3 +7227 'cfg' 3 +7228 'cgi' 3 +7229 'cha' 3 +7230 'che' 3 +7231 'chi' 3 +7232 'chk' 3 +7233 'chl' 3 +7234 'chn' 3 +7235 'cho' 3 +7236 'chr' 3 +7237 'chs' 3 +7238 'cht' 3 +7239 'chu' 3 +7240 'chy' 3 +7241 'cia' 3 +7242 'cid' 3 +7243 'cie' 3 +7244 'cig' 3 +7245 'cii' 3 +7246 'cil' 3 +7247 'cin' 3 +7248 'cio' 3 +7249 'cip' 3 +7250 'cir' 3 +7251 'cis' 3 +7252 'cit' 3 +7253 'cke' 3 +7254 'cki' 3 +7255 'cko' 3 +7256 'cks' 3 +7257 'cla' 3 +7258 'cle' 3 +7259 'clf' 3 +7260 'cli' 3 +7261 'clk' 3 +7262 'clo' 3 +7263 'cls' 3 +7264 'cmb' 3 +7265 'cmd' 3 +7266 'cmp' 3 +7267 'cms' 3 +7268 'cnt' 3 +7269 'cod' 3 +7270 'coe' 3 +7271 'col' 3 +7272 'com' 3 +7273 'con' 3 +7274 'cop' 3 +7275 'cor' 3 +7276 'cos' 3 +7277 'cot' 3 +7278 'cou' 3 +7279 'cov' 3 +7280 'cow' 3 +7281 'cox' 3 +7282 'cpp' 3 +7283 'cpu' 3 +7284 'cpy' 3 +7285 'cra' 3 +7286 'crc' 3 +7287 'cre' 3 +7288 'cri' 3 +7289 'cro' 3 +7290 'cru' 3 +7291 'cry' 3 +7292 'csr' 3 +7293 'css' 3 +7294 'csv' 3 +7295 'cta' 3 +7296 'ctl' 3 +7297 'ctr' 3 +7298 'ctu' 3 +7299 'ctx' 3 +7300 'cub' 3 +7301 'cue' 3 +7302 'cul' 3 +7303 'cum' 3 +7304 'cup' 3 +7305 'cur' 3 +7306 'cus' 3 +7307 'cut' 3 +7308 'cwd' 3 +7309 'czy' 3 +7310 'cé' 3 +7311 'cí' 3 +7312 'dac' 3 +7313 'dad' 3 +7314 'dag' 3 +7315 'dal' 3 +7316 'dam' 3 +7317 'dan' 3 +7318 'dao' 3 +7319 'dap' 3 +7320 'dar' 3 +7321 'das' 3 +7322 'dat' 3 +7323 'dav' 3 +7324 'day' 3 +7325 'dbc' 3 +7326 'dbg' 3 +7327 'dbl' 3 +7328 'ddd' 3 +7329 'dea' 3 +7330 'deb' 3 +7331 'dec' 3 +7332 'ded' 3 +7333 'dee' 3 +7334 'def' 3 +7335 'deg' 3 +7336 'dek' 3 +7337 'del' 3 +7338 'dem' 3 +7339 'den' 3 +7340 'dep' 3 +7341 'der' 3 +7342 'des' 3 +7343 'det' 3 +7344 'dev' 3 +7345 'dex' 3 +7346 'dez' 3 +7347 'dfs' 3 +7348 'dia' 3 +7349 'dic' 3 +7350 'did' 3 +7351 'die' 3 +7352 'dif' 3 +7353 'dig' 3 +7354 'dil' 3 +7355 'dim' 3 +7356 'din' 3 +7357 'dio' 3 +7358 'dip' 3 +7359 'dir' 3 +7360 'dis' 3 +7361 'dit' 3 +7362 'div' 3 +7363 'dle' 3 +7364 'dll' 3 +7365 'dna' 3 +7366 'dob' 3 +7367 'doc' 3 +7368 'dof' 3 +7369 'dog' 3 +7370 'doi' 3 +7371 'dol' 3 +7372 'dom' 3 +7373 'don' 3 +7374 'dor' 3 +7375 'dos' 3 +7376 'dot' 3 +7377 'dou' 3 +7378 'dpi' 3 +7379 'dra' 3 +7380 'dre' 3 +7381 'dri' 3 +7382 'dro' 3 +7383 'drv' 3 +7384 'dry' 3 +7385 'dst' 3 +7386 'dtd' 3 +7387 'duc' 3 +7388 'due' 3 +7389 'dup' 3 +7390 'dur' 3 +7391 'dyn' 3 
+7392 'ead' 3 +7393 'eah' 3 +7394 'ean' 3 +7395 'ear' 3 +7396 'eas' 3 +7397 'eat' 3 +7398 'eau' 3 +7399 'eba' 3 +7400 'ebb' 3 +7401 'eca' 3 +7402 'ecc' 3 +7403 'ecd' 3 +7404 'ece' 3 +7405 'ech' 3 +7406 'eck' 3 +7407 'ecl' 3 +7408 'eco' 3 +7409 'ecs' 3 +7410 'ect' 3 +7411 'eda' 3 +7412 'edd' 3 +7413 'ede' 3 +7414 'edi' 3 +7415 'edo' 3 +7416 'eds' 3 +7417 'edu' 3 +7418 'edy' 3 +7419 'eed' 3 +7420 'een' 3 +7421 'eer' 3 +7422 'ees' 3 +7423 'efe' 3 +7424 'eff' 3 +7425 'eft' 3 +7426 'ega' 3 +7427 'egg' 3 +7428 'ego' 3 +7429 'egr' 3 +7430 'egu' 3 +7431 'eil' 3 +7432 'ein' 3 +7433 'eka' 3 +7434 'eki' 3 +7435 'eks' 3 +7436 'ekt' 3 +7437 'ela' 3 +7438 'eld' 3 +7439 'ele' 3 +7440 'elf' 3 +7441 'eli' 3 +7442 'ell' 3 +7443 'elm' 3 +7444 'eln' 3 +7445 'elo' 3 +7446 'elp' 3 +7447 'els' 3 +7448 'elt' 3 +7449 'elu' 3 +7450 'ely' 3 +7451 'ema' 3 +7452 'emb' 3 +7453 'eme' 3 +7454 'emi' 3 +7455 'emn' 3 +7456 'emo' 3 +7457 'emp' 3 +7458 'ems' 3 +7459 'emu' 3 +7460 'emy' 3 +7461 'ena' 3 +7462 'enc' 3 +7463 'end' 3 +7464 'ene' 3 +7465 'enf' 3 +7466 'eng' 3 +7467 'enh' 3 +7468 'eni' 3 +7469 'enk' 3 +7470 'enn' 3 +7471 'eno' 3 +7472 'ens' 3 +7473 'ent' 3 +7474 'enu' 3 +7475 'env' 3 +7476 'eny' 3 +7477 'enz' 3 +7478 'eof' 3 +7479 'eon' 3 +7480 'eor' 3 +7481 'eph' 3 +7482 'epi' 3 +7483 'eps' 3 +7484 'ept' 3 +7485 'eqn' 3 +7486 'equ' 3 +7487 'era' 3 +7488 'erb' 3 +7489 'erc' 3 +7490 'erd' 3 +7491 'ere' 3 +7492 'erg' 3 +7493 'eri' 3 +7494 'erk' 3 +7495 'erm' 3 +7496 'ern' 3 +7497 'ero' 3 +7498 'erp' 3 +7499 'err' 3 +7500 'ers' 3 +7501 'ert' 3 +7502 'erv' 3 +7503 'ery' 3 +7504 'esa' 3 +7505 'esc' 3 +7506 'ese' 3 +7507 'esh' 3 +7508 'esi' 3 +7509 'esk' 3 +7510 'eso' 3 +7511 'esp' 3 +7512 'ess' 3 +7513 'est' 3 +7514 'esy' 3 +7515 'eta' 3 +7516 'etc' 3 +7517 'ete' 3 +7518 'eth' 3 +7519 'eti' 3 +7520 'eto' 3 +7521 'etr' 3 +7522 'ets' 3 +7523 'ett' 3 +7524 'etu' 3 +7525 'ety' 3 +7526 'etz' 3 +7527 'eur' 3 +7528 'eus' 3 +7529 'eva' 3 +7530 'eve' 3 +7531 'evt' 3 +7532 'ews' 3 +7533 'exc' 3 +7534 'exe' 3 +7535 'exp' 3 +7536 'ext' 3 +7537 'eye' 3 +7538 'fab' 3 +7539 'fac' 3 +7540 'fal' 3 +7541 'fan' 3 +7542 'far' 3 +7543 'fas' 3 +7544 'fat' 3 +7545 'fav' 3 +7546 'fax' 3 +7547 'feb' 3 +7548 'fed' 3 +7549 'fee' 3 +7550 'fel' 3 +7551 'fem' 3 +7552 'fen' 3 +7553 'fer' 3 +7554 'fet' 3 +7555 'few' 3 +7556 'ffe' 3 +7557 'fff' 3 +7558 'ffi' 3 +7559 'fft' 3 +7560 'fib' 3 +7561 'fic' 3 +7562 'fid' 3 +7563 'fif' 3 +7564 'fig' 3 +7565 'fil' 3 +7566 'fin' 3 +7567 'fir' 3 +7568 'fit' 3 +7569 'fix' 3 +7570 'fld' 3 +7571 'fle' 3 +7572 'flo' 3 +7573 'flu' 3 +7574 'fly' 3 +7575 'fmt' 3 +7576 'fol' 3 +7577 'fon' 3 +7578 'foo' 3 +7579 'for' 3 +7580 'fos' 3 +7581 'fox' 3 +7582 'fra' 3 +7583 'fre' 3 +7584 'fri' 3 +7585 'frm' 3 +7586 'fro' 3 +7587 'fst' 3 +7588 'fte' 3 +7589 'ftp' 3 +7590 'fts' 3 +7591 'fty' 3 +7592 'ful' 3 +7593 'fun' 3 +7594 'fur' 3 +7595 'fut' 3 +7596 'fé' 3 +7597 'fø' 3 +7598 'fü' 3 +7599 'gae' 3 +7600 'gal' 3 +7601 'gam' 3 +7602 'gan' 3 +7603 'gap' 3 +7604 'gar' 3 +7605 'gas' 3 +7606 'gat' 3 +7607 'gay' 3 +7608 'gca' 3 +7609 'gcc' 3 +7610 'gcd' 3 +7611 'geb' 3 +7612 'ged' 3 +7613 'geh' 3 +7614 'gel' 3 +7615 'gem' 3 +7616 'gen' 3 +7617 'geo' 3 +7618 'geq' 3 +7619 'ger' 3 +7620 'ges' 3 +7621 'get' 3 +7622 'gew' 3 +7623 'gex' 3 +7624 'ght' 3 +7625 'gia' 3 +7626 'gid' 3 +7627 'gie' 3 +7628 'gif' 3 +7629 'gil' 3 +7630 'gin' 3 +7631 'gio' 3 +7632 'gis' 3 +7633 'git' 3 +7634 'gle' 3 +7635 'gly' 3 +7636 'gmt' 3 +7637 'gnu' 3 +7638 'god' 3 +7639 'gol' 3 +7640 'gom' 3 +7641 'gon' 3 +7642 'gor' 3 +7643 'gos' 3 +7644 'got' 3 +7645 'gov' 3 
+7646 'gow' 3 +7647 'gpu' 3 +7648 'gra' 3 +7649 'gre' 3 +7650 'gro' 3 +7651 'grp' 3 +7652 'gru' 3 +7653 'gte' 3 +7654 'gtk' 3 +7655 'gua' 3 +7656 'gue' 3 +7657 'gui' 3 +7658 'gun' 3 +7659 'gut' 3 +7660 'hab' 3 +7661 'had' 3 +7662 'hai' 3 +7663 'hal' 3 +7664 'ham' 3 +7665 'han' 3 +7666 'hao' 3 +7667 'hap' 3 +7668 'har' 3 +7669 'has' 3 +7670 'hat' 3 +7671 'hav' 3 +7672 'haw' 3 +7673 'hay' 3 +7674 'haz' 3 +7675 'hdr' 3 +7676 'hea' 3 +7677 'hed' 3 +7678 'hee' 3 +7679 'hei' 3 +7680 'hel' 3 +7681 'hem' 3 +7682 'hen' 3 +7683 'hep' 3 +7684 'her' 3 +7685 'hes' 3 +7686 'het' 3 +7687 'hev' 3 +7688 'hew' 3 +7689 'hex' 3 +7690 'hey' 3 +7691 'hib' 3 +7692 'hic' 3 +7693 'hid' 3 +7694 'hig' 3 +7695 'hil' 3 +7696 'him' 3 +7697 'hin' 3 +7698 'hip' 3 +7699 'hir' 3 +7700 'his' 3 +7701 'hit' 3 +7702 'hma' 3 +7703 'hoc' 3 +7704 'hod' 3 +7705 'hoe' 3 +7706 'hof' 3 +7707 'hog' 3 +7708 'hol' 3 +7709 'hom' 3 +7710 'hon' 3 +7711 'hop' 3 +7712 'hor' 3 +7713 'hos' 3 +7714 'hot' 3 +7715 'hou' 3 +7716 'hov' 3 +7717 'how' 3 +7718 'hpp' 3 +7719 'hra' 3 +7720 'hta' 3 +7721 'hti' 3 +7722 'htm' 3 +7723 'htt' 3 +7724 'hua' 3 +7725 'hub' 3 +7726 'hue' 3 +7727 'hui' 3 +7728 'hum' 3 +7729 'hur' 3 +7730 'hus' 3 +7731 'hyd' 3 +7732 'hyp' 3 +7733 'há' 3 +7734 'hã' 3 +7735 'hä' 3 +7736 'hé' 3 +7737 'hö' 3 +7738 'iOS' 3 +7739 'iac' 3 +7740 'iae' 3 +7741 'iah' 3 +7742 'iak' 3 +7743 'ial' 3 +7744 'iam' 3 +7745 'ian' 3 +7746 'iao' 3 +7747 'iar' 3 +7748 'ias' 3 +7749 'iat' 3 +7750 'iaz' 3 +7751 'iba' 3 +7752 'ibe' 3 +7753 'ibi' 3 +7754 'ibo' 3 +7755 'ibr' 3 +7756 'ibu' 3 +7757 'ica' 3 +7758 'icc' 3 +7759 'ice' 3 +7760 'ich' 3 +7761 'ici' 3 +7762 'ick' 3 +7763 'icl' 3 +7764 'ico' 3 +7765 'ics' 3 +7766 'ict' 3 +7767 'icy' 3 +7768 'icz' 3 +7769 'ida' 3 +7770 'idd' 3 +7771 'ide' 3 +7772 'idi' 3 +7773 'idl' 3 +7774 'ido' 3 +7775 'ids' 3 +7776 'idx' 3 +7777 'idy' 3 +7778 'iec' 3 +7779 'ied' 3 +7780 'ief' 3 +7781 'ieg' 3 +7782 'iei' 3 +7783 'iej' 3 +7784 'iek' 3 +7785 'iel' 3 +7786 'iem' 3 +7787 'ien' 3 +7788 'ier' 3 +7789 'ies' 3 +7790 'iet' 3 +7791 'ieu' 3 +7792 'iev' 3 +7793 'iew' 3 +7794 'iez' 3 +7795 'ifa' 3 +7796 'ife' 3 +7797 'iff' 3 +7798 'ifi' 3 +7799 'ifs' 3 +7800 'ift' 3 +7801 'ify' 3 +7802 'iga' 3 +7803 'ige' 3 +7804 'igg' 3 +7805 'igh' 3 +7806 'igi' 3 +7807 'igl' 3 +7808 'igm' 3 +7809 'ign' 3 +7810 'igo' 3 +7811 'igr' 3 +7812 'igs' 3 +7813 'igt' 3 +7814 'igu' 3 +7815 'iii' 3 +7816 'ija' 3 +7817 'ije' 3 +7818 'iji' 3 +7819 'ijk' 3 +7820 'ijn' 3 +7821 'ijo' 3 +7822 'iju' 3 +7823 'ika' 3 +7824 'ike' 3 +7825 'ikh' 3 +7826 'iki' 3 +7827 'ikk' 3 +7828 'iko' 3 +7829 'iks' 3 +7830 'ikt' 3 +7831 'iku' 3 +7832 'ila' 3 +7833 'ild' 3 +7834 'ile' 3 +7835 'ili' 3 +7836 'ilk' 3 +7837 'ill' 3 +7838 'ilo' 3 +7839 'ils' 3 +7840 'ilt' 3 +7841 'ily' 3 +7842 'ima' 3 +7843 'imb' 3 +7844 'ime' 3 +7845 'img' 3 +7846 'imi' 3 +7847 'imm' 3 +7848 'imo' 3 +7849 'imp' 3 +7850 'ims' 3 +7851 'ina' 3 +7852 'inc' 3 +7853 'ind' 3 +7854 'ine' 3 +7855 'inf' 3 +7856 'ing' 3 +7857 'inh' 3 +7858 'ini' 3 +7859 'inj' 3 +7860 'ink' 3 +7861 'inn' 3 +7862 'ino' 3 +7863 'inp' 3 +7864 'ins' 3 +7865 'int' 3 +7866 'inu' 3 +7867 'inv' 3 +7868 'inx' 3 +7869 'iny' 3 +7870 'inz' 3 +7871 'iod' 3 +7872 'iol' 3 +7873 'iom' 3 +7874 'ion' 3 +7875 'iop' 3 +7876 'ior' 3 +7877 'ios' 3 +7878 'iot' 3 +7879 'iou' 3 +7880 'iov' 3 +7881 'iox' 3 +7882 'ipa' 3 +7883 'ipe' 3 +7884 'iph' 3 +7885 'ipl' 3 +7886 'ipo' 3 +7887 'ipp' 3 +7888 'ips' 3 +7889 'ipt' 3 +7890 'ipv' 3 +7891 'ipy' 3 +7892 'iqu' 3 +7893 'ira' 3 +7894 'irc' 3 +7895 'ird' 3 +7896 'ire' 3 +7897 'iri' 3 +7898 'irk' 3 +7899 'irl' 3 
+7900 'irm' 3 +7901 'iro' 3 +7902 'irq' 3 +7903 'irs' 3 +7904 'irt' 3 +7905 'iry' 3 +7906 'isa' 3 +7907 'isc' 3 +7908 'isd' 3 +7909 'ise' 3 +7910 'isf' 3 +7911 'ish' 3 +7912 'isi' 3 +7913 'isk' 3 +7914 'isl' 3 +7915 'ism' 3 +7916 'iso' 3 +7917 'isp' 3 +7918 'iss' 3 +7919 'ist' 3 +7920 'isu' 3 +7921 'isy' 3 +7922 'isz' 3 +7923 'ita' 3 +7924 'ite' 3 +7925 'ith' 3 +7926 'iti' 3 +7927 'itm' 3 +7928 'ito' 3 +7929 'itr' 3 +7930 'its' 3 +7931 'itt' 3 +7932 'itu' 3 +7933 'ity' 3 +7934 'itz' 3 +7935 'ium' 3 +7936 'ius' 3 +7937 'iva' 3 +7938 'ive' 3 +7939 'ivi' 3 +7940 'ivo' 3 +7941 'ivy' 3 +7942 'ixa' 3 +7943 'ixo' 3 +7944 'iya' 3 +7945 'iza' 3 +7946 'ize' 3 +7947 'izi' 3 +7948 'izo' 3 +7949 'izu' 3 +7950 'izz' 3 +7951 'iß' 3 +7952 'ié' 3 +7953 'ië' 3 +7954 'ió' 3 +7955 'ią' 3 +7956 'ić' 3 +7957 'ič' 3 +7958 'ię' 3 +7959 'ił' 3 +7960 'iş' 3 +7961 'iš' 3 +7962 'jab' 3 +7963 'jac' 3 +7964 'jad' 3 +7965 'jah' 3 +7966 'jak' 3 +7967 'jal' 3 +7968 'jam' 3 +7969 'jan' 3 +7970 'jar' 3 +7971 'jas' 3 +7972 'jav' 3 +7973 'jax' 3 +7974 'jay' 3 +7975 'jdk' 3 +7976 'jee' 3 +7977 'jel' 3 +7978 'jem' 3 +7979 'jen' 3 +7980 'jer' 3 +7981 'jes' 3 +7982 'jet' 3 +7983 'jid' 3 +7984 'jin' 3 +7985 'jis' 3 +7986 'jit' 3 +7987 'job' 3 +7988 'jon' 3 +7989 'jor' 3 +7990 'jos' 3 +7991 'jou' 3 +7992 'joy' 3 +7993 'jpg' 3 +7994 'jsp' 3 +7995 'jud' 3 +7996 'jug' 3 +7997 'jul' 3 +7998 'jun' 3 +7999 'jur' 3 +8000 'jà' 3 +8001 'jä' 3 +8002 'jö' 3 +8003 'jø' 3 +8004 'ją' 3 +8005 'ję' 3 +8006 'kal' 3 +8007 'kan' 3 +8008 'kap' 3 +8009 'kar' 3 +8010 'kas' 3 +8011 'kat' 3 +8012 'ked' 3 +8013 'kee' 3 +8014 'keh' 3 +8015 'kel' 3 +8016 'ken' 3 +8017 'ker' 3 +8018 'kes' 3 +8019 'ket' 3 +8020 'key' 3 +8021 'kid' 3 +8022 'kie' 3 +8023 'kil' 3 +8024 'kim' 3 +8025 'kin' 3 +8026 'kip' 3 +8027 'kir' 3 +8028 'kit' 3 +8029 'kle' 3 +8030 'kok' 3 +8031 'kol' 3 +8032 'kom' 3 +8033 'kon' 3 +8034 'kop' 3 +8035 'kor' 3 +8036 'kos' 3 +8037 'kov' 3 +8038 'kow' 3 +8039 'ksi' 3 +8040 'kte' 3 +8041 'kun' 3 +8042 'kur' 3 +8043 'kus' 3 +8044 'ká' 3 +8045 'kä' 3 +8046 'ké' 3 +8047 'kö' 3 +8048 'ką' 3 +8049 'kę' 3 +8050 'lab' 3 +8051 'lac' 3 +8052 'lad' 3 +8053 'lag' 3 +8054 'lah' 3 +8055 'lam' 3 +8056 'lan' 3 +8057 'lap' 3 +8058 'lar' 3 +8059 'las' 3 +8060 'lat' 3 +8061 'lav' 3 +8062 'law' 3 +8063 'lay' 3 +8064 'lbl' 3 +8065 'lea' 3 +8066 'lec' 3 +8067 'led' 3 +8068 'lee' 3 +8069 'lef' 3 +8070 'leg' 3 +8071 'lei' 3 +8072 'lek' 3 +8073 'lem' 3 +8074 'len' 3 +8075 'lep' 3 +8076 'leq' 3 +8077 'ler' 3 +8078 'les' 3 +8079 'let' 3 +8080 'lev' 3 +8081 'lew' 3 +8082 'lex' 3 +8083 'ley' 3 +8084 'lez' 3 +8085 'lia' 3 +8086 'lib' 3 +8087 'lic' 3 +8088 'lid' 3 +8089 'lie' 3 +8090 'lif' 3 +8091 'lig' 3 +8092 'lij' 3 +8093 'lik' 3 +8094 'lim' 3 +8095 'lin' 3 +8096 'lio' 3 +8097 'lip' 3 +8098 'lis' 3 +8099 'lit' 3 +8100 'liv' 3 +8101 'lla' 3 +8102 'lle' 3 +8103 'lli' 3 +8104 'llo' 3 +8105 'lng' 3 +8106 'lob' 3 +8107 'loc' 3 +8108 'lod' 3 +8109 'loe' 3 +8110 'log' 3 +8111 'lon' 3 +8112 'loo' 3 +8113 'lop' 3 +8114 'lor' 3 +8115 'los' 3 +8116 'lot' 3 +8117 'lou' 3 +8118 'lov' 3 +8119 'low' 3 +8120 'loy' 3 +8121 'lst' 3 +8122 'lua' 3 +8123 'luc' 3 +8124 'lum' 3 +8125 'lun' 3 +8126 'lus' 3 +8127 'lut' 3 +8128 'lux' 3 +8129 'lvl' 3 +8130 'lyn' 3 +8131 'lys' 3 +8132 'là' 3 +8133 'lá' 3 +8134 'lä' 3 +8135 'lé' 3 +8136 'ló' 3 +8137 'lö' 3 +8138 'lą' 3 +8139 'lı' 3 +8140 'mAh' 3 +8141 'mac' 3 +8142 'mad' 3 +8143 'mag' 3 +8144 'mai' 3 +8145 'maj' 3 +8146 'mak' 3 +8147 'mal' 3 +8148 'man' 3 +8149 'map' 3 +8150 'mar' 3 +8151 'mas' 3 +8152 'mat' 3 +8153 'max' 3 +8154 'may' 3 +8155 'maz' 3 
+8156 'mbH' 3 +8157 'med' 3 +8158 'meg' 3 +8159 'mek' 3 +8160 'mel' 3 +8161 'mem' 3 +8162 'men' 3 +8163 'mer' 3 +8164 'mes' 3 +8165 'met' 3 +8166 'mez' 3 +8167 'mgr' 3 +8168 'mia' 3 +8169 'mic' 3 +8170 'mid' 3 +8171 'mie' 3 +8172 'mil' 3 +8173 'mim' 3 +8174 'min' 3 +8175 'mir' 3 +8176 'mis' 3 +8177 'mit' 3 +8178 'mix' 3 +8179 'mma' 3 +8180 'mmm' 3 +8181 'mob' 3 +8182 'mod' 3 +8183 'mol' 3 +8184 'mom' 3 +8185 'mon' 3 +8186 'mor' 3 +8187 'mos' 3 +8188 'mot' 3 +8189 'mov' 3 +8190 'moz' 3 +8191 'mph' 3 +8192 'mpi' 3 +8193 'mpl' 3 +8194 'mse' 3 +8195 'msg' 3 +8196 'mud' 3 +8197 'mul' 3 +8198 'mun' 3 +8199 'mur' 3 +8200 'mus' 3 +8201 'mut' 3 +8202 'mux' 3 +8203 'mys' 3 +8204 'mé' 3 +8205 'nad' 3 +8206 'nah' 3 +8207 'nai' 3 +8208 'nak' 3 +8209 'nal' 3 +8210 'nam' 3 +8211 'nan' 3 +8212 'nap' 3 +8213 'nar' 3 +8214 'nas' 3 +8215 'nat' 3 +8216 'nav' 3 +8217 'nbr' 3 +8218 'nce' 3 +8219 'nda' 3 +8220 'nds' 3 +8221 'nea' 3 +8222 'ned' 3 +8223 'nee' 3 +8224 'neg' 3 +8225 'neh' 3 +8226 'nej' 3 +8227 'nek' 3 +8228 'nel' 3 +8229 'nem' 3 +8230 'nen' 3 +8231 'neo' 3 +8232 'neq' 3 +8233 'ner' 3 +8234 'nes' 3 +8235 'net' 3 +8236 'neu' 3 +8237 'new' 3 +8238 'nex' 3 +8239 'ney' 3 +8240 'nez' 3 +8241 'nia' 3 +8242 'nic' 3 +8243 'nie' 3 +8244 'nih' 3 +8245 'nik' 3 +8246 'nil' 3 +8247 'nim' 3 +8248 'nin' 3 +8249 'nio' 3 +8250 'nis' 3 +8251 'nit' 3 +8252 'nob' 3 +8253 'noc' 3 +8254 'nod' 3 +8255 'nom' 3 +8256 'non' 3 +8257 'nop' 3 +8258 'nor' 3 +8259 'nos' 3 +8260 'not' 3 +8261 'nou' 3 +8262 'nov' 3 +8263 'now' 3 +8264 'nox' 3 +8265 'npc' 3 +8266 'npm' 3 +8267 'npy' 3 +8268 'nth' 3 +8269 'num' 3 +8270 'nut' 3 +8271 'nya' 3 +8272 'ná' 3 +8273 'né' 3 +8274 'ní' 3 +8275 'ný' 3 +8276 'ną' 3 +8277 'nę' 3 +8278 'ně' 3 +8279 'oad' 3 +8280 'oba' 3 +8281 'obb' 3 +8282 'obe' 3 +8283 'obi' 3 +8284 'obj' 3 +8285 'obl' 3 +8286 'obo' 3 +8287 'obs' 3 +8288 'oby' 3 +8289 'oca' 3 +8290 'occ' 3 +8291 'oce' 3 +8292 'och' 3 +8293 'oci' 3 +8294 'ock' 3 +8295 'ocl' 3 +8296 'oco' 3 +8297 'ocr' 3 +8298 'ocs' 3 +8299 'oct' 3 +8300 'ocy' 3 +8301 'oda' 3 +8302 'odb' 3 +8303 'odd' 3 +8304 'ode' 3 +8305 'odi' 3 +8306 'odo' 3 +8307 'ods' 3 +8308 'ody' 3 +8309 'oen' 3 +8310 'oes' 3 +8311 'off' 3 +8312 'ofs' 3 +8313 'oft' 3 +8314 'oga' 3 +8315 'oge' 3 +8316 'ogg' 3 +8317 'ogh' 3 +8318 'ogi' 3 +8319 'ogl' 3 +8320 'ogn' 3 +8321 'ogo' 3 +8322 'ogr' 3 +8323 'ogs' 3 +8324 'ogy' 3 +8325 'ohl' 3 +8326 'ohn' 3 +8327 'oho' 3 +8328 'oid' 3 +8329 'oil' 3 +8330 'oin' 3 +8331 'oir' 3 +8332 'ois' 3 +8333 'oit' 3 +8334 'oka' 3 +8335 'oke' 3 +8336 'oki' 3 +8337 'oko' 3 +8338 'oks' 3 +8339 'oku' 3 +8340 'oky' 3 +8341 'ola' 3 +8342 'old' 3 +8343 'ole' 3 +8344 'olf' 3 +8345 'oli' 3 +8346 'olk' 3 +8347 'oll' 3 +8348 'oln' 3 +8349 'olo' 3 +8350 'ols' 3 +8351 'olt' 3 +8352 'olu' 3 +8353 'oly' 3 +8354 'oma' 3 +8355 'omb' 3 +8356 'ome' 3 +8357 'omi' 3 +8358 'omm' 3 +8359 'omo' 3 +8360 'omp' 3 +8361 'oms' 3 +8362 'omy' 3 +8363 'ona' 3 +8364 'onc' 3 +8365 'ond' 3 +8366 'one' 3 +8367 'ong' 3 +8368 'oni' 3 +8369 'onn' 3 +8370 'ono' 3 +8371 'ons' 3 +8372 'ont' 3 +8373 'ony' 3 +8374 'onz' 3 +8375 'ood' 3 +8376 'ook' 3 +8377 'ool' 3 +8378 'oom' 3 +8379 'oon' 3 +8380 'ooo' 3 +8381 'oop' 3 +8382 'oor' 3 +8383 'oot' 3 +8384 'opa' 3 +8385 'ope' 3 +8386 'opf' 3 +8387 'oph' 3 +8388 'opi' 3 +8389 'opl' 3 +8390 'opo' 3 +8391 'opp' 3 +8392 'ops' 3 +8393 'opt' 3 +8394 'opy' 3 +8395 'ora' 3 +8396 'orb' 3 +8397 'orc' 3 +8398 'ord' 3 +8399 'ore' 3 +8400 'orf' 3 +8401 'org' 3 +8402 'ori' 3 +8403 'ork' 3 +8404 'orm' 3 +8405 'orn' 3 +8406 'oro' 3 +8407 'orp' 3 +8408 'orr' 3 +8409 'ors' 3 +8410 
'ort' 3 +8411 'oru' 3 +8412 'ory' 3 +8413 'osa' 3 +8414 'osc' 3 +8415 'ose' 3 +8416 'osh' 3 +8417 'osi' 3 +8418 'oso' 3 +8419 'osp' 3 +8420 'oss' 3 +8421 'ost' 3 +8422 'ota' 3 +8423 'ote' 3 +8424 'oth' 3 +8425 'oti' 3 +8426 'oto' 3 +8427 'ots' 3 +8428 'ott' 3 +8429 'oty' 3 +8430 'oub' 3 +8431 'oud' 3 +8432 'oug' 3 +8433 'oui' 3 +8434 'ouk' 3 +8435 'oul' 3 +8436 'oun' 3 +8437 'oup' 3 +8438 'our' 3 +8439 'ous' 3 +8440 'out' 3 +8441 'ouv' 3 +8442 'oux' 3 +8443 'ova' 3 +8444 'ove' 3 +8445 'ovi' 3 +8446 'ovo' 3 +8447 'ovy' 3 +8448 'owa' 3 +8449 'owe' 3 +8450 'owi' 3 +8451 'owl' 3 +8452 'own' 3 +8453 'owo' 3 +8454 'ows' 3 +8455 'owy' 3 +8456 'oxy' 3 +8457 'oya' 3 +8458 'oyo' 3 +8459 'ozo' 3 +8460 'ozy' 3 +8461 'oł' 3 +8462 'pac' 3 +8463 'pad' 3 +8464 'pag' 3 +8465 'pak' 3 +8466 'pal' 3 +8467 'pan' 3 +8468 'pap' 3 +8469 'par' 3 +8470 'pas' 3 +8471 'pat' 3 +8472 'pay' 3 +8473 'pci' 3 +8474 'pdb' 3 +8475 'pdf' 3 +8476 'pec' 3 +8477 'ped' 3 +8478 'pee' 3 +8479 'peg' 3 +8480 'pei' 3 +8481 'pel' 3 +8482 'pem' 3 +8483 'pen' 3 +8484 'per' 3 +8485 'pes' 3 +8486 'pet' 3 +8487 'pex' 3 +8488 'pez' 3 +8489 'pha' 3 +8490 'phe' 3 +8491 'phi' 3 +8492 'php' 3 +8493 'phy' 3 +8494 'pic' 3 +8495 'pid' 3 +8496 'pie' 3 +8497 'pig' 3 +8498 'pin' 3 +8499 'pio' 3 +8500 'pip' 3 +8501 'pir' 3 +8502 'pis' 3 +8503 'pit' 3 +8504 'pix' 3 +8505 'pkg' 3 +8506 'pkl' 3 +8507 'pla' 3 +8508 'ple' 3 +8509 'plt' 3 +8510 'ply' 3 +8511 'png' 3 +8512 'pod' 3 +8513 'pol' 3 +8514 'pom' 3 +8515 'pon' 3 +8516 'pop' 3 +8517 'por' 3 +8518 'pos' 3 +8519 'pot' 3 +8520 'pow' 3 +8521 'ppa' 3 +8522 'ppe' 3 +8523 'ppo' 3 +8524 'pps' 3 +8525 'ppy' 3 +8526 'pra' 3 +8527 'pre' 3 +8528 'pri' 3 +8529 'pro' 3 +8530 'psi' 3 +8531 'psy' 3 +8532 'pta' 3 +8533 'pte' 3 +8534 'pth' 3 +8535 'pto' 3 +8536 'ptr' 3 +8537 'pts' 3 +8538 'pty' 3 +8539 'pub' 3 +8540 'pul' 3 +8541 'pun' 3 +8542 'pur' 3 +8543 'pus' 3 +8544 'put' 3 +8545 'pwd' 3 +8546 'qrt' 3 +8547 'qty' 3 +8548 'qua' 3 +8549 'que' 3 +8550 'qui' 3 +8551 'quo' 3 +8552 'rab' 3 +8553 'rac' 3 +8554 'rad' 3 +8555 'rae' 3 +8556 'raf' 3 +8557 'rag' 3 +8558 'rah' 3 +8559 'rai' 3 +8560 'raj' 3 +8561 'rak' 3 +8562 'ral' 3 +8563 'ram' 3 +8564 'ran' 3 +8565 'rap' 3 +8566 'raq' 3 +8567 'rar' 3 +8568 'ras' 3 +8569 'rat' 3 +8570 'rav' 3 +8571 'raw' 3 +8572 'rax' 3 +8573 'ray' 3 +8574 'raz' 3 +8575 'rdf' 3 +8576 'rea' 3 +8577 'reb' 3 +8578 'rec' 3 +8579 'red' 3 +8580 'ree' 3 +8581 'ref' 3 +8582 'reg' 3 +8583 'reh' 3 +8584 'rei' 3 +8585 'rek' 3 +8586 'rel' 3 +8587 'rem' 3 +8588 'ren' 3 +8589 'reo' 3 +8590 'rep' 3 +8591 'req' 3 +8592 'rer' 3 +8593 'res' 3 +8594 'ret' 3 +8595 'reu' 3 +8596 'rev' 3 +8597 'rew' 3 +8598 'rex' 3 +8599 'rey' 3 +8600 'rez' 3 +8601 'rgb' 3 +8602 'rho' 3 +8603 'rhs' 3 +8604 'ria' 3 +8605 'rib' 3 +8606 'ric' 3 +8607 'rid' 3 +8608 'rie' 3 +8609 'rif' 3 +8610 'rig' 3 +8611 'rij' 3 +8612 'rik' 3 +8613 'ril' 3 +8614 'rim' 3 +8615 'rin' 3 +8616 'rio' 3 +8617 'rip' 3 +8618 'rir' 3 +8619 'ris' 3 +8620 'rit' 3 +8621 'riv' 3 +8622 'rix' 3 +8623 'riz' 3 +8624 'rms' 3 +8625 'rna' 3 +8626 'rnd' 3 +8627 'rng' 3 +8628 'rnn' 3 +8629 'rob' 3 +8630 'roc' 3 +8631 'rod' 3 +8632 'roe' 3 +8633 'rog' 3 +8634 'roi' 3 +8635 'rok' 3 +8636 'rol' 3 +8637 'rom' 3 +8638 'ron' 3 +8639 'rop' 3 +8640 'ror' 3 +8641 'ros' 3 +8642 'rot' 3 +8643 'rou' 3 +8644 'rov' 3 +8645 'row' 3 +8646 'rox' 3 +8647 'roy' 3 +8648 'roz' 3 +8649 'rpc' 3 +8650 'rpm' 3 +8651 'rsa' 3 +8652 'rsp' 3 +8653 'rss' 3 +8654 'rst' 3 +8655 'rtl' 3 +8656 'rub' 3 +8657 'rud' 3 +8658 'rue' 3 +8659 'rug' 3 +8660 'rum' 3 +8661 'run' 3 +8662 'rup' 3 +8663 'rus' 3 +8664 
'rut' 3 +8665 'ryn' 3 +8666 'rys' 3 +8667 'rà' 3 +8668 'rá' 3 +8669 'rä' 3 +8670 'rå' 3 +8671 'ré' 3 +8672 'rí' 3 +8673 'ró' 3 +8674 'rę' 3 +8675 'sac' 3 +8676 'sad' 3 +8677 'saf' 3 +8678 'sal' 3 +8679 'sam' 3 +8680 'san' 3 +8681 'sar' 3 +8682 'sas' 3 +8683 'sat' 3 +8684 'sav' 3 +8685 'saw' 3 +8686 'say' 3 +8687 'sce' 3 +8688 'sch' 3 +8689 'sci' 3 +8690 'scr' 3 +8691 'sdk' 3 +8692 'sea' 3 +8693 'sec' 3 +8694 'sed' 3 +8695 'see' 3 +8696 'seg' 3 +8697 'sei' 3 +8698 'sek' 3 +8699 'sel' 3 +8700 'sem' 3 +8701 'sen' 3 +8702 'sep' 3 +8703 'seq' 3 +8704 'ser' 3 +8705 'ses' 3 +8706 'set' 3 +8707 'sex' 3 +8708 'sey' 3 +8709 'sez' 3 +8710 'sha' 3 +8711 'she' 3 +8712 'shi' 3 +8713 'shr' 3 +8714 'sic' 3 +8715 'sid' 3 +8716 'sie' 3 +8717 'sig' 3 +8718 'sil' 3 +8719 'sim' 3 +8720 'sin' 3 +8721 'sis' 3 +8722 'sit' 3 +8723 'six' 3 +8724 'ska' 3 +8725 'ske' 3 +8726 'ski' 3 +8727 'sku' 3 +8728 'sky' 3 +8729 'snd' 3 +8730 'soc' 3 +8731 'sof' 3 +8732 'sol' 3 +8733 'som' 3 +8734 'son' 3 +8735 'sor' 3 +8736 'sov' 3 +8737 'spe' 3 +8738 'spi' 3 +8739 'spl' 3 +8740 'spo' 3 +8741 'spr' 3 +8742 'spy' 3 +8743 'sql' 3 +8744 'squ' 3 +8745 'src' 3 +8746 'ssa' 3 +8747 'ssh' 3 +8748 'ssl' 3 +8749 'sta' 3 +8750 'std' 3 +8751 'ste' 3 +8752 'sth' 3 +8753 'sti' 3 +8754 'stm' 3 +8755 'sto' 3 +8756 'str' 3 +8757 'sts' 3 +8758 'stu' 3 +8759 'sty' 3 +8760 'sub' 3 +8761 'suc' 3 +8762 'sum' 3 +8763 'sun' 3 +8764 'sup' 3 +8765 'sur' 3 +8766 'sus' 3 +8767 'svg' 3 +8768 'svn' 3 +8769 'swe' 3 +8770 'sym' 3 +8771 'syn' 3 +8772 'sys' 3 +8773 'tab' 3 +8774 'tag' 3 +8775 'tah' 3 +8776 'tal' 3 +8777 'tam' 3 +8778 'tan' 3 +8779 'tap' 3 +8780 'tar' 3 +8781 'tas' 3 +8782 'tat' 3 +8783 'tau' 3 +8784 'tax' 3 +8785 'tbl' 3 +8786 'tcp' 3 +8787 'tea' 3 +8788 'tec' 3 +8789 'ted' 3 +8790 'tee' 3 +8791 'tek' 3 +8792 'tel' 3 +8793 'tem' 3 +8794 'ten' 3 +8795 'ter' 3 +8796 'tes' 3 +8797 'tet' 3 +8798 'tex' 3 +8799 'tgt' 3 +8800 'tha' 3 +8801 'the' 3 +8802 'thi' 3 +8803 'thm' 3 +8804 'thr' 3 +8805 'ths' 3 +8806 'thy' 3 +8807 'tic' 3 +8808 'tid' 3 +8809 'tie' 3 +8810 'tif' 3 +8811 'tig' 3 +8812 'tik' 3 +8813 'til' 3 +8814 'tim' 3 +8815 'tin' 3 +8816 'tip' 3 +8817 'tis' 3 +8818 'tit' 3 +8819 'tle' 3 +8820 'tls' 3 +8821 'tml' 3 +8822 'tmp' 3 +8823 'toc' 3 +8824 'tod' 3 +8825 'tok' 3 +8826 'tol' 3 +8827 'tom' 3 +8828 'ton' 3 +8829 'too' 3 +8830 'top' 3 +8831 'tor' 3 +8832 'tos' 3 +8833 'tot' 3 +8834 'tow' 3 +8835 'tpl' 3 +8836 'tra' 3 +8837 'tre' 3 +8838 'tri' 3 +8839 'trl' 3 +8840 'tro' 3 +8841 'tru' 3 +8842 'try' 3 +8843 'tte' 3 +8844 'tti' 3 +8845 'ttl' 3 +8846 'ttp' 3 +8847 'tty' 3 +8848 'tum' 3 +8849 'tun' 3 +8850 'tur' 3 +8851 'two' 3 +8852 'txt' 3 +8853 'typ' 3 +8854 'té' 3 +8855 'tó' 3 +8856 'ual' 3 +8857 'uan' 3 +8858 'uar' 3 +8859 'uba' 3 +8860 'ubb' 3 +8861 'ube' 3 +8862 'ubi' 3 +8863 'ubl' 3 +8864 'ubs' 3 +8865 'uby' 3 +8866 'uca' 3 +8867 'ucc' 3 +8868 'uce' 3 +8869 'uch' 3 +8870 'uci' 3 +8871 'uck' 3 +8872 'uct' 3 +8873 'uda' 3 +8874 'udd' 3 +8875 'ude' 3 +8876 'udi' 3 +8877 'udo' 3 +8878 'uds' 3 +8879 'ued' 3 +8880 'uel' 3 +8881 'uen' 3 +8882 'uer' 3 +8883 'ues' 3 +8884 'uet' 3 +8885 'uez' 3 +8886 'ufe' 3 +8887 'uff' 3 +8888 'uga' 3 +8889 'uge' 3 +8890 'ugg' 3 +8891 'ugh' 3 +8892 'ugi' 3 +8893 'ugo' 3 +8894 'ugs' 3 +8895 'ugu' 3 +8896 'uid' 3 +8897 'uil' 3 +8898 'uin' 3 +8899 'uir' 3 +8900 'uis' 3 +8901 'uit' 3 +8902 'uje' 3 +8903 'uka' 3 +8904 'uke' 3 +8905 'uki' 3 +8906 'uko' 3 +8907 'uks' 3 +8908 'uku' 3 +8909 'ula' 3 +8910 'uld' 3 +8911 'ule' 3 +8912 'ulf' 3 +8913 'uli' 3 +8914 'ulk' 3 +8915 'ull' 3 +8916 'ulo' 3 +8917 'ulp' 3 +8918 'uls' 3 
+8919 'ult' 3 +8920 'ulu' 3 +8921 'uly' 3 +8922 'uma' 3 +8923 'umb' 3 +8924 'ume' 3 +8925 'umi' 3 +8926 'uml' 3 +8927 'umm' 3 +8928 'umn' 3 +8929 'umo' 3 +8930 'ump' 3 +8931 'ums' 3 +8932 'umu' 3 +8933 'una' 3 +8934 'unc' 3 +8935 'und' 3 +8936 'une' 3 +8937 'ung' 3 +8938 'uni' 3 +8939 'unj' 3 +8940 'unk' 3 +8941 'unn' 3 +8942 'uno' 3 +8943 'uns' 3 +8944 'unt' 3 +8945 'upa' 3 +8946 'upe' 3 +8947 'upo' 3 +8948 'upp' 3 +8949 'ups' 3 +8950 'upt' 3 +8951 'ura' 3 +8952 'urb' 3 +8953 'urd' 3 +8954 'ure' 3 +8955 'urf' 3 +8956 'urg' 3 +8957 'uri' 3 +8958 'urk' 3 +8959 'url' 3 +8960 'urm' 3 +8961 'urn' 3 +8962 'uro' 3 +8963 'urr' 3 +8964 'urs' 3 +8965 'urt' 3 +8966 'uru' 3 +8967 'ury' 3 +8968 'usa' 3 +8969 'usb' 3 +8970 'usc' 3 +8971 'use' 3 +8972 'ush' 3 +8973 'usi' 3 +8974 'usk' 3 +8975 'uso' 3 +8976 'usp' 3 +8977 'usr' 3 +8978 'uss' 3 +8979 'ust' 3 +8980 'usu' 3 +8981 'usz' 3 +8982 'uta' 3 +8983 'utc' 3 +8984 'ute' 3 +8985 'utf' 3 +8986 'uth' 3 +8987 'uti' 3 +8988 'utm' 3 +8989 'uto' 3 +8990 'uts' 3 +8991 'utt' 3 +8992 'uty' 3 +8993 'utz' 3 +8994 'uum' 3 +8995 'uve' 3 +8996 'uvo' 3 +8997 'uxe' 3 +8998 'uya' 3 +8999 'uzz' 3 +9000 'uß' 3 +9001 'ué' 3 +9002 'uí' 3 +9003 'už' 3 +9004 'vac' 3 +9005 'vae' 3 +9006 'val' 3 +9007 'van' 3 +9008 'var' 3 +9009 'vas' 3 +9010 'vat' 3 +9011 'vec' 3 +9012 'ved' 3 +9013 'vee' 3 +9014 'veg' 3 +9015 'veh' 3 +9016 'vel' 3 +9017 'ven' 3 +9018 'ver' 3 +9019 'ves' 3 +9020 'vet' 3 +9021 'vex' 3 +9022 'vey' 3 +9023 'vez' 3 +9024 'via' 3 +9025 'vic' 3 +9026 'vid' 3 +9027 'vie' 3 +9028 'vig' 3 +9029 'vii' 3 +9030 'vik' 3 +9031 'vil' 3 +9032 'vim' 3 +9033 'vin' 3 +9034 'vio' 3 +9035 'vip' 3 +9036 'vir' 3 +9037 'vis' 3 +9038 'vit' 3 +9039 'viv' 3 +9040 'viz' 3 +9041 'voc' 3 +9042 'vod' 3 +9043 'vol' 3 +9044 'von' 3 +9045 'vor' 3 +9046 'vos' 3 +9047 'vox' 3 +9048 'voy' 3 +9049 'vre' 3 +9050 'vue' 3 +9051 'vá' 3 +9052 'vä' 3 +9053 'vé' 3 +9054 'ví' 3 +9055 'vě' 3 +9056 'wal' 3 +9057 'wan' 3 +9058 'wap' 3 +9059 'war' 3 +9060 'was' 3 +9061 'wat' 3 +9062 'wav' 3 +9063 'way' 3 +9064 'web' 3 +9065 'wed' 3 +9066 'weg' 3 +9067 'wei' 3 +9068 'wel' 3 +9069 'wen' 3 +9070 'wer' 3 +9071 'wet' 3 +9072 'whe' 3 +9073 'who' 3 +9074 'why' 3 +9075 'wid' 3 +9076 'wie' 3 +9077 'wig' 3 +9078 'wik' 3 +9079 'wil' 3 +9080 'win' 3 +9081 'wis' 3 +9082 'wit' 3 +9083 'wol' 3 +9084 'won' 3 +9085 'wor' 3 +9086 'www' 3 +9087 'wyn' 3 +9088 'xFF' 3 +9089 'xcb' 3 +9090 'xed' 3 +9091 'xes' 3 +9092 'xfe' 3 +9093 'xff' 3 +9094 'xhr' 3 +9095 'xia' 3 +9096 'xic' 3 +9097 'xim' 3 +9098 'xin' 3 +9099 'xis' 3 +9100 'xit' 3 +9101 'xiv' 3 +9102 'xls' 3 +9103 'xml' 3 +9104 'xon' 3 +9105 'xor' 3 +9106 'xsd' 3 +9107 'xsl' 3 +9108 'xxx' 3 +9109 'xyz' 3 +9110 'yah' 3 +9111 'yal' 3 +9112 'yam' 3 +9113 'yan' 3 +9114 'yar' 3 +9115 'yaw' 3 +9116 'ych' 3 +9117 'ycl' 3 +9118 'yel' 3 +9119 'yen' 3 +9120 'yer' 3 +9121 'yes' 3 +9122 'yet' 3 +9123 'yla' 3 +9124 'yle' 3 +9125 'yll' 3 +9126 'yme' 3 +9127 'yml' 3 +9128 'yna' 3 +9129 'ync' 3 +9130 'yne' 3 +9131 'ynn' 3 +9132 'ynt' 3 +9133 'yon' 3 +9134 'yor' 3 +9135 'you' 3 +9136 'ype' 3 +9137 'yre' 3 +9138 'ysi' 3 +9139 'yst' 3 +9140 'ysz' 3 +9141 'yth' 3 +9142 'yun' 3 +9143 'yyy' 3 +9144 'yó' 3 +9145 'zag' 3 +9146 'zak' 3 +9147 'zan' 3 +9148 'zar' 3 +9149 'zas' 3 +9150 'zed' 3 +9151 'zee' 3 +9152 'zej' 3 +9153 'zek' 3 +9154 'zel' 3 +9155 'zem' 3 +9156 'zen' 3 +9157 'zer' 3 +9158 'zes' 3 +9159 'zet' 3 +9160 'zew' 3 +9161 'zia' 3 +9162 'zie' 3 +9163 'zig' 3 +9164 'zik' 3 +9165 'zin' 3 +9166 'zip' 3 +9167 'zon' 3 +9168 'zor' 3 +9169 'zos' 3 +9170 'zyk' 3 +9171 'zym' 3 +9172 'zza' 3 +9173 
'zzi' 3 +9174 'zzo' 3 +9175 'zá' 3 +9176 'zó' 3 +9177 'zą' 3 +9178 'zę' 3 +9179 'ző' 3 +9180 '{{\\' 3 +9181 '{})' 3 +9182 '{},' 3 +9183 '{}.' 3 +9184 '{}\\' 3 +9185 '{}_' 3 +9186 '}")' 3 +9187 '}",' 3 +9188 '}$$' 3 +9189 "}')" 3 +9190 "}'," 3 +9191 '}))' 3 +9192 '}),' 3 +9193 '}).' 3 +9194 '});' 3 +9195 '})\\' 3 +9196 '},"' 3 +9197 '},{' 3 +9198 '}.{' 3 +9199 '}/{' 3 +9200 '}:{' 3 +9201 '}%' 4 +19277 ' \'"\'' 4 +19278 " '#'" 4 +19279 " '''" 4 +19280 " '')" 4 +19281 " ''," 4 +19282 " '';" 4 +19283 " '*'" 4 +19284 " '-'" 4 +19285 " '--" 4 +19286 " './" 4 +19287 " '/'" 4 +19288 " ':'" 4 +19289 " '' 4 +19312 ' ...' 4 +19313 ' ../' 4 +19314 ' /**' 4 +19315 ' ///' 4 +19316 ' /><' 4 +19317 ' :-)' 4 +19318 ' <%=' 4 +19319 ' <--' 4 +19320 ' ===' 4 +19321 ' ==>' 4 +19322 ' >>>' 4 +19323 ' ???' 4 +19324 ' AAA' 4 +19325 ' AAP' 4 +19326 ' ABC' 4 +19327 ' ABI' 4 +19328 ' ABS' 4 +19329 ' ACC' 4 +19330 ' ACE' 4 +19331 ' ACK' 4 +19332 ' ACL' 4 +19333 ' ACS' 4 +19334 ' ACT' 4 +19335 ' ADA' 4 +19336 ' ADC' 4 +19337 ' ADD' 4 +19338 ' AES' 4 +19339 ' AFC' 4 +19340 ' AFL' 4 +19341 ' AFP' 4 +19342 ' AIR' 4 +19343 ' ALL' 4 +19344 ' ALS' 4 +19345 ' ALT' 4 +19346 ' AMD' 4 +19347 ' AMP' 4 +19348 ' ANC' 4 +19349 ' AND' 4 +19350 ' ANN' 4 +19351 ' ANY' 4 +19352 ' APC' 4 +19353 ' API' 4 +19354 ' APP' 4 +19355 ' APR' 4 +19356 ' ARE' 4 +19357 ' ARG' 4 +19358 ' ARM' 4 +19359 ' ART' 4 +19360 ' ASC' 4 +19361 ' ASD' 4 +19362 ' ASE' 4 +19363 ' ASF' 4 +19364 ' ASP' 4 +19365 ' ASS' 4 +19366 ' AST' 4 +19367 ' ATM' 4 +19368 ' ATP' 4 +19369 ' ATT' 4 +19370 ' AUT' 4 +19371 ' AWS' 4 +19372 ' Abb' 4 +19373 ' Abd' 4 +19374 ' Abe' 4 +19375 ' Abl' 4 +19376 ' Abr' 4 +19377 ' Abs' 4 +19378 ' Abu' 4 +19379 ' Acc' 4 +19380 ' Ace' 4 +19381 ' Ach' 4 +19382 ' Act' 4 +19383 ' Ada' 4 +19384 ' Add' 4 +19385 ' Ade' 4 +19386 ' Adv' 4 +19387 ' Aer' 4 +19388 ' Aff' 4 +19389 ' Afr' 4 +19390 ' Age' 4 +19391 ' Agg' 4 +19392 ' Agr' 4 +19393 ' Agu' 4 +19394 ' Aid' 4 +19395 ' Aim' 4 +19396 ' Ain' 4 +19397 ' Air' 4 +19398 ' Akt' 4 +19399 ' Ala' 4 +19400 ' Alb' 4 +19401 ' Alc' 4 +19402 ' Ald' 4 +19403 ' Ale' 4 +19404 ' Alf' 4 +19405 ' Alg' 4 +19406 ' Ali' 4 +19407 ' All' 4 +19408 ' Alo' 4 +19409 ' Als' 4 +19410 ' Alt' 4 +19411 ' Ama' 4 +19412 ' Amb' 4 +19413 ' Amp' 4 +19414 ' Amy' 4 +19415 ' Ana' 4 +19416 ' Anc' 4 +19417 ' And' 4 +19418 ' Ang' 4 +19419 ' Ank' 4 +19420 ' Ann' 4 +19421 ' Ans' 4 +19422 ' Ant' 4 +19423 ' Any' 4 +19424 ' Aph' 4 +19425 ' Api' 4 +19426 ' App' 4 +19427 ' Apr' 4 +19428 ' Aqu' 4 +19429 ' Ara' 4 +19430 ' Arc' 4 +19431 ' Are' 4 +19432 ' Arg' 4 +19433 ' Ari' 4 +19434 ' Ark' 4 +19435 ' Arm' 4 +19436 ' Arn' 4 +19437 ' Arr' 4 +19438 ' Ars' 4 +19439 ' Art' 4 +19440 ' Asc' 4 +19441 ' Ash' 4 +19442 ' Ask' 4 +19443 ' Asp' 4 +19444 ' Ass' 4 +19445 ' Ast' 4 +19446 ' Ath' 4 +19447 ' Atl' 4 +19448 ' Att' 4 +19449 ' Aub' 4 +19450 ' Aud' 4 +19451 ' Auf' 4 +19452 ' Aug' 4 +19453 ' Aur' 4 +19454 ' Aus' 4 +19455 ' Aut' 4 +19456 ' Aux' 4 +19457 ' Ave' 4 +19458 ' Aβ' 4 +19459 ' BAL' 4 +19460 ' BAR' 4 +19461 ' BAS' 4 +19462 ' BAT' 4 +19463 ' BBB' 4 +19464 ' BBC' 4 +19465 ' BCE' 4 +19466 ' BEL' 4 +19467 ' BET' 4 +19468 ' BIG' 4 +19469 ' BIN' 4 +19470 ' BIT' 4 +19471 ' BJP' 4 +19472 ' BMC' 4 +19473 ' BMI' 4 +19474 ' BMP' 4 +19475 ' BMW' 4 +19476 ' BRE' 4 +19477 ' BSD' 4 +19478 ' BTC' 4 +19479 ' BUS' 4 +19480 ' BUT' 4 +19481 ' Bab' 4 +19482 ' Bac' 4 +19483 ' Bad' 4 +19484 ' Bag' 4 +19485 ' Bah' 4 +19486 ' Bai' 4 +19487 ' Bak' 4 +19488 ' Bal' 4 +19489 ' Bam' 4 +19490 ' Ban' 4 +19491 ' Bar' 4 +19492 ' Bas' 4 +19493 ' Bat' 4 +19494 ' Bau' 4 +19495 ' Bav' 4 +19496 
' Bay' 4 +19497 ' Baz' 4 +19498 ' Bea' 4 +19499 ' Bec' 4 +19500 ' Bed' 4 +19501 ' Bee' 4 +19502 ' Beg' 4 +19503 ' Beh' 4 +19504 ' Bei' 4 +19505 ' Bek' 4 +19506 ' Bel' 4 +19507 ' Ben' 4 +19508 ' Ber' 4 +19509 ' Bes' 4 +19510 ' Bet' 4 +19511 ' Bew' 4 +19512 ' Bey' 4 +19513 ' Bez' 4 +19514 ' Bib' 4 +19515 ' Bid' 4 +19516 ' Big' 4 +19517 ' Bij' 4 +19518 ' Bil' 4 +19519 ' Bin' 4 +19520 ' Bio' 4 +19521 ' Bir' 4 +19522 ' Bis' 4 +19523 ' Bit' 4 +19524 ' Ble' 4 +19525 ' Blo' 4 +19526 ' Blu' 4 +19527 ' Bob' 4 +19528 ' Bod' 4 +19529 ' Bog' 4 +19530 ' Boh' 4 +19531 ' Bol' 4 +19532 ' Bom' 4 +19533 ' Bon' 4 +19534 ' Bor' 4 +19535 ' Bos' 4 +19536 ' Bot' 4 +19537 ' Bou' 4 +19538 ' Bow' 4 +19539 ' Box' 4 +19540 ' Boy' 4 +19541 ' Bra' 4 +19542 ' Bre' 4 +19543 ' Bri' 4 +19544 ' Bro' 4 +19545 ' Bru' 4 +19546 ' Bry' 4 +19547 ' Buc' 4 +19548 ' Bud' 4 +19549 ' Bug' 4 +19550 ' Buk' 4 +19551 ' Bul' 4 +19552 ' Bun' 4 +19553 ' Bur' 4 +19554 ' Bus' 4 +19555 ' But' 4 +19556 ' Buy' 4 +19557 ' Byr' 4 +19558 ' Byz' 4 +19559 ' Bé' 4 +19560 ' Bö' 4 +19561 ' Bü' 4 +19562 ' CAB' 4 +19563 ' CAD' 4 +19564 ' CAL' 4 +19565 ' CAM' 4 +19566 ' CAN' 4 +19567 ' CAP' 4 +19568 ' CAR' 4 +19569 ' CAS' 4 +19570 ' CAT' 4 +19571 ' CBC' 4 +19572 ' CBD' 4 +19573 ' CBS' 4 +19574 ' CCC' 4 +19575 ' CCD' 4 +19576 ' CCT' 4 +19577 ' CDC' 4 +19578 ' CDs' 4 +19579 ' CEO' 4 +19580 ' CES' 4 +19581 ' CGI' 4 +19582 ' CHE' 4 +19583 ' CHO' 4 +19584 ' CIA' 4 +19585 ' CID' 4 +19586 ' CIF' 4 +19587 ' CIS' 4 +19588 ' CIT' 4 +19589 ' CLA' 4 +19590 ' CLI' 4 +19591 ' CMD' 4 +19592 ' CMS' 4 +19593 ' CNN' 4 +19594 ' CNS' 4 +19595 ' COL' 4 +19596 ' COM' 4 +19597 ' CON' 4 +19598 ' COP' 4 +19599 ' COR' 4 +19600 ' COS' 4 +19601 ' CPR' 4 +19602 ' CPU' 4 +19603 ' CRC' 4 +19604 ' CRE' 4 +19605 ' CRM' 4 +19606 ' CSR' 4 +19607 ' CSS' 4 +19608 ' CST' 4 +19609 ' CSV' 4 +19610 ' CTR' 4 +19611 ' CUR' 4 +19612 ' Cab' 4 +19613 ' Cad' 4 +19614 ' Caf' 4 +19615 ' Cal' 4 +19616 ' Cam' 4 +19617 ' Can' 4 +19618 ' Cap' 4 +19619 ' Car' 4 +19620 ' Cas' 4 +19621 ' Cat' 4 +19622 ' Cav' 4 +19623 ' Cay' 4 +19624 ' Cec' 4 +19625 ' Ced' 4 +19626 ' Cel' 4 +19627 ' Cer' 4 +19628 ' Ces' 4 +19629 ' Cet' 4 +19630 ' Cha' 4 +19631 ' Che' 4 +19632 ' Chi' 4 +19633 ' Cho' 4 +19634 ' Chr' 4 +19635 ' Chu' 4 +19636 ' Cic' 4 +19637 ' Cin' 4 +19638 ' Cir' 4 +19639 ' Cit' 4 +19640 ' Civ' 4 +19641 ' Cla' 4 +19642 ' Cle' 4 +19643 ' Cli' 4 +19644 ' Clo' 4 +19645 ' Cly' 4 +19646 ' Cob' 4 +19647 ' Coc' 4 +19648 ' Cod' 4 +19649 ' Coh' 4 +19650 ' Col' 4 +19651 ' Com' 4 +19652 ' Con' 4 +19653 ' Cop' 4 +19654 ' Cor' 4 +19655 ' Cos' 4 +19656 ' Cot' 4 +19657 ' Cou' 4 +19658 ' Cov' 4 +19659 ' Cow' 4 +19660 ' Cox' 4 +19661 ' Coy' 4 +19662 ' Cra' 4 +19663 ' Cre' 4 +19664 ' Cri' 4 +19665 ' Cro' 4 +19666 ' Cru' 4 +19667 ' Cry' 4 +19668 ' Cub' 4 +19669 ' Cul' 4 +19670 ' Cum' 4 +19671 ' Cup' 4 +19672 ' Cur' 4 +19673 ' Cut' 4 +19674 ' Cyr' 4 +19675 ' DAC' 4 +19676 ' DAG' 4 +19677 ' DAM' 4 +19678 ' DAR' 4 +19679 ' DAT' 4 +19680 ' DAY' 4 +19681 ' DDR' 4 +19682 ' DEA' 4 +19683 ' DEC' 4 +19684 ' DEF' 4 +19685 ' DEL' 4 +19686 ' DEM' 4 +19687 ' DEN' 4 +19688 ' DEP' 4 +19689 ' DES' 4 +19690 ' DET' 4 +19691 ' DEV' 4 +19692 ' DFS' 4 +19693 ' DHS' 4 +19694 ' DID' 4 +19695 ' DIG' 4 +19696 ' DIR' 4 +19697 ' DIS' 4 +19698 ' DIV' 4 +19699 ' DIY' 4 +19700 ' DLL' 4 +19701 ' DNA' 4 +19702 ' DNS' 4 +19703 ' DOC' 4 +19704 ' DOI' 4 +19705 ' DOM' 4 +19706 ' DON' 4 +19707 ' DOS' 4 +19708 ' DOT' 4 +19709 ' DSL' 4 +19710 ' DSM' 4 +19711 ' DVD' 4 +19712 ' Dad' 4 +19713 ' Dag' 4 +19714 ' Dah' 4 +19715 ' Dai' 4 +19716 ' Dak' 4 +19717 ' Dal' 4 +19718 ' 
Dam' 4 +19719 ' Dan' 4 +19720 ' Dar' 4 +19721 ' Das' 4 +19722 ' Dat' 4 +19723 ' Dav' 4 +19724 ' Daw' 4 +19725 ' Day' 4 +19726 ' Deb' 4 +19727 ' Dec' 4 +19728 ' Ded' 4 +19729 ' Dee' 4 +19730 ' Def' 4 +19731 ' Deg' 4 +19732 ' Dek' 4 +19733 ' Del' 4 +19734 ' Dem' 4 +19735 ' Den' 4 +19736 ' Dep' 4 +19737 ' Der' 4 +19738 ' Des' 4 +19739 ' Det' 4 +19740 ' Dev' 4 +19741 ' Dew' 4 +19742 ' Dex' 4 +19743 ' Dez' 4 +19744 ' Dia' 4 +19745 ' Did' 4 +19746 ' Die' 4 +19747 ' Dig' 4 +19748 ' Dil' 4 +19749 ' Dim' 4 +19750 ' Din' 4 +19751 ' Dip' 4 +19752 ' Dir' 4 +19753 ' Dis' 4 +19754 ' Dit' 4 +19755 ' Div' 4 +19756 ' Dix' 4 +19757 ' Dob' 4 +19758 ' Doc' 4 +19759 ' Dod' 4 +19760 ' Doe' 4 +19761 ' Dog' 4 +19762 ' Dok' 4 +19763 ' Dol' 4 +19764 ' Dom' 4 +19765 ' Don' 4 +19766 ' Dop' 4 +19767 ' Dor' 4 +19768 ' Dos' 4 +19769 ' Dot' 4 +19770 ' Dou' 4 +19771 ' Dow' 4 +19772 ' Dra' 4 +19773 ' Dre' 4 +19774 ' Dro' 4 +19775 ' Dru' 4 +19776 ' Dry' 4 +19777 ' Dub' 4 +19778 ' Duc' 4 +19779 ' Dud' 4 +19780 ' Due' 4 +19781 ' Dul' 4 +19782 ' Dum' 4 +19783 ' Dun' 4 +19784 ' Duo' 4 +19785 ' Dup' 4 +19786 ' Dur' 4 +19787 ' Dyn' 4 +19788 ' Dé' 4 +19789 ' Dí' 4 +19790 ' ECM' 4 +19791 ' EEG' 4 +19792 ' EMP' 4 +19793 ' EMS' 4 +19794 ' END' 4 +19795 ' ENG' 4 +19796 ' EOF' 4 +19797 ' EOS' 4 +19798 ' EPA' 4 +19799 ' EPS' 4 +19800 ' ERA' 4 +19801 ' ERR' 4 +19802 ' ESA' 4 +19803 ' ESC' 4 +19804 ' ESP' 4 +19805 ' EST' 4 +19806 ' ETH' 4 +19807 ' EUR' 4 +19808 ' EXP' 4 +19809 ' EXT' 4 +19810 ' Ear' 4 +19811 ' Eat' 4 +19812 ' Eck' 4 +19813 ' Eco' 4 +19814 ' Edd' 4 +19815 ' Edu' 4 +19816 ' Eff' 4 +19817 ' Egg' 4 +19818 ' Ein' 4 +19819 ' Eld' 4 +19820 ' Ele' 4 +19821 ' Eli' 4 +19822 ' Ell' 4 +19823 ' Emb' 4 +19824 ' Emp' 4 +19825 ' Enc' 4 +19826 ' End' 4 +19827 ' Eng' 4 +19828 ' Enh' 4 +19829 ' Ens' 4 +19830 ' Ent' 4 +19831 ' Env' 4 +19832 ' Eph' 4 +19833 ' Equ' 4 +19834 ' Era' 4 +19835 ' Erd' 4 +19836 ' Ern' 4 +19837 ' Err' 4 +19838 ' Esc' 4 +19839 ' Esp' 4 +19840 ' Ess' 4 +19841 ' Est' 4 +19842 ' Eth' 4 +19843 ' Eug' 4 +19844 ' Eur' 4 +19845 ' Eva' 4 +19846 ' Eve' 4 +19847 ' Exc' 4 +19848 ' Exp' 4 +19849 ' Ext' 4 +19850 ' Eye' 4 +19851 ' FAA' 4 +19852 ' FAC' 4 +19853 ' FAQ' 4 +19854 ' FAR' 4 +19855 ' FAT' 4 +19856 ' FBI' 4 +19857 ' FCC' 4 +19858 ' FDA' 4 +19859 ' FFT' 4 +19860 ' FIF' 4 +19861 ' FIG' 4 +19862 ' FIL' 4 +19863 ' FIN' 4 +19864 ' FIR' 4 +19865 ' FIT' 4 +19866 ' FIX' 4 +19867 ' FOR' 4 +19868 ' FOX' 4 +19869 ' FPS' 4 +19870 ' FTP' 4 +19871 ' FUN' 4 +19872 ' Fab' 4 +19873 ' Fac' 4 +19874 ' Fah' 4 +19875 ' Fal' 4 +19876 ' Fam' 4 +19877 ' Fan' 4 +19878 ' Far' 4 +19879 ' Fas' 4 +19880 ' Fat' 4 +19881 ' Fay' 4 +19882 ' Feb' 4 +19883 ' Fed' 4 +19884 ' Fel' 4 +19885 ' Fem' 4 +19886 ' Fen' 4 +19887 ' Fer' 4 +19888 ' Fet' 4 +19889 ' Few' 4 +19890 ' Fib' 4 +19891 ' Fif' 4 +19892 ' Fig' 4 +19893 ' Fil' 4 +19894 ' Fin' 4 +19895 ' Fir' 4 +19896 ' Fit' 4 +19897 ' Fix' 4 +19898 ' Fla' 4 +19899 ' Fle' 4 +19900 ' Flo' 4 +19901 ' Flu' 4 +19902 ' Fly' 4 +19903 ' Fog' 4 +19904 ' Fol' 4 +19905 ' Fon' 4 +19906 ' Foo' 4 +19907 ' For' 4 +19908 ' Fot' 4 +19909 ' Fou' 4 +19910 ' Fox' 4 +19911 ' Fra' 4 +19912 ' Fre' 4 +19913 ' Fri' 4 +19914 ' Fro' 4 +19915 ' Fry' 4 +19916 ' Fuj' 4 +19917 ' Fuk' 4 +19918 ' Ful' 4 +19919 ' Fun' 4 +19920 ' Fur' 4 +19921 ' Fut' 4 +19922 ' Fé' 4 +19923 ' GAM' 4 +19924 ' GCC' 4 +19925 ' GDP' 4 +19926 ' GEN' 4 +19927 ' GET' 4 +19928 ' GFP' 4 +19929 ' GHz' 4 +19930 ' GMT' 4 +19931 ' GNU' 4 +19932 ' GOD' 4 +19933 ' GOP' 4 +19934 ' GPL' 4 +19935 ' GPS' 4 +19936 ' GPU' 4 +19937 ' GRE' 4 +19938 ' GRO' 4 +19939 ' GSM' 4 +19940 ' GST' 
4 +19941 ' GUI' 4 +19942 ' Gab' 4 +19943 ' Gad' 4 +19944 ' Gal' 4 +19945 ' Gam' 4 +19946 ' Gan' 4 +19947 ' Gap' 4 +19948 ' Gar' 4 +19949 ' Gas' 4 +19950 ' Gat' 4 +19951 ' Gay' 4 +19952 ' Gaz' 4 +19953 ' GeV' 4 +19954 ' Geb' 4 +19955 ' Ged' 4 +19956 ' Geg' 4 +19957 ' Gel' 4 +19958 ' Gem' 4 +19959 ' Gen' 4 +19960 ' Geo' 4 +19961 ' Ger' 4 +19962 ' Ges' 4 +19963 ' Get' 4 +19964 ' Gew' 4 +19965 ' Gib' 4 +19966 ' Gig' 4 +19967 ' Gil' 4 +19968 ' Gin' 4 +19969 ' Gir' 4 +19970 ' Git' 4 +19971 ' Gle' 4 +19972 ' Gly' 4 +19973 ' Gob' 4 +19974 ' God' 4 +19975 ' Gol' 4 +19976 ' Gon' 4 +19977 ' Gor' 4 +19978 ' Gos' 4 +19979 ' Got' 4 +19980 ' Gov' 4 +19981 ' Gow' 4 +19982 ' Gra' 4 +19983 ' Gre' 4 +19984 ' Gri' 4 +19985 ' Gro' 4 +19986 ' Gru' 4 +19987 ' Gtk' 4 +19988 ' Gul' 4 +19989 ' Gum' 4 +19990 ' Gun' 4 +19991 ' Gur' 4 +19992 ' Gus' 4 +19993 ' Gut' 4 +19994 ' Guy' 4 +19995 ' Gym' 4 +19996 ' Gä' 4 +19997 ' Gé' 4 +19998 ' Gó' 4 +19999 ' Gö' 4 +20000 ' Gü' 4 +20001 ' HAL' 4 +20002 ' HAR' 4 +20003 ' HAS' 4 +20004 ' HBO' 4 +20005 ' HEL' 4 +20006 ' HER' 4 +20007 ' HIS' 4 +20008 ' HIV' 4 +20009 ' HMS' 4 +20010 ' HOW' 4 +20011 ' HPV' 4 +20012 ' HTC' 4 +20013 ' Hab' 4 +20014 ' Had' 4 +20015 ' Hag' 4 +20016 ' Hai' 4 +20017 ' Haj' 4 +20018 ' Hak' 4 +20019 ' Hal' 4 +20020 ' Ham' 4 +20021 ' Han' 4 +20022 ' Har' 4 +20023 ' Has' 4 +20024 ' Hat' 4 +20025 ' Hav' 4 +20026 ' Haw' 4 +20027 ' Hay' 4 +20028 ' Haz' 4 +20029 ' Heb' 4 +20030 ' Hed' 4 +20031 ' Hel' 4 +20032 ' Hem' 4 +20033 ' Hen' 4 +20034 ' Hep' 4 +20035 ' Her' 4 +20036 ' Het' 4 +20037 ' Hew' 4 +20038 ' Hex' 4 +20039 ' Hey' 4 +20040 ' Hib' 4 +20041 ' Hig' 4 +20042 ' Hij' 4 +20043 ' Hil' 4 +20044 ' Him' 4 +20045 ' Hin' 4 +20046 ' Hip' 4 +20047 ' Hir' 4 +20048 ' His' 4 +20049 ' Hit' 4 +20050 ' Hmm' 4 +20051 ' Hob' 4 +20052 ' Hod' 4 +20053 ' Hof' 4 +20054 ' Hog' 4 +20055 ' Hol' 4 +20056 ' Hom' 4 +20057 ' Hon' 4 +20058 ' Hop' 4 +20059 ' Hor' 4 +20060 ' Hos' 4 +20061 ' Hot' 4 +20062 ' Hou' 4 +20063 ' How' 4 +20064 ' Hoy' 4 +20065 ' Hua' 4 +20066 ' Hub' 4 +20067 ' Hud' 4 +20068 ' Hug' 4 +20069 ' Hum' 4 +20070 ' Hun' 4 +20071 ' Hur' 4 +20072 ' Hus' 4 +20073 ' Hut' 4 +20074 ' Hyp' 4 +20075 ' Hä' 4 +20076 ' Hö' 4 +20077 ' IBM' 4 +20078 ' ICC' 4 +20079 ' ICE' 4 +20080 ' ICO' 4 +20081 ' ICT' 4 +20082 ' ICU' 4 +20083 ' IDE' 4 +20084 ' IDs' 4 +20085 ' III' 4 +20086 ' IIS' 4 +20087 ' IMF' 4 +20088 ' IMP' 4 +20089 ' INC' 4 +20090 ' IND' 4 +20091 ' INF' 4 +20092 ' INS' 4 +20093 ' INT' 4 +20094 ' IPA' 4 +20095 ' IPO' 4 +20096 ' IPS' 4 +20097 ' IPv' 4 +20098 ' IRA' 4 +20099 ' IRC' 4 +20100 ' IRS' 4 +20101 ' ISO' 4 +20102 ' ISP' 4 +20103 ' ISS' 4 +20104 ' ITE' 4 +20105 ' ITS' 4 +20106 ' Ian' 4 +20107 ' Ice' 4 +20108 ' Ich' 4 +20109 ' Ide' 4 +20110 ' Ign' 4 +20111 ' Ill' 4 +20112 ' Ils' 4 +20113 ' Imm' 4 +20114 ' Imp' 4 +20115 ' Inc' 4 +20116 ' Ind' 4 +20117 ' Inf' 4 +20118 ' Ing' 4 +20119 ' Ink' 4 +20120 ' Inn' 4 +20121 ' Ins' 4 +20122 ' Int' 4 +20123 ' Inv' 4 +20124 ' IoT' 4 +20125 ' Ion' 4 +20126 ' Ips' 4 +20127 ' Ira' 4 +20128 ' Isa' 4 +20129 ' Ish' 4 +20130 ' Isl' 4 +20131 ' Isn' 4 +20132 ' Iss' 4 +20133 ' Ist' 4 +20134 ' Its' 4 +20135 ' Ivy' 4 +20136 ' JVM' 4 +20137 ' Jab' 4 +20138 ' Jac' 4 +20139 ' Jag' 4 +20140 ' Jah' 4 +20141 ' Jak' 4 +20142 ' Jal' 4 +20143 ' Jam' 4 +20144 ' Jan' 4 +20145 ' Jar' 4 +20146 ' Jas' 4 +20147 ' Jaw' 4 +20148 ' Jay' 4 +20149 ' Jed' 4 +20150 ' Jen' 4 +20151 ' Jer' 4 +20152 ' Jes' 4 +20153 ' Jet' 4 +20154 ' Jew' 4 +20155 ' Jim' 4 +20156 ' Jin' 4 +20157 ' Job' 4 +20158 ' Joe' 4 +20159 ' Joh' 4 +20160 ' Jon' 4 +20161 ' Jos' 4 +20162 ' Joy' 4 +20163 
' Jub' 4 +20164 ' Jud' 4 +20165 ' Jug' 4 +20166 ' Jul' 4 +20167 ' Jun' 4 +20168 ' Jur' 4 +20169 ' Já' 4 +20170 ' Jó' 4 +20171 ' KDE' 4 +20172 ' KEY' 4 +20173 ' Kab' 4 +20174 ' Kad' 4 +20175 ' Kag' 4 +20176 ' Kah' 4 +20177 ' Kai' 4 +20178 ' Kak' 4 +20179 ' Kal' 4 +20180 ' Kam' 4 +20181 ' Kan' 4 +20182 ' Kap' 4 +20183 ' Kar' 4 +20184 ' Kas' 4 +20185 ' Kat' 4 +20186 ' Kaw' 4 +20187 ' Kay' 4 +20188 ' Kaz' 4 +20189 ' Kel' 4 +20190 ' Kem' 4 +20191 ' Ken' 4 +20192 ' Ker' 4 +20193 ' Kes' 4 +20194 ' Ket' 4 +20195 ' Key' 4 +20196 ' Kid' 4 +20197 ' Kil' 4 +20198 ' Kim' 4 +20199 ' Kin' 4 +20200 ' Kir' 4 +20201 ' Kit' 4 +20202 ' Kle' 4 +20203 ' Kob' 4 +20204 ' Kod' 4 +20205 ' Koh' 4 +20206 ' Kok' 4 +20207 ' Kol' 4 +20208 ' Kom' 4 +20209 ' Kon' 4 +20210 ' Kop' 4 +20211 ' Kor' 4 +20212 ' Kos' 4 +20213 ' Kot' 4 +20214 ' Kou' 4 +20215 ' Kov' 4 +20216 ' Kra' 4 +20217 ' Kre' 4 +20218 ' Kro' 4 +20219 ' Kub' 4 +20220 ' Kul' 4 +20221 ' Kum' 4 +20222 ' Kun' 4 +20223 ' Kur' 4 +20224 ' Kut' 4 +20225 ' Kö' 4 +20226 ' Kü' 4 +20227 ' LAB' 4 +20228 ' LAN' 4 +20229 ' LAP' 4 +20230 ' LAS' 4 +20231 ' LAT' 4 +20232 ' LAW' 4 +20233 ' LCD' 4 +20234 ' LDL' 4 +20235 ' LED' 4 +20236 ' LEG' 4 +20237 ' LET' 4 +20238 ' LIB' 4 +20239 ' LIM' 4 +20240 ' LIN' 4 +20241 ' LLC' 4 +20242 ' LLP' 4 +20243 ' LOC' 4 +20244 ' LOG' 4 +20245 ' LOL' 4 +20246 ' LOS' 4 +20247 ' LOT' 4 +20248 ' LPS' 4 +20249 ' LSD' 4 +20250 ' LTE' 4 +20251 ' Lab' 4 +20252 ' Lac' 4 +20253 ' Lad' 4 +20254 ' Laf' 4 +20255 ' Lag' 4 +20256 ' Lah' 4 +20257 ' Lak' 4 +20258 ' Lal' 4 +20259 ' Lam' 4 +20260 ' Lan' 4 +20261 ' Lap' 4 +20262 ' Lar' 4 +20263 ' Las' 4 +20264 ' Lat' 4 +20265 ' Lau' 4 +20266 ' Lav' 4 +20267 ' Law' 4 +20268 ' Lay' 4 +20269 ' Laz' 4 +20270 ' Leb' 4 +20271 ' Lec' 4 +20272 ' Led' 4 +20273 ' Lee' 4 +20274 ' Leg' 4 +20275 ' Leh' 4 +20276 ' Lei' 4 +20277 ' Lem' 4 +20278 ' Len' 4 +20279 ' Leo' 4 +20280 ' Ler' 4 +20281 ' Les' 4 +20282 ' Let' 4 +20283 ' Lev' 4 +20284 ' Lew' 4 +20285 ' Lex' 4 +20286 ' Ley' 4 +20287 ' Lia' 4 +20288 ' Lib' 4 +20289 ' Lic' 4 +20290 ' Lid' 4 +20291 ' Lie' 4 +20292 ' Lif' 4 +20293 ' Lig' 4 +20294 ' Lik' 4 +20295 ' Lil' 4 +20296 ' Lim' 4 +20297 ' Lin' 4 +20298 ' Lip' 4 +20299 ' Lis' 4 +20300 ' Lit' 4 +20301 ' Liu' 4 +20302 ' Liv' 4 +20303 ' Liz' 4 +20304 ' Lob' 4 +20305 ' Loc' 4 +20306 ' Log' 4 +20307 ' Lok' 4 +20308 ' Lon' 4 +20309 ' Lor' 4 +20310 ' Los' 4 +20311 ' Lot' 4 +20312 ' Lou' 4 +20313 ' Lov' 4 +20314 ' Low' 4 +20315 ' Ltd' 4 +20316 ' Lua' 4 +20317 ' Lub' 4 +20318 ' Luc' 4 +20319 ' Lud' 4 +20320 ' Lug' 4 +20321 ' Luk' 4 +20322 ' Lum' 4 +20323 ' Lun' 4 +20324 ' Lup' 4 +20325 ' Lux' 4 +20326 ' Lyn' 4 +20327 ' Lys' 4 +20328 ' Lé' 4 +20329 ' Lö' 4 +20330 ' Lü' 4 +20331 ' MAC' 4 +20332 ' MAG' 4 +20333 ' MAL' 4 +20334 ' MAN' 4 +20335 ' MAP' 4 +20336 ' MAR' 4 +20337 ' MAS' 4 +20338 ' MAT' 4 +20339 ' MAX' 4 +20340 ' MAY' 4 +20341 ' MBA' 4 +20342 ' MED' 4 +20343 ' MEM' 4 +20344 ' MEN' 4 +20345 ' MEP' 4 +20346 ' MER' 4 +20347 ' MET' 4 +20348 ' MHz' 4 +20349 ' MIC' 4 +20350 ' MID' 4 +20351 ' MIL' 4 +20352 ' MIN' 4 +20353 ' MIS' 4 +20354 ' MIT' 4 +20355 ' MLB' 4 +20356 ' MLP' 4 +20357 ' MLS' 4 +20358 ' MMA' 4 +20359 ' MMP' 4 +20360 ' MOD' 4 +20361 ' MON' 4 +20362 ' MOR' 4 +20363 ' MOS' 4 +20364 ' MOT' 4 +20365 ' MPI' 4 +20366 ' MPs' 4 +20367 ' MRI' 4 +20368 ' MSC' 4 +20369 ' MSE' 4 +20370 ' MSG' 4 +20371 ' MSM' 4 +20372 ' MTV' 4 +20373 ' MUS' 4 +20374 ' MVC' 4 +20375 ' MVP' 4 +20376 ' Mac' 4 +20377 ' Mad' 4 +20378 ' Mae' 4 +20379 ' Mag' 4 +20380 ' Mah' 4 +20381 ' Mai' 4 +20382 ' Maj' 4 +20383 ' Mak' 4 +20384 ' Mal' 4 +20385 ' Mam' 4 
+20386 ' Man' 4 +20387 ' Mao' 4 +20388 ' Map' 4 +20389 ' Mar' 4 +20390 ' Mas' 4 +20391 ' Mat' 4 +20392 ' Mau' 4 +20393 ' Max' 4 +20394 ' May' 4 +20395 ' Maz' 4 +20396 ' McC' 4 +20397 ' McD' 4 +20398 ' McG' 4 +20399 ' McK' 4 +20400 ' McL' 4 +20401 ' McN' 4 +20402 ' MeV' 4 +20403 ' Med' 4 +20404 ' Meg' 4 +20405 ' Meh' 4 +20406 ' Mei' 4 +20407 ' Mel' 4 +20408 ' Mem' 4 +20409 ' Men' 4 +20410 ' Mer' 4 +20411 ' Mes' 4 +20412 ' Met' 4 +20413 ' Mex' 4 +20414 ' Mey' 4 +20415 ' Mia' 4 +20416 ' Mic' 4 +20417 ' Mid' 4 +20418 ' Mig' 4 +20419 ' Mik' 4 +20420 ' Mil' 4 +20421 ' Mim' 4 +20422 ' Min' 4 +20423 ' Mir' 4 +20424 ' Mis' 4 +20425 ' Mit' 4 +20426 ' Mix' 4 +20427 ' Miy' 4 +20428 ' Miz' 4 +20429 ' Mob' 4 +20430 ' Mod' 4 +20431 ' Mog' 4 +20432 ' Moh' 4 +20433 ' Mol' 4 +20434 ' Mom' 4 +20435 ' Mon' 4 +20436 ' Mor' 4 +20437 ' Mos' 4 +20438 ' Mot' 4 +20439 ' Mou' 4 +20440 ' Mov' 4 +20441 ' Moy' 4 +20442 ' Moz' 4 +20443 ' Mrs' 4 +20444 ' Msg' 4 +20445 ' Mud' 4 +20446 ' Mug' 4 +20447 ' Muk' 4 +20448 ' Mul' 4 +20449 ' Mum' 4 +20450 ' Mun' 4 +20451 ' Mur' 4 +20452 ' Mus' 4 +20453 ' Mut' 4 +20454 ' Mys' 4 +20455 ' Mé' 4 +20456 ' Mö' 4 +20457 ' Mü' 4 +20458 ' NAD' 4 +20459 ' NAS' 4 +20460 ' NAT' 4 +20461 ' NBA' 4 +20462 ' NBC' 4 +20463 ' NEC' 4 +20464 ' NET' 4 +20465 ' NEW' 4 +20466 ' NFC' 4 +20467 ' NFL' 4 +20468 ' NGC' 4 +20469 ' NGO' 4 +20470 ' NHL' 4 +20471 ' NHS' 4 +20472 ' NIC' 4 +20473 ' NIH' 4 +20474 ' NON' 4 +20475 ' NOR' 4 +20476 ' NOT' 4 +20477 ' NOW' 4 +20478 ' NPC' 4 +20479 ' NPR' 4 +20480 ' NRA' 4 +20481 ' NSA' 4 +20482 ' NSW' 4 +20483 ' NUM' 4 +20484 ' NYC' 4 +20485 ' NaN' 4 +20486 ' Nab' 4 +20487 ' Nad' 4 +20488 ' Nag' 4 +20489 ' Nah' 4 +20490 ' Naj' 4 +20491 ' Nak' 4 +20492 ' Nam' 4 +20493 ' Nan' 4 +20494 ' Nap' 4 +20495 ' Nar' 4 +20496 ' Nas' 4 +20497 ' Nat' 4 +20498 ' Nav' 4 +20499 ' Naz' 4 +20500 ' Neb' 4 +20501 ' Nec' 4 +20502 ' Ned' 4 +20503 ' Neg' 4 +20504 ' Nel' 4 +20505 ' Nem' 4 +20506 ' Neo' 4 +20507 ' Nep' 4 +20508 ' Ner' 4 +20509 ' Net' 4 +20510 ' Neu' 4 +20511 ' Nev' 4 +20512 ' New' 4 +20513 ' Nex' 4 +20514 ' Nic' 4 +20515 ' Nie' 4 +20516 ' Nig' 4 +20517 ' Nik' 4 +20518 ' Nil' 4 +20519 ' Nim' 4 +20520 ' Nin' 4 +20521 ' Nit' 4 +20522 ' Nob' 4 +20523 ' Nom' 4 +20524 ' Non' 4 +20525 ' Nor' 4 +20526 ' Nos' 4 +20527 ' Not' 4 +20528 ' Nou' 4 +20529 ' Nov' 4 +20530 ' Now' 4 +20531 ' Nug' 4 +20532 ' Num' 4 +20533 ' Nun' 4 +20534 ' Nur' 4 +20535 ' Nut' 4 +20536 ' Nä' 4 +20537 ' Né' 4 +20538 ' OCD' 4 +20539 ' OCT' 4 +20540 ' OFF' 4 +20541 ' ONE' 4 +20542 ' OPT' 4 +20543 ' OUR' 4 +20544 ' OUT' 4 +20545 ' Oak' 4 +20546 ' Obj' 4 +20547 ' Obl' 4 +20548 ' Obs' 4 +20549 ' Occ' 4 +20550 ' Oct' 4 +20551 ' Odd' 4 +20552 ' Off' 4 +20553 ' Oil' 4 +20554 ' Old' 4 +20555 ' Ole' 4 +20556 ' One' 4 +20557 ' Ont' 4 +20558 ' Opp' 4 +20559 ' Ops' 4 +20560 ' Opt' 4 +20561 ' Orb' 4 +20562 ' Ord' 4 +20563 ' Ore' 4 +20564 ' Org' 4 +20565 ' Ori' 4 +20566 ' Orn' 4 +20567 ' Ort' 4 +20568 ' Osc' 4 +20569 ' Ost' 4 +20570 ' Ott' 4 +20571 ' Our' 4 +20572 ' Out' 4 +20573 ' Own' 4 +20574 ' PAC' 4 +20575 ' PAD' 4 +20576 ' PAL' 4 +20577 ' PAN' 4 +20578 ' PAR' 4 +20579 ' PAS' 4 +20580 ' PAT' 4 +20581 ' PAY' 4 +20582 ' PBS' 4 +20583 ' PCA' 4 +20584 ' PCB' 4 +20585 ' PCI' 4 +20586 ' PCR' 4 +20587 ' PCs' 4 +20588 ' PDB' 4 +20589 ' PDF' 4 +20590 ' PDO' 4 +20591 ' PDT' 4 +20592 ' PEM' 4 +20593 ' PER' 4 +20594 ' PET' 4 +20595 ' PHP' 4 +20596 ' PHY' 4 +20597 ' PID' 4 +20598 ' PIL' 4 +20599 ' PIN' 4 +20600 ' PLA' 4 +20601 ' PLC' 4 +20602 ' PLL' 4 +20603 ' PMC' 4 +20604 ' PNG' 4 +20605 ' POL' 4 +20606 ' POP' 4 +20607 ' POS' 4 +20608 
' PPP' 4 +20609 ' PRE' 4 +20610 ' PRI' 4 +20611 ' PRO' 4 +20612 ' PSA' 4 +20613 ' PSD' 4 +20614 ' PST' 4 +20615 ' PUR' 4 +20616 ' PUT' 4 +20617 ' PVC' 4 +20618 ' Pac' 4 +20619 ' Pad' 4 +20620 ' Pag' 4 +20621 ' Pak' 4 +20622 ' Pal' 4 +20623 ' Pam' 4 +20624 ' Pan' 4 +20625 ' Pap' 4 +20626 ' Par' 4 +20627 ' Pas' 4 +20628 ' Pat' 4 +20629 ' Pav' 4 +20630 ' Paw' 4 +20631 ' Pay' 4 +20632 ' Paz' 4 +20633 ' Pdf' 4 +20634 ' Pec' 4 +20635 ' Ped' 4 +20636 ' Peg' 4 +20637 ' Pel' 4 +20638 ' Pen' 4 +20639 ' Pep' 4 +20640 ' Per' 4 +20641 ' Pes' 4 +20642 ' Pet' 4 +20643 ' PhD' 4 +20644 ' Phi' 4 +20645 ' Pho' 4 +20646 ' Pic' 4 +20647 ' Pie' 4 +20648 ' Pig' 4 +20649 ' Pik' 4 +20650 ' Pil' 4 +20651 ' Pin' 4 +20652 ' Pip' 4 +20653 ' Pir' 4 +20654 ' Pis' 4 +20655 ' Pit' 4 +20656 ' Pix' 4 +20657 ' Ple' 4 +20658 ' Ply' 4 +20659 ' Pod' 4 +20660 ' Pok' 4 +20661 ' Pol' 4 +20662 ' Pom' 4 +20663 ' Pon' 4 +20664 ' Pop' 4 +20665 ' Por' 4 +20666 ' Pos' 4 +20667 ' Pot' 4 +20668 ' Pow' 4 +20669 ' Poz' 4 +20670 ' Pra' 4 +20671 ' Pre' 4 +20672 ' Pri' 4 +20673 ' Pro' 4 +20674 ' Psy' 4 +20675 ' Pub' 4 +20676 ' Pul' 4 +20677 ' Pun' 4 +20678 ' Pur' 4 +20679 ' Put' 4 +20680 ' Pé' 4 +20681 ' QUE' 4 +20682 ' Que' 4 +20683 ' Qui' 4 +20684 ' RAD' 4 +20685 ' RAF' 4 +20686 ' RAM' 4 +20687 ' RAW' 4 +20688 ' RBI' 4 +20689 ' REC' 4 +20690 ' RED' 4 +20691 ' REF' 4 +20692 ' REG' 4 +20693 ' REL' 4 +20694 ' REM' 4 +20695 ' RES' 4 +20696 ' RET' 4 +20697 ' RFC' 4 +20698 ' RGB' 4 +20699 ' RIP' 4 +20700 ' RMS' 4 +20701 ' RNA' 4 +20702 ' ROC' 4 +20703 ' ROI' 4 +20704 ' ROM' 4 +20705 ' ROS' 4 +20706 ' ROT' 4 +20707 ' RPC' 4 +20708 ' RPG' 4 +20709 ' RPM' 4 +20710 ' RSA' 4 +20711 ' RSS' 4 +20712 ' RUN' 4 +20713 ' Rab' 4 +20714 ' Rac' 4 +20715 ' Rad' 4 +20716 ' Raf' 4 +20717 ' Rag' 4 +20718 ' Rah' 4 +20719 ' Raj' 4 +20720 ' Rak' 4 +20721 ' Ram' 4 +20722 ' Ran' 4 +20723 ' Rao' 4 +20724 ' Rap' 4 +20725 ' Ras' 4 +20726 ' Rat' 4 +20727 ' Rav' 4 +20728 ' Raw' 4 +20729 ' Ray' 4 +20730 ' Raz' 4 +20731 ' Reb' 4 +20732 ' Rec' 4 +20733 ' Red' 4 +20734 ' Ref' 4 +20735 ' Reg' 4 +20736 ' Rei' 4 +20737 ' Rel' 4 +20738 ' Rem' 4 +20739 ' Ren' 4 +20740 ' Rep' 4 +20741 ' Res' 4 +20742 ' Ret' 4 +20743 ' Rev' 4 +20744 ' Rew' 4 +20745 ' Rex' 4 +20746 ' Rey' 4 +20747 ' Rhe' 4 +20748 ' Rib' 4 +20749 ' Ric' 4 +20750 ' Rid' 4 +20751 ' Rif' 4 +20752 ' Rig' 4 +20753 ' Rim' 4 +20754 ' Rin' 4 +20755 ' Rio' 4 +20756 ' Rip' 4 +20757 ' Ris' 4 +20758 ' Rit' 4 +20759 ' Riv' 4 +20760 ' Rob' 4 +20761 ' Roc' 4 +20762 ' Rod' 4 +20763 ' Rog' 4 +20764 ' Roh' 4 +20765 ' Rol' 4 +20766 ' Rom' 4 +20767 ' Ron' 4 +20768 ' Ros' 4 +20769 ' Rot' 4 +20770 ' Rou' 4 +20771 ' Row' 4 +20772 ' Rox' 4 +20773 ' Roy' 4 +20774 ' Rub' 4 +20775 ' Rud' 4 +20776 ' Rue' 4 +20777 ' Rug' 4 +20778 ' Rum' 4 +20779 ' Run' 4 +20780 ' Rus' 4 +20781 ' Rut' 4 +20782 ' Ré' 4 +20783 ' Rö' 4 +20784 ' SAF' 4 +20785 ' SAL' 4 +20786 ' SAM' 4 +20787 ' SAN' 4 +20788 ' SAP' 4 +20789 ' SAR' 4 +20790 ' SAS' 4 +20791 ' SAT' 4 +20792 ' SCH' 4 +20793 ' SCI' 4 +20794 ' SCM' 4 +20795 ' SCO' 4 +20796 ' SDK' 4 +20797 ' SDL' 4 +20798 ' SDS' 4 +20799 ' SEC' 4 +20800 ' SEE' 4 +20801 ' SEL' 4 +20802 ' SEM' 4 +20803 ' SEO' 4 +20804 ' SER' 4 +20805 ' SES' 4 +20806 ' SET' 4 +20807 ' SGD' 4 +20808 ' SHA' 4 +20809 ' SHE' 4 +20810 ' SHO' 4 +20811 ' SIG' 4 +20812 ' SIL' 4 +20813 ' SIM' 4 +20814 ' SIP' 4 +20815 ' SMB' 4 +20816 ' SMS' 4 +20817 ' SNP' 4 +20818 ' SOC' 4 +20819 ' SOL' 4 +20820 ' SOM' 4 +20821 ' SOS' 4 +20822 ' SPD' 4 +20823 ' SPE' 4 +20824 ' SPI' 4 +20825 ' SPR' 4 +20826 ' SQL' 4 +20827 ' SSD' 4 +20828 ' SSH' 4 +20829 ' SSL' 4 +20830 ' 
STD' 4 +20831 ' STE' 4 +20832 ' STR' 4 +20833 ' SUB' 4 +20834 ' SUM' 4 +20835 ' SUN' 4 +20836 ' SUP' 4 +20837 ' SUR' 4 +20838 ' SUS' 4 +20839 ' SUV' 4 +20840 ' SVG' 4 +20841 ' SVM' 4 +20842 ' Sab' 4 +20843 ' Sac' 4 +20844 ' Sad' 4 +20845 ' Saf' 4 +20846 ' Sag' 4 +20847 ' Sah' 4 +20848 ' Sai' 4 +20849 ' Sak' 4 +20850 ' Sal' 4 +20851 ' Sam' 4 +20852 ' San' 4 +20853 ' Sap' 4 +20854 ' Sar' 4 +20855 ' Sas' 4 +20856 ' Sat' 4 +20857 ' Sau' 4 +20858 ' Sav' 4 +20859 ' Saw' 4 +20860 ' Sax' 4 +20861 ' Say' 4 +20862 ' Sch' 4 +20863 ' Sci' 4 +20864 ' Sco' 4 +20865 ' Scr' 4 +20866 ' Sea' 4 +20867 ' Sec' 4 +20868 ' Sed' 4 +20869 ' See' 4 +20870 ' Seg' 4 +20871 ' Sek' 4 +20872 ' Sel' 4 +20873 ' Sem' 4 +20874 ' Sen' 4 +20875 ' Sep' 4 +20876 ' Seq' 4 +20877 ' Ser' 4 +20878 ' Ses' 4 +20879 ' Set' 4 +20880 ' Sew' 4 +20881 ' Sex' 4 +20882 ' Sey' 4 +20883 ' Sgt' 4 +20884 ' Sha' 4 +20885 ' She' 4 +20886 ' Shi' 4 +20887 ' Sho' 4 +20888 ' Sic' 4 +20889 ' Sid' 4 +20890 ' Sie' 4 +20891 ' Sig' 4 +20892 ' Sik' 4 +20893 ' Sil' 4 +20894 ' Sim' 4 +20895 ' Sin' 4 +20896 ' Sir' 4 +20897 ' Sit' 4 +20898 ' Six' 4 +20899 ' Ske' 4 +20900 ' Ski' 4 +20901 ' Sky' 4 +20902 ' Sob' 4 +20903 ' Soc' 4 +20904 ' Sof' 4 +20905 ' Sok' 4 +20906 ' Sol' 4 +20907 ' Som' 4 +20908 ' Son' 4 +20909 ' Sor' 4 +20910 ' Sou' 4 +20911 ' Sov' 4 +20912 ' Sox' 4 +20913 ' Soy' 4 +20914 ' Spa' 4 +20915 ' Spe' 4 +20916 ' Spl' 4 +20917 ' Spo' 4 +20918 ' Spr' 4 +20919 ' Spy' 4 +20920 ' Sql' 4 +20921 ' Squ' 4 +20922 ' Sri' 4 +20923 ' Sta' 4 +20924 ' Ste' 4 +20925 ' Sto' 4 +20926 ' Str' 4 +20927 ' Sty' 4 +20928 ' Sub' 4 +20929 ' Suc' 4 +20930 ' Sud' 4 +20931 ' Sue' 4 +20932 ' Sug' 4 +20933 ' Suk' 4 +20934 ' Sul' 4 +20935 ' Sum' 4 +20936 ' Sun' 4 +20937 ' Sup' 4 +20938 ' Sur' 4 +20939 ' Sus' 4 +20940 ' Suz' 4 +20941 ' Swe' 4 +20942 ' Syd' 4 +20943 ' Syl' 4 +20944 ' Sym' 4 +20945 ' Syn' 4 +20946 ' Sys' 4 +20947 ' Sé' 4 +20948 ' Sü' 4 +20949 ' TAB' 4 +20950 ' TAG' 4 +20951 ' TAM' 4 +20952 ' TCP' 4 +20953 ' TED' 4 +20954 ' TEM' 4 +20955 ' TER' 4 +20956 ' THE' 4 +20957 ' TIM' 4 +20958 ' TLS' 4 +20959 ' TOD' 4 +20960 ' TOP' 4 +20961 ' TPP' 4 +20962 ' TRA' 4 +20963 ' TRE' 4 +20964 ' TRI' 4 +20965 ' TWO' 4 +20966 ' Tab' 4 +20967 ' Tac' 4 +20968 ' Tag' 4 +20969 ' Tah' 4 +20970 ' Tai' 4 +20971 ' Taj' 4 +20972 ' Tak' 4 +20973 ' Tal' 4 +20974 ' Tam' 4 +20975 ' Tan' 4 +20976 ' Tao' 4 +20977 ' Tap' 4 +20978 ' Tar' 4 +20979 ' Tas' 4 +20980 ' Tat' 4 +20981 ' Tau' 4 +20982 ' Tax' 4 +20983 ' Tay' 4 +20984 ' Tea' 4 +20985 ' Tec' 4 +20986 ' Ted' 4 +20987 ' Teh' 4 +20988 ' Tek' 4 +20989 ' Tel' 4 +20990 ' Tem' 4 +20991 ' Ten' 4 +20992 ' Ter' 4 +20993 ' Tes' 4 +20994 ' Tet' 4 +20995 ' Tex' 4 +20996 ' The' 4 +20997 ' Thi' 4 +20998 ' Thr' 4 +20999 ' Thu' 4 +21000 ' Thy' 4 +21001 ' Tib' 4 +21002 ' Tie' 4 +21003 ' Tig' 4 +21004 ' Tik' 4 +21005 ' Til' 4 +21006 ' Tim' 4 +21007 ' Tin' 4 +21008 ' Tip' 4 +21009 ' Tir' 4 +21010 ' Tit' 4 +21011 ' Tob' 4 +21012 ' Tod' 4 +21013 ' Tok' 4 +21014 ' Tol' 4 +21015 ' Tom' 4 +21016 ' Ton' 4 +21017 ' Too' 4 +21018 ' Top' 4 +21019 ' Tor' 4 +21020 ' Tos' 4 +21021 ' Tot' 4 +21022 ' Tou' 4 +21023 ' Tow' 4 +21024 ' Toy' 4 +21025 ' Tra' 4 +21026 ' Tre' 4 +21027 ' Tri' 4 +21028 ' Tro' 4 +21029 ' Tru' 4 +21030 ' Try' 4 +21031 ' Tub' 4 +21032 ' Tuc' 4 +21033 ' Tud' 4 +21034 ' Tue' 4 +21035 ' Tul' 4 +21036 ' Tum' 4 +21037 ' Tun' 4 +21038 ' Tur' 4 +21039 ' Tus' 4 +21040 ' Tut' 4 +21041 ' Twe' 4 +21042 ' Two' 4 +21043 ' Typ' 4 +21044 ' Tyr' 4 +21045 ' UAE' 4 +21046 ' UDP' 4 +21047 ' UFC' 4 +21048 ' UFO' 4 +21049 ' UID' 4 +21050 ' UIT' 4 +21051 ' UPS' 4 +21052 ' URI' 
4 +21053 ' URL' 4 +21054 ' USA' 4 +21055 ' USB' 4 +21056 ' USC' 4 +21057 ' USD' 4 +21058 ' USE' 4 +21059 ' USS' 4 +21060 ' UTC' 4 +21061 ' UTF' 4 +21062 ' Uhr' 4 +21063 ' Ult' 4 +21064 ' Una' 4 +21065 ' Und' 4 +21066 ' Une' 4 +21067 ' Ung' 4 +21068 ' Uni' 4 +21069 ' Uns' 4 +21070 ' Unt' 4 +21071 ' Urb' 4 +21072 ' Uri' 4 +21073 ' Url' 4 +21074 ' Urs' 4 +21075 ' Use' 4 +21076 ' Utt' 4 +21077 ' VAL' 4 +21078 ' VAR' 4 +21079 ' VAT' 4 +21080 ' VER' 4 +21081 ' VID' 4 +21082 ' VII' 4 +21083 ' VIP' 4 +21084 ' VIS' 4 +21085 ' VOC' 4 +21086 ' VOL' 4 +21087 ' VPN' 4 +21088 ' Vac' 4 +21089 ' Val' 4 +21090 ' Van' 4 +21091 ' Var' 4 +21092 ' Vas' 4 +21093 ' Vec' 4 +21094 ' Ved' 4 +21095 ' Veg' 4 +21096 ' Veh' 4 +21097 ' Vel' 4 +21098 ' Ven' 4 +21099 ' Ver' 4 +21100 ' Ves' 4 +21101 ' Via' 4 +21102 ' Vic' 4 +21103 ' Vid' 4 +21104 ' Vie' 4 +21105 ' Vig' 4 +21106 ' Vij' 4 +21107 ' Vik' 4 +21108 ' Vil' 4 +21109 ' Vim' 4 +21110 ' Vin' 4 +21111 ' Vir' 4 +21112 ' Vis' 4 +21113 ' Vit' 4 +21114 ' Viv' 4 +21115 ' Voc' 4 +21116 ' Vog' 4 +21117 ' Vol' 4 +21118 ' Von' 4 +21119 ' Vor' 4 +21120 ' Vox' 4 +21121 ' Voy' 4 +21122 ' Vue' 4 +21123 ' Vul' 4 +21124 ' Vé' 4 +21125 ' WAR' 4 +21126 ' WAS' 4 +21127 ' WAY' 4 +21128 ' WEB' 4 +21129 ' WHO' 4 +21130 ' WIN' 4 +21131 ' WIT' 4 +21132 ' WOR' 4 +21133 ' WWE' 4 +21134 ' Wah' 4 +21135 ' Wak' 4 +21136 ' Wal' 4 +21137 ' Wan' 4 +21138 ' War' 4 +21139 ' Was' 4 +21140 ' Wat' 4 +21141 ' Way' 4 +21142 ' Web' 4 +21143 ' Wed' 4 +21144 ' Wei' 4 +21145 ' Wel' 4 +21146 ' Wen' 4 +21147 ' Wer' 4 +21148 ' Wes' 4 +21149 ' Wet' 4 +21150 ' Whe' 4 +21151 ' Who' 4 +21152 ' Why' 4 +21153 ' Wid' 4 +21154 ' Wie' 4 +21155 ' Wii' 4 +21156 ' Wik' 4 +21157 ' Wil' 4 +21158 ' Win' 4 +21159 ' Wir' 4 +21160 ' Wis' 4 +21161 ' Wit' 4 +21162 ' Wol' 4 +21163 ' Won' 4 +21164 ' Woo' 4 +21165 ' Wor' 4 +21166 ' Wow' 4 +21167 ' Wyn' 4 +21168 ' XII' 4 +21169 ' XIV' 4 +21170 ' XML' 4 +21171 ' XOR' 4 +21172 ' XVI' 4 +21173 ' XXX' 4 +21174 ' Xen' 4 +21175 ' Xia' 4 +21176 ' Xin' 4 +21177 ' Xml' 4 +21178 ' YES' 4 +21179 ' YOU' 4 +21180 ' Yad' 4 +21181 ' Yak' 4 +21182 ' Yam' 4 +21183 ' Yan' 4 +21184 ' Yao' 4 +21185 ' Yas' 4 +21186 ' Yes' 4 +21187 ' Yet' 4 +21188 ' Yin' 4 +21189 ' You' 4 +21190 ' Yuk' 4 +21191 ' Yun' 4 +21192 ' Yus' 4 +21193 ' ZIP' 4 +21194 ' Zag' 4 +21195 ' Zah' 4 +21196 ' Zak' 4 +21197 ' Zam' 4 +21198 ' Zap' 4 +21199 ' Zar' 4 +21200 ' Zel' 4 +21201 ' Zen' 4 +21202 ' Zhu' 4 +21203 ' Zig' 4 +21204 ' Zip' 4 +21205 ' Zoe' 4 +21206 ' Zoo' 4 +21207 ' Zum' 4 +21208 ' Zur' 4 +21209 ' Zwe' 4 +21210 ' [],' 4 +21211 ' [];' 4 +21212 ' ___' 4 +21213 ' ``(' 4 +21214 ' ```' 4 +21215 ' aan' 4 +21216 ' abb' 4 +21217 ' abc' 4 +21218 ' abi' 4 +21219 ' abl' 4 +21220 ' abs' 4 +21221 ' aby' 4 +21222 ' acc' 4 +21223 ' ace' 4 +21224 ' ach' 4 +21225 ' acl' 4 +21226 ' act' 4 +21227 ' add' 4 +21228 ' ade' 4 +21229 ' adj' 4 +21230 ' adm' 4 +21231 ' ado' 4 +21232 ' ads' 4 +21233 ' adv' 4 +21234 ' aer' 4 +21235 ' aes' 4 +21236 ' aff' 4 +21237 ' aft' 4 +21238 ' age' 4 +21239 ' agg' 4 +21240 ' ago' 4 +21241 ' agr' 4 +21242 ' aid' 4 +21243 ' ail' 4 +21244 ' aim' 4 +21245 ' ain' 4 +21246 ' air' 4 +21247 ' aka' 4 +21248 ' akt' 4 +21249 ' alb' 4 +21250 ' alc' 4 +21251 ' ald' 4 +21252 ' ale' 4 +21253 ' alg' 4 +21254 ' ali' 4 +21255 ' alk' 4 +21256 ' all' 4 +21257 ' als' 4 +21258 ' alt' 4 +21259 ' ama' 4 +21260 ' amb' 4 +21261 ' ami' 4 +21262 ' amp' 4 +21263 ' ana' 4 +21264 ' anc' 4 +21265 ' and' 4 +21266 ' ang' 4 +21267 ' ank' 4 +21268 ' ann' 4 +21269 ' ano' 4 +21270 ' ans' 4 +21271 ' ant' 4 +21272 ' anx' 4 +21273 ' any' 4 +21274 ' aos' 4 
+21275 ' aph' 4 +21276 ' api' 4 +21277 ' apo' 4 +21278 ' app' 4 +21279 ' apr' 4 +21280 ' apt' 4 +21281 ' aqu' 4 +21282 ' ara' 4 +21283 ' arb' 4 +21284 ' arc' 4 +21285 ' ard' 4 +21286 ' are' 4 +21287 ' arg' 4 +21288 ' ark' 4 +21289 ' arm' 4 +21290 ' arr' 4 +21291 ' art' 4 +21292 ' asc' 4 +21293 ' ash' 4 +21294 ' asi' 4 +21295 ' ask' 4 +21296 ' asm' 4 +21297 ' asp' 4 +21298 ' ass' 4 +21299 ' ast' 4 +21300 ' ate' 4 +21301 ' ath' 4 +21302 ' atm' 4 +21303 ' att' 4 +21304 ' auc' 4 +21305 ' aud' 4 +21306 ' auf' 4 +21307 ' aug' 4 +21308 ' aur' 4 +21309 ' aus' 4 +21310 ' aut' 4 +21311 ' aux' 4 +21312 ' ave' 4 +21313 ' avg' 4 +21314 ' avo' 4 +21315 ' awa' 4 +21316 ' awe' 4 +21317 ' awk' 4 +21318 ' aws' 4 +21319 ' axe' 4 +21320 ' aç' 4 +21321 ' añ' 4 +21322 ' až' 4 +21323 ' bab' 4 +21324 ' bac' 4 +21325 ' bad' 4 +21326 ' bag' 4 +21327 ' bak' 4 +21328 ' bal' 4 +21329 ' bam' 4 +21330 ' ban' 4 +21331 ' bar' 4 +21332 ' bas' 4 +21333 ' bat' 4 +21334 ' bay' 4 +21335 ' baz' 4 +21336 ' bec' 4 +21337 ' bed' 4 +21338 ' bee' 4 +21339 ' bef' 4 +21340 ' beg' 4 +21341 ' beh' 4 +21342 ' bei' 4 +21343 ' bek' 4 +21344 ' bel' 4 +21345 ' bem' 4 +21346 ' ben' 4 +21347 ' ber' 4 +21348 ' bes' 4 +21349 ' bet' 4 +21350 ' bew' 4 +21351 ' bey' 4 +21352 ' bez' 4 +21353 ' bib' 4 +21354 ' bic' 4 +21355 ' bid' 4 +21356 ' bif' 4 +21357 ' big' 4 +21358 ' bij' 4 +21359 ' bil' 4 +21360 ' bin' 4 +21361 ' bio' 4 +21362 ' bip' 4 +21363 ' bir' 4 +21364 ' bis' 4 +21365 ' bit' 4 +21366 ' biz' 4 +21367 ' bla' 4 +21368 ' ble' 4 +21369 ' blk' 4 +21370 ' blo' 4 +21371 ' bob' 4 +21372 ' bod' 4 +21373 ' bog' 4 +21374 ' bol' 4 +21375 ' bom' 4 +21376 ' bon' 4 +21377 ' boo' 4 +21378 ' bor' 4 +21379 ' bos' 4 +21380 ' bot' 4 +21381 ' bou' 4 +21382 ' bow' 4 +21383 ' box' 4 +21384 ' boy' 4 +21385 ' bra' 4 +21386 ' bre' 4 +21387 ' bri' 4 +21388 ' bro' 4 +21389 ' bru' 4 +21390 ' btn' 4 +21391 ' bub' 4 +21392 ' bud' 4 +21393 ' buf' 4 +21394 ' bug' 4 +21395 ' bul' 4 +21396 ' bum' 4 +21397 ' bun' 4 +21398 ' bur' 4 +21399 ' bus' 4 +21400 ' but' 4 +21401 ' buy' 4 +21402 ' bye' 4 +21403 ' bzw' 4 +21404 ' bä' 4 +21405 ' bé' 4 +21406 ' bý' 4 +21407 ' cab' 4 +21408 ' cac' 4 +21409 ' cad' 4 +21410 ' caf' 4 +21411 ' cal' 4 +21412 ' cam' 4 +21413 ' can' 4 +21414 ' cap' 4 +21415 ' car' 4 +21416 ' cas' 4 +21417 ' cat' 4 +21418 ' cav' 4 +21419 ' cel' 4 +21420 ' cen' 4 +21421 ' cep' 4 +21422 ' cer' 4 +21423 ' ces' 4 +21424 ' cet' 4 +21425 ' cfg' 4 +21426 ' cha' 4 +21427 ' che' 4 +21428 ' chi' 4 +21429 ' chk' 4 +21430 ' cho' 4 +21431 ' chr' 4 +21432 ' cic' 4 +21433 ' cid' 4 +21434 ' cig' 4 +21435 ' cin' 4 +21436 ' cir' 4 +21437 ' cis' 4 +21438 ' cit' 4 +21439 ' civ' 4 +21440 ' cla' 4 +21441 ' cle' 4 +21442 ' cli' 4 +21443 ' clo' 4 +21444 ' cls' 4 +21445 ' cmd' 4 +21446 ' cmp' 4 +21447 ' cnt' 4 +21448 ' cob' 4 +21449 ' coc' 4 +21450 ' cod' 4 +21451 ' cof' 4 +21452 ' cog' 4 +21453 ' coh' 4 +21454 ' col' 4 +21455 ' com' 4 +21456 ' con' 4 +21457 ' cop' 4 +21458 ' cor' 4 +21459 ' cos' 4 +21460 ' cot' 4 +21461 ' cou' 4 +21462 ' cov' 4 +21463 ' cow' 4 +21464 ' coy' 4 +21465 ' cpu' 4 +21466 ' cra' 4 +21467 ' cre' 4 +21468 ' cri' 4 +21469 ' cro' 4 +21470 ' cru' 4 +21471 ' cry' 4 +21472 ' css' 4 +21473 ' csv' 4 +21474 ' ctx' 4 +21475 ' cub' 4 +21476 ' cuc' 4 +21477 ' cue' 4 +21478 ' cui' 4 +21479 ' cul' 4 +21480 ' cum' 4 +21481 ' cup' 4 +21482 ' cur' 4 +21483 ' cus' 4 +21484 ' cut' 4 +21485 ' cyl' 4 +21486 ' cyn' 4 +21487 ' cyt' 4 +21488 ' cá' 4 +21489 ' câ' 4 +21490 ' cé' 4 +21491 ' cí' 4 +21492 ' có' 4 +21493 ' cô' 4 +21494 ' că' 4 +21495 ' cơ' 4 +21496 ' dab' 4 +21497 ' dad' 4 
+21498 ' dag' 4 +21499 ' dah' 4 +21500 ' dai' 4 +21501 ' dal' 4 +21502 ' dam' 4 +21503 ' dan' 4 +21504 ' dao' 4 +21505 ' dar' 4 +21506 ' das' 4 +21507 ' dat' 4 +21508 ' dav' 4 +21509 ' day' 4 +21510 ' dbo' 4 +21511 ' deb' 4 +21512 ' dec' 4 +21513 ' ded' 4 +21514 ' dee' 4 +21515 ' def' 4 +21516 ' deg' 4 +21517 ' dei' 4 +21518 ' dej' 4 +21519 ' del' 4 +21520 ' dem' 4 +21521 ' den' 4 +21522 ' dep' 4 +21523 ' der' 4 +21524 ' des' 4 +21525 ' det' 4 +21526 ' dev' 4 +21527 ' dex' 4 +21528 ' dez' 4 +21529 ' dia' 4 +21530 ' dib' 4 +21531 ' dic' 4 +21532 ' did' 4 +21533 ' die' 4 +21534 ' dif' 4 +21535 ' dig' 4 +21536 ' dil' 4 +21537 ' dim' 4 +21538 ' din' 4 +21539 ' dio' 4 +21540 ' dip' 4 +21541 ' dir' 4 +21542 ' dis' 4 +21543 ' dit' 4 +21544 ' div' 4 +21545 ' diz' 4 +21546 ' dll' 4 +21547 ' dns' 4 +21548 ' dob' 4 +21549 ' doc' 4 +21550 ' dod' 4 +21551 ' dog' 4 +21552 ' doi' 4 +21553 ' dok' 4 +21554 ' dol' 4 +21555 ' dom' 4 +21556 ' don' 4 +21557 ' dop' 4 +21558 ' dor' 4 +21559 ' dos' 4 +21560 ' dot' 4 +21561 ' dou' 4 +21562 ' dow' 4 +21563 ' dpi' 4 +21564 ' dra' 4 +21565 ' dre' 4 +21566 ' dri' 4 +21567 ' dro' 4 +21568 ' dru' 4 +21569 ' dry' 4 +21570 ' dst' 4 +21571 ' dub' 4 +21572 ' due' 4 +21573 ' dug' 4 +21574 ' dum' 4 +21575 ' dun' 4 +21576 ' duo' 4 +21577 ' dup' 4 +21578 ' dur' 4 +21579 ' dus' 4 +21580 ' dut' 4 +21581 ' dye' 4 +21582 ' dyn' 4 +21583 ' dys' 4 +21584 ' dá' 4 +21585 ' då' 4 +21586 ' dé' 4 +21587 ' dí' 4 +21588 ' dó' 4 +21589 ' dü' 4 +21590 ' ear' 4 +21591 ' eas' 4 +21592 ' eat' 4 +21593 ' ecc' 4 +21594 ' ech' 4 +21595 ' ecl' 4 +21596 ' eco' 4 +21597 ' ect' 4 +21598 ' eds' 4 +21599 ' een' 4 +21600 ' eer' 4 +21601 ' eff' 4 +21602 ' egg' 4 +21603 ' ego' 4 +21604 ' egy' 4 +21605 ' eig' 4 +21606 ' ein' 4 +21607 ' ela' 4 +21608 ' ele' 4 +21609 ' elf' 4 +21610 ' ell' 4 +21611 ' els' 4 +21612 ' emb' 4 +21613 ' emo' 4 +21614 ' emp' 4 +21615 ' enc' 4 +21616 ' end' 4 +21617 ' enf' 4 +21618 ' eng' 4 +21619 ' enh' 4 +21620 ' ens' 4 +21621 ' ent' 4 +21622 ' env' 4 +21623 ' eos' 4 +21624 ' eps' 4 +21625 ' equ' 4 +21626 ' era' 4 +21627 ' ere' 4 +21628 ' erf' 4 +21629 ' erg' 4 +21630 ' ern' 4 +21631 ' err' 4 +21632 ' ers' 4 +21633 ' eru' 4 +21634 ' ery' 4 +21635 ' esa' 4 +21636 ' esc' 4 +21637 ' ese' 4 +21638 ' eso' 4 +21639 ' esp' 4 +21640 ' ess' 4 +21641 ' est' 4 +21642 ' eta' 4 +21643 ' etc' 4 +21644 ' eth' 4 +21645 ' ett' 4 +21646 ' eux' 4 +21647 ' eve' 4 +21648 ' evt' 4 +21649 ' exc' 4 +21650 ' exe' 4 +21651 ' exh' 4 +21652 ' exp' 4 +21653 ' ext' 4 +21654 ' eye' 4 +21655 ' fab' 4 +21656 ' fac' 4 +21657 ' fal' 4 +21658 ' fam' 4 +21659 ' fan' 4 +21660 ' far' 4 +21661 ' fas' 4 +21662 ' fat' 4 +21663 ' fav' 4 +21664 ' fax' 4 +21665 ' faz' 4 +21666 ' fed' 4 +21667 ' fee' 4 +21668 ' fel' 4 +21669 ' fem' 4 +21670 ' fen' 4 +21671 ' fer' 4 +21672 ' fet' 4 +21673 ' feu' 4 +21674 ' few' 4 +21675 ' fft' 4 +21676 ' fib' 4 +21677 ' fic' 4 +21678 ' fid' 4 +21679 ' fif' 4 +21680 ' fig' 4 +21681 ' fil' 4 +21682 ' fim' 4 +21683 ' fin' 4 +21684 ' fir' 4 +21685 ' fis' 4 +21686 ' fit' 4 +21687 ' fix' 4 +21688 ' fla' 4 +21689 ' fle' 4 +21690 ' flo' 4 +21691 ' flu' 4 +21692 ' fly' 4 +21693 ' fmt' 4 +21694 ' foc' 4 +21695 ' fog' 4 +21696 ' foi' 4 +21697 ' fol' 4 +21698 ' fon' 4 +21699 ' foo' 4 +21700 ' for' 4 +21701 ' fos' 4 +21702 ' fot' 4 +21703 ' fou' 4 +21704 ' fox' 4 +21705 ' fra' 4 +21706 ' fre' 4 +21707 ' fri' 4 +21708 ' frm' 4 +21709 ' fro' 4 +21710 ' fru' 4 +21711 ' fry' 4 +21712 ' ftp' 4 +21713 ' fue' 4 +21714 ' fug' 4 +21715 ' ful' 4 +21716 ' fun' 4 +21717 ' fur' 4 +21718 ' fus' 4 +21719 ' fut' 4 +21720 ' 
fá' 4 +21721 ' få' 4 +21722 ' fé' 4 +21723 ' fö' 4 +21724 ' fø' 4 +21725 ' fő' 4 +21726 ' gab' 4 +21727 ' gad' 4 +21728 ' gag' 4 +21729 ' gal' 4 +21730 ' gam' 4 +21731 ' gan' 4 +21732 ' gap' 4 +21733 ' gar' 4 +21734 ' gas' 4 +21735 ' gau' 4 +21736 ' gay' 4 +21737 ' gaz' 4 +21738 ' gcc' 4 +21739 ' gcd' 4 +21740 ' geb' 4 +21741 ' ged' 4 +21742 ' gef' 4 +21743 ' geg' 4 +21744 ' gek' 4 +21745 ' gel' 4 +21746 ' gem' 4 +21747 ' gen' 4 +21748 ' geo' 4 +21749 ' ger' 4 +21750 ' ges' 4 +21751 ' get' 4 +21752 ' gew' 4 +21753 ' gez' 4 +21754 ' gib' 4 +21755 ' gid' 4 +21756 ' gif' 4 +21757 ' gig' 4 +21758 ' gin' 4 +21759 ' gir' 4 +21760 ' git' 4 +21761 ' giv' 4 +21762 ' gle' 4 +21763 ' gli' 4 +21764 ' glo' 4 +21765 ' glu' 4 +21766 ' gly' 4 +21767 ' gob' 4 +21768 ' god' 4 +21769 ' gol' 4 +21770 ' gon' 4 +21771 ' gor' 4 +21772 ' got' 4 +21773 ' gou' 4 +21774 ' gpu' 4 +21775 ' gra' 4 +21776 ' gre' 4 +21777 ' gri' 4 +21778 ' gro' 4 +21779 ' gru' 4 +21780 ' gtk' 4 +21781 ' gui' 4 +21782 ' gul' 4 +21783 ' gum' 4 +21784 ' gun' 4 +21785 ' gut' 4 +21786 ' guy' 4 +21787 ' gym' 4 +21788 ' gå' 4 +21789 ' gé' 4 +21790 ' gö' 4 +21791 ' gü' 4 +21792 ' gł' 4 +21793 ' hab' 4 +21794 ' hac' 4 +21795 ' had' 4 +21796 ' hal' 4 +21797 ' ham' 4 +21798 ' han' 4 +21799 ' har' 4 +21800 ' has' 4 +21801 ' hat' 4 +21802 ' hav' 4 +21803 ' haw' 4 +21804 ' hay' 4 +21805 ' haz' 4 +21806 ' hed' 4 +21807 ' hel' 4 +21808 ' hem' 4 +21809 ' hen' 4 +21810 ' hep' 4 +21811 ' her' 4 +21812 ' hes' 4 +21813 ' het' 4 +21814 ' hex' 4 +21815 ' hey' 4 +21816 ' hid' 4 +21817 ' hig' 4 +21818 ' hij' 4 +21819 ' hil' 4 +21820 ' him' 4 +21821 ' hin' 4 +21822 ' hip' 4 +21823 ' his' 4 +21824 ' hit' 4 +21825 ' hob' 4 +21826 ' hoc' 4 +21827 ' hog' 4 +21828 ' hol' 4 +21829 ' hom' 4 +21830 ' hon' 4 +21831 ' hop' 4 +21832 ' hor' 4 +21833 ' hos' 4 +21834 ' hot' 4 +21835 ' how' 4 +21836 ' hrs' 4 +21837 ' htt' 4 +21838 ' hub' 4 +21839 ' hue' 4 +21840 ' hug' 4 +21841 ' huh' 4 +21842 ' hum' 4 +21843 ' hun' 4 +21844 ' hur' 4 +21845 ' hus' 4 +21846 ' hut' 4 +21847 ' hyd' 4 +21848 ' hym' 4 +21849 ' hyp' 4 +21850 ' há' 4 +21851 ' hä' 4 +21852 ' hå' 4 +21853 ' hé' 4 +21854 ' hö' 4 +21855 ' iOS' 4 +21856 ' ice' 4 +21857 ' ich' 4 +21858 ' ici' 4 +21859 ' icy' 4 +21860 ' ide' 4 +21861 ' idi' 4 +21862 ' ids' 4 +21863 ' idx' 4 +21864 ' iff' 4 +21865 ' ign' 4 +21866 ' ihm' 4 +21867 ' ihn' 4 +21868 ' ihr' 4 +21869 ' iii' 4 +21870 ' ile' 4 +21871 ' ili' 4 +21872 ' ill' 4 +21873 ' ils' 4 +21874 ' imb' 4 +21875 ' img' 4 +21876 ' imm' 4 +21877 ' imp' 4 +21878 ' inc' 4 +21879 ' ind' 4 +21880 ' inf' 4 +21881 ' ing' 4 +21882 ' inh' 4 +21883 ' ini' 4 +21884 ' inj' 4 +21885 ' ink' 4 +21886 ' inn' 4 +21887 ' ins' 4 +21888 ' int' 4 +21889 ' inv' 4 +21890 ' iod' 4 +21891 ' ion' 4 +21892 ' ios' 4 +21893 ' ips' 4 +21894 ' ire' 4 +21895 ' irr' 4 +21896 ' isn' 4 +21897 ' iso' 4 +21898 ' iss' 4 +21899 ' ist' 4 +21900 ' ith' 4 +21901 ' itr' 4 +21902 ' its' 4 +21903 ' iç' 4 +21904 ' iş' 4 +21905 ' jab' 4 +21906 ' jac' 4 +21907 ' jag' 4 +21908 ' jak' 4 +21909 ' jal' 4 +21910 ' jam' 4 +21911 ' jan' 4 +21912 ' jap' 4 +21913 ' jar' 4 +21914 ' jas' 4 +21915 ' jav' 4 +21916 ' jaw' 4 +21917 ' jed' 4 +21918 ' jej' 4 +21919 ' jel' 4 +21920 ' jer' 4 +21921 ' jet' 4 +21922 ' jeu' 4 +21923 ' jew' 4 +21924 ' job' 4 +21925 ' jog' 4 +21926 ' jou' 4 +21927 ' joy' 4 +21928 ' jud' 4 +21929 ' jug' 4 +21930 ' jul' 4 +21931 ' jun' 4 +21932 ' jur' 4 +21933 ' jus' 4 +21934 ' já' 4 +21935 ' jä' 4 +21936 ' jó' 4 +21937 ' jú' 4 +21938 ' kHz' 4 +21939 ' kad' 4 +21940 ' kal' 4 +21941 ' kam' 4 +21942 ' kan' 4 +21943 ' kap' 4 
+21944 ' kar' 4 +21945 ' kas' 4 +21946 ' kat' 4 +21947 ' kay' 4 +21948 ' kde' 4 +21949 ' kel' 4 +21950 ' ker' 4 +21951 ' ket' 4 +21952 ' key' 4 +21953 ' kid' 4 +21954 ' kil' 4 +21955 ' kin' 4 +21956 ' kir' 4 +21957 ' kit' 4 +21958 ' kle' 4 +21959 ' kne' 4 +21960 ' kol' 4 +21961 ' kom' 4 +21962 ' kon' 4 +21963 ' kop' 4 +21964 ' kor' 4 +21965 ' kos' 4 +21966 ' kot' 4 +21967 ' kre' 4 +21968 ' kun' 4 +21969 ' kur' 4 +21970 ' kä' 4 +21971 ' ké' 4 +21972 ' kö' 4 +21973 ' kø' 4 +21974 ' kü' 4 +21975 ' kā' 4 +21976 ' lab' 4 +21977 ' lac' 4 +21978 ' lad' 4 +21979 ' lag' 4 +21980 ' lak' 4 +21981 ' lam' 4 +21982 ' lan' 4 +21983 ' lap' 4 +21984 ' lar' 4 +21985 ' las' 4 +21986 ' lat' 4 +21987 ' lav' 4 +21988 ' law' 4 +21989 ' lax' 4 +21990 ' lay' 4 +21991 ' laz' 4 +21992 ' lbl' 4 +21993 ' lbs' 4 +21994 ' led' 4 +21995 ' leg' 4 +21996 ' lei' 4 +21997 ' lem' 4 +21998 ' len' 4 +21999 ' ler' 4 +22000 ' les' 4 +22001 ' let' 4 +22002 ' lev' 4 +22003 ' lex' 4 +22004 ' lhs' 4 +22005 ' lia' 4 +22006 ' lib' 4 +22007 ' lic' 4 +22008 ' lid' 4 +22009 ' lie' 4 +22010 ' lif' 4 +22011 ' lig' 4 +22012 ' lik' 4 +22013 ' lil' 4 +22014 ' lim' 4 +22015 ' lin' 4 +22016 ' lip' 4 +22017 ' lis' 4 +22018 ' lit' 4 +22019 ' liv' 4 +22020 ' lle' 4 +22021 ' lng' 4 +22022 ' lob' 4 +22023 ' loc' 4 +22024 ' lod' 4 +22025 ' log' 4 +22026 ' loi' 4 +22027 ' lok' 4 +22028 ' lol' 4 +22029 ' lon' 4 +22030 ' los' 4 +22031 ' lot' 4 +22032 ' lou' 4 +22033 ' lov' 4 +22034 ' low' 4 +22035 ' lst' 4 +22036 ' lua' 4 +22037 ' lub' 4 +22038 ' luc' 4 +22039 ' lud' 4 +22040 ' lug' 4 +22041 ' lui' 4 +22042 ' lum' 4 +22043 ' lun' 4 +22044 ' lup' 4 +22045 ' lur' 4 +22046 ' lut' 4 +22047 ' lux' 4 +22048 ' lyr' 4 +22049 ' lys' 4 +22050 ' là' 4 +22051 ' lá' 4 +22052 ' lä' 4 +22053 ' lå' 4 +22054 ' læ' 4 +22055 ' lé' 4 +22056 ' lí' 4 +22057 ' lö' 4 +22058 ' lø' 4 +22059 ' mac' 4 +22060 ' mad' 4 +22061 ' mag' 4 +22062 ' mah' 4 +22063 ' mai' 4 +22064 ' maj' 4 +22065 ' mak' 4 +22066 ' mal' 4 +22067 ' mam' 4 +22068 ' man' 4 +22069 ' map' 4 +22070 ' mar' 4 +22071 ' mas' 4 +22072 ' mat' 4 +22073 ' max' 4 +22074 ' may' 4 +22075 ' mec' 4 +22076 ' med' 4 +22077 ' meg' 4 +22078 ' mel' 4 +22079 ' mem' 4 +22080 ' men' 4 +22081 ' mer' 4 +22082 ' mes' 4 +22083 ' met' 4 +22084 ' meu' 4 +22085 ' mex' 4 +22086 ' mez' 4 +22087 ' miR' 4 +22088 ' mic' 4 +22089 ' mid' 4 +22090 ' mie' 4 +22091 ' mig' 4 +22092 ' mij' 4 +22093 ' mil' 4 +22094 ' mim' 4 +22095 ' min' 4 +22096 ' mir' 4 +22097 ' mis' 4 +22098 ' mit' 4 +22099 ' mix' 4 +22100 ' mob' 4 +22101 ' mod' 4 +22102 ' mog' 4 +22103 ' moi' 4 +22104 ' mol' 4 +22105 ' mom' 4 +22106 ' mon' 4 +22107 ' mor' 4 +22108 ' mos' 4 +22109 ' mot' 4 +22110 ' mou' 4 +22111 ' mov' 4 +22112 ' moy' 4 +22113 ' mph' 4 +22114 ' msg' 4 +22115 ' muc' 4 +22116 ' mud' 4 +22117 ' mug' 4 +22118 ' mul' 4 +22119 ' mum' 4 +22120 ' mun' 4 +22121 ' mur' 4 +22122 ' mus' 4 +22123 ' mut' 4 +22124 ' muy' 4 +22125 ' mys' 4 +22126 ' mà' 4 +22127 ' má' 4 +22128 ' mã' 4 +22129 ' må' 4 +22130 ' mé' 4 +22131 ' mí' 4 +22132 ' mó' 4 +22133 ' mô' 4 +22134 ' mö' 4 +22135 ' mø' 4 +22136 ' mú' 4 +22137 ' mü' 4 +22138 ' mě' 4 +22139 ' mű' 4 +22140 ' nab' 4 +22141 ' nad' 4 +22142 ' nag' 4 +22143 ' nah' 4 +22144 ' naj' 4 +22145 ' nak' 4 +22146 ' nal' 4 +22147 ' nam' 4 +22148 ' nan' 4 +22149 ' nap' 4 +22150 ' nar' 4 +22151 ' nas' 4 +22152 ' nat' 4 +22153 ' nau' 4 +22154 ' nav' 4 +22155 ' naz' 4 +22156 ' neb' 4 +22157 ' nec' 4 +22158 ' ned' 4 +22159 ' neg' 4 +22160 ' nel' 4 +22161 ' nem' 4 +22162 ' nen' 4 +22163 ' neo' 4 +22164 ' nep' 4 +22165 ' ner' 4 +22166 ' net' 4 +22167 ' neu' 4 
+22168 ' new' 4 +22169 ' nex' 4 +22170 ' nib' 4 +22171 ' nic' 4 +22172 ' nid' 4 +22173 ' nie' 4 +22174 ' nig' 4 +22175 ' nil' 4 +22176 ' nim' 4 +22177 ' nin' 4 +22178 ' nit' 4 +22179 ' nob' 4 +22180 ' noc' 4 +22181 ' nod' 4 +22182 ' nog' 4 +22183 ' nom' 4 +22184 ' non' 4 +22185 ' nor' 4 +22186 ' nos' 4 +22187 ' not' 4 +22188 ' nou' 4 +22189 ' nov' 4 +22190 ' now' 4 +22191 ' npc' 4 +22192 ' npm' 4 +22193 ' nth' 4 +22194 ' nud' 4 +22195 ' nue' 4 +22196 ' num' 4 +22197 ' nun' 4 +22198 ' nur' 4 +22199 ' nut' 4 +22200 ' ná' 4 +22201 ' nä' 4 +22202 ' nå' 4 +22203 ' né' 4 +22204 ' në' 4 +22205 ' nó' 4 +22206 ' nú' 4 +22207 ' ně' 4 +22208 ' oak' 4 +22209 ' obj' 4 +22210 ' obl' 4 +22211 ' obs' 4 +22212 ' obt' 4 +22213 ' occ' 4 +22214 ' och' 4 +22215 ' oct' 4 +22216 ' odd' 4 +22217 ' ode' 4 +22218 ' off' 4 +22219 ' oft' 4 +22220 ' oil' 4 +22221 ' old' 4 +22222 ' ole' 4 +22223 ' oli' 4 +22224 ' omn' 4 +22225 ' onc' 4 +22226 ' one' 4 +22227 ' ont' 4 +22228 ' ook' 4 +22229 ' opp' 4 +22230 ' ops' 4 +22231 ' opt' 4 +22232 ' ora' 4 +22233 ' orb' 4 +22234 ' ord' 4 +22235 ' ore' 4 +22236 ' org' 4 +22237 ' ori' 4 +22238 ' orn' 4 +22239 ' oro' 4 +22240 ' ort' 4 +22241 ' osc' 4 +22242 ' osm' 4 +22243 ' osp' 4 +22244 ' oss' 4 +22245 ' ost' 4 +22246 ' ott' 4 +22247 ' oun' 4 +22248 ' our' 4 +22249 ' out' 4 +22250 ' owe' 4 +22251 ' own' 4 +22252 ' oxy' 4 +22253 ' où' 4 +22254 ' pac' 4 +22255 ' pad' 4 +22256 ' pag' 4 +22257 ' pai' 4 +22258 ' pak' 4 +22259 ' pal' 4 +22260 ' pam' 4 +22261 ' pan' 4 +22262 ' pap' 4 +22263 ' par' 4 +22264 ' pas' 4 +22265 ' pat' 4 +22266 ' pau' 4 +22267 ' pav' 4 +22268 ' paw' 4 +22269 ' pay' 4 +22270 ' pdf' 4 +22271 ' pec' 4 +22272 ' ped' 4 +22273 ' peg' 4 +22274 ' pel' 4 +22275 ' pem' 4 +22276 ' pen' 4 +22277 ' pep' 4 +22278 ' per' 4 +22279 ' pes' 4 +22280 ' pet' 4 +22281 ' peu' 4 +22282 ' phi' 4 +22283 ' php' 4 +22284 ' phr' 4 +22285 ' phy' 4 +22286 ' pic' 4 +22287 ' pid' 4 +22288 ' pie' 4 +22289 ' pig' 4 +22290 ' pil' 4 +22291 ' pin' 4 +22292 ' pip' 4 +22293 ' pir' 4 +22294 ' pis' 4 +22295 ' pit' 4 +22296 ' piv' 4 +22297 ' pix' 4 +22298 ' pkg' 4 +22299 ' pla' 4 +22300 ' ple' 4 +22301 ' plt' 4 +22302 ' ply' 4 +22303 ' png' 4 +22304 ' pob' 4 +22305 ' poc' 4 +22306 ' pod' 4 +22307 ' pog' 4 +22308 ' poi' 4 +22309 ' pok' 4 +22310 ' pol' 4 +22311 ' pom' 4 +22312 ' pon' 4 +22313 ' pop' 4 +22314 ' por' 4 +22315 ' pos' 4 +22316 ' pot' 4 +22317 ' pou' 4 +22318 ' pov' 4 +22319 ' pow' 4 +22320 ' poz' 4 +22321 ' ppm' 4 +22322 ' pra' 4 +22323 ' pre' 4 +22324 ' pri' 4 +22325 ' pro' 4 +22326 ' prz' 4 +22327 ' pse' 4 +22328 ' psi' 4 +22329 ' psy' 4 +22330 ' ptr' 4 +22331 ' pts' 4 +22332 ' pub' 4 +22333 ' pud' 4 +22334 ' pul' 4 +22335 ' pun' 4 +22336 ' pup' 4 +22337 ' pur' 4 +22338 ' pus' 4 +22339 ' put' 4 +22340 ' pyg' 4 +22341 ' pyl' 4 +22342 ' pá' 4 +22343 ' pä' 4 +22344 ' på' 4 +22345 ' pé' 4 +22346 ' pó' 4 +22347 ' pł' 4 +22348 ' př' 4 +22349 ' pů' 4 +22350 ' que' 4 +22351 ' qui' 4 +22352 ' quo' 4 +22353 ' rab' 4 +22354 ' rac' 4 +22355 ' rad' 4 +22356 ' rag' 4 +22357 ' ram' 4 +22358 ' ran' 4 +22359 ' rap' 4 +22360 ' ras' 4 +22361 ' rat' 4 +22362 ' rav' 4 +22363 ' raw' 4 +22364 ' ray' 4 +22365 ' raz' 4 +22366 ' reb' 4 +22367 ' rec' 4 +22368 ' red' 4 +22369 ' ref' 4 +22370 ' reg' 4 +22371 ' rel' 4 +22372 ' rem' 4 +22373 ' ren' 4 +22374 ' rep' 4 +22375 ' req' 4 +22376 ' rer' 4 +22377 ' res' 4 +22378 ' ret' 4 +22379 ' rev' 4 +22380 ' rez' 4 +22381 ' rgb' 4 +22382 ' rhe' 4 +22383 ' rho' 4 +22384 ' rhs' 4 +22385 ' rib' 4 +22386 ' ric' 4 +22387 ' rid' 4 +22388 ' rif' 4 +22389 ' rig' 4 +22390 ' rim' 4 
+22391 ' rin' 4 +22392 ' rip' 4 +22393 ' ris' 4 +22394 ' rit' 4 +22395 ' riv' 4 +22396 ' rms' 4 +22397 ' rng' 4 +22398 ' rob' 4 +22399 ' roc' 4 +22400 ' rod' 4 +22401 ' roi' 4 +22402 ' rol' 4 +22403 ' rom' 4 +22404 ' ros' 4 +22405 ' rot' 4 +22406 ' rou' 4 +22407 ' row' 4 +22408 ' roy' 4 +22409 ' roz' 4 +22410 ' rpm' 4 +22411 ' rst' 4 +22412 ' rub' 4 +22413 ' rud' 4 +22414 ' rue' 4 +22415 ' rug' 4 +22416 ' rul' 4 +22417 ' rum' 4 +22418 ' run' 4 +22419 ' rus' 4 +22420 ' rut' 4 +22421 ' rá' 4 +22422 ' rå' 4 +22423 ' rè' 4 +22424 ' ré' 4 +22425 ' rê' 4 +22426 ' sab' 4 +22427 ' sac' 4 +22428 ' sad' 4 +22429 ' saf' 4 +22430 ' sag' 4 +22431 ' sal' 4 +22432 ' sam' 4 +22433 ' san' 4 +22434 ' sap' 4 +22435 ' sar' 4 +22436 ' sat' 4 +22437 ' sau' 4 +22438 ' sav' 4 +22439 ' saw' 4 +22440 ' sax' 4 +22441 ' say' 4 +22442 ' sch' 4 +22443 ' sci' 4 +22444 ' sco' 4 +22445 ' scr' 4 +22446 ' sea' 4 +22447 ' sec' 4 +22448 ' sed' 4 +22449 ' see' 4 +22450 ' seg' 4 +22451 ' sei' 4 +22452 ' sel' 4 +22453 ' sem' 4 +22454 ' sen' 4 +22455 ' sep' 4 +22456 ' seq' 4 +22457 ' ser' 4 +22458 ' ses' 4 +22459 ' set' 4 +22460 ' seu' 4 +22461 ' sew' 4 +22462 ' sex' 4 +22463 ' sha' 4 +22464 ' she' 4 +22465 ' sho' 4 +22466 ' shr' 4 +22467 ' shy' 4 +22468 ' sia' 4 +22469 ' sib' 4 +22470 ' sic' 4 +22471 ' sid' 4 +22472 ' sie' 4 +22473 ' sig' 4 +22474 ' sil' 4 +22475 ' sim' 4 +22476 ' sin' 4 +22477 ' sip' 4 +22478 ' sir' 4 +22479 ' sis' 4 +22480 ' sit' 4 +22481 ' six' 4 +22482 ' ske' 4 +22483 ' ski' 4 +22484 ' sku' 4 +22485 ' sky' 4 +22486 ' sla' 4 +22487 ' sle' 4 +22488 ' sme' 4 +22489 ' smo' 4 +22490 ' sms' 4 +22491 ' snd' 4 +22492 ' sne' 4 +22493 ' sob' 4 +22494 ' soc' 4 +22495 ' sod' 4 +22496 ' sog' 4 +22497 ' sol' 4 +22498 ' som' 4 +22499 ' son' 4 +22500 ' sop' 4 +22501 ' sor' 4 +22502 ' sou' 4 +22503 ' sow' 4 +22504 ' soy' 4 +22505 ' spa' 4 +22506 ' spe' 4 +22507 ' sph' 4 +22508 ' spl' 4 +22509 ' spo' 4 +22510 ' spp' 4 +22511 ' spr' 4 +22512 ' spy' 4 +22513 ' sql' 4 +22514 ' squ' 4 +22515 ' src' 4 +22516 ' ssh' 4 +22517 ' ssl' 4 +22518 ' sta' 4 +22519 ' std' 4 +22520 ' ste' 4 +22521 ' sto' 4 +22522 ' str' 4 +22523 ' sty' 4 +22524 ' sua' 4 +22525 ' sub' 4 +22526 ' suc' 4 +22527 ' sud' 4 +22528 ' sue' 4 +22529 ' suf' 4 +22530 ' sug' 4 +22531 ' sul' 4 +22532 ' sum' 4 +22533 ' sun' 4 +22534 ' suo' 4 +22535 ' sup' 4 +22536 ' sur' 4 +22537 ' sus' 4 +22538 ' sut' 4 +22539 ' svg' 4 +22540 ' swe' 4 +22541 ' swo' 4 +22542 ' sym' 4 +22543 ' syn' 4 +22544 ' sys' 4 +22545 ' sä' 4 +22546 ' så' 4 +22547 ' sæ' 4 +22548 ' sé' 4 +22549 ' sí' 4 +22550 ' só' 4 +22551 ' sø' 4 +22552 ' sú' 4 +22553 ' sû' 4 +22554 ' sü' 4 +22555 ' să' 4 +22556 ' są' 4 +22557 ' sł' 4 +22558 ' tab' 4 +22559 ' tac' 4 +22560 ' tad' 4 +22561 ' tag' 4 +22562 ' tai' 4 +22563 ' tak' 4 +22564 ' tal' 4 +22565 ' tam' 4 +22566 ' tan' 4 +22567 ' tap' 4 +22568 ' tar' 4 +22569 ' tas' 4 +22570 ' tat' 4 +22571 ' tau' 4 +22572 ' tax' 4 +22573 ' tbl' 4 +22574 ' tcp' 4 +22575 ' tea' 4 +22576 ' tec' 4 +22577 ' ted' 4 +22578 ' tee' 4 +22579 ' tej' 4 +22580 ' tek' 4 +22581 ' tel' 4 +22582 ' tem' 4 +22583 ' ten' 4 +22584 ' ter' 4 +22585 ' tes' 4 +22586 ' tet' 4 +22587 ' tex' 4 +22588 ' tgt' 4 +22589 ' tha' 4 +22590 ' the' 4 +22591 ' thi' 4 +22592 ' tho' 4 +22593 ' thr' 4 +22594 ' thy' 4 +22595 ' tib' 4 +22596 ' tic' 4 +22597 ' tid' 4 +22598 ' tie' 4 +22599 ' til' 4 +22600 ' tim' 4 +22601 ' tin' 4 +22602 ' tip' 4 +22603 ' tir' 4 +22604 ' tit' 4 +22605 ' tmp' 4 +22606 ' tob' 4 +22607 ' toc' 4 +22608 ' tod' 4 +22609 ' toe' 4 +22610 ' tok' 4 +22611 ' tol' 4 +22612 ' tom' 4 +22613 ' ton' 4 
+22614 ' too' 4 +22615 ' top' 4 +22616 ' tor' 4 +22617 ' tot' 4 +22618 ' tou' 4 +22619 ' tow' 4 +22620 ' tox' 4 +22621 ' toy' 4 +22622 ' tra' 4 +22623 ' tre' 4 +22624 ' tri' 4 +22625 ' tro' 4 +22626 ' try' 4 +22627 ' tsp' 4 +22628 ' tub' 4 +22629 ' tud' 4 +22630 ' tug' 4 +22631 ' tul' 4 +22632 ' tum' 4 +22633 ' tun' 4 +22634 ' tup' 4 +22635 ' tur' 4 +22636 ' tut' 4 +22637 ' twe' 4 +22638 ' two' 4 +22639 ' txt' 4 +22640 ' typ' 4 +22641 ' tyr' 4 +22642 ' tá' 4 +22643 ' tä' 4 +22644 ' té' 4 +22645 ' të' 4 +22646 ' tí' 4 +22647 ' tö' 4 +22648 ' tú' 4 +22649 ' uid' 4 +22650 ' uit' 4 +22651 ' ult' 4 +22652 ' uma' 4 +22653 ' umb' 4 +22654 ' una' 4 +22655 ' unb' 4 +22656 ' unc' 4 +22657 ' und' 4 +22658 ' une' 4 +22659 ' unf' 4 +22660 ' ung' 4 +22661 ' unh' 4 +22662 ' uni' 4 +22663 ' unl' 4 +22664 ' unm' 4 +22665 ' uno' 4 +22666 ' uns' 4 +22667 ' unt' 4 +22668 ' unw' 4 +22669 ' upd' 4 +22670 ' upl' 4 +22671 ' upp' 4 +22672 ' ups' 4 +22673 ' upt' 4 +22674 ' urb' 4 +22675 ' ure' 4 +22676 ' urg' 4 +22677 ' uri' 4 +22678 ' url' 4 +22679 ' urn' 4 +22680 ' usb' 4 +22681 ' use' 4 +22682 ' uso' 4 +22683 ' usu' 4 +22684 ' utf' 4 +22685 ' vac' 4 +22686 ' vad' 4 +22687 ' vag' 4 +22688 ' vai' 4 +22689 ' val' 4 +22690 ' van' 4 +22691 ' vap' 4 +22692 ' var' 4 +22693 ' vas' 4 +22694 ' vec' 4 +22695 ' ved' 4 +22696 ' veg' 4 +22697 ' veh' 4 +22698 ' vel' 4 +22699 ' ven' 4 +22700 ' ver' 4 +22701 ' ves' 4 +22702 ' vet' 4 +22703 ' vex' 4 +22704 ' vez' 4 +22705 ' via' 4 +22706 ' vib' 4 +22707 ' vic' 4 +22708 ' vid' 4 +22709 ' vie' 4 +22710 ' vig' 4 +22711 ' vil' 4 +22712 ' vim' 4 +22713 ' vin' 4 +22714 ' vip' 4 +22715 ' vir' 4 +22716 ' vis' 4 +22717 ' vit' 4 +22718 ' viv' 4 +22719 ' viz' 4 +22720 ' voc' 4 +22721 ' vol' 4 +22722 ' vom' 4 +22723 ' von' 4 +22724 ' vor' 4 +22725 ' vos' 4 +22726 ' vot' 4 +22727 ' vou' 4 +22728 ' vow' 4 +22729 ' vox' 4 +22730 ' voy' 4 +22731 ' voz' 4 +22732 ' vra' 4 +22733 ' vue' 4 +22734 ' vul' 4 +22735 ' và' 4 +22736 ' vá' 4 +22737 ' vä' 4 +22738 ' vå' 4 +22739 ' væ' 4 +22740 ' vé' 4 +22741 ' ví' 4 +22742 ' võ' 4 +22743 ' vý' 4 +22744 ' vě' 4 +22745 ' vš' 4 +22746 ' wal' 4 +22747 ' war' 4 +22748 ' was' 4 +22749 ' wat' 4 +22750 ' wav' 4 +22751 ' wax' 4 +22752 ' way' 4 +22753 ' web' 4 +22754 ' wed' 4 +22755 ' wee' 4 +22756 ' weg' 4 +22757 ' wel' 4 +22758 ' wen' 4 +22759 ' wer' 4 +22760 ' wet' 4 +22761 ' whe' 4 +22762 ' who' 4 +22763 ' why' 4 +22764 ' wid' 4 +22765 ' wie' 4 +22766 ' wig' 4 +22767 ' wij' 4 +22768 ' wik' 4 +22769 ' wil' 4 +22770 ' win' 4 +22771 ' wir' 4 +22772 ' wis' 4 +22773 ' wit' 4 +22774 ' wob' 4 +22775 ' wol' 4 +22776 ' wom' 4 +22777 ' won' 4 +22778 ' woo' 4 +22779 ' wor' 4 +22780 ' wow' 4 +22781 ' wra' 4 +22782 ' wre' 4 +22783 ' wsp' 4 +22784 ' wur' 4 +22785 ' www' 4 +22786 ' wä' 4 +22787 ' wł' 4 +22788 ' xen' 4 +22789 ' xml' 4 +22790 ' xxx' 4 +22791 ' xyz' 4 +22792 ' yap' 4 +22793 ' yaw' 4 +22794 ' yen' 4 +22795 ' yer' 4 +22796 ' yes' 4 +22797 ' yet' 4 +22798 ' yog' 4 +22799 ' you' 4 +22800 ' zab' 4 +22801 ' zak' 4 +22802 ' zal' 4 +22803 ' zam' 4 +22804 ' zap' 4 +22805 ' zar' 4 +22806 ' zaw' 4 +22807 ' zen' 4 +22808 ' zer' 4 +22809 ' zig' 4 +22810 ' zij' 4 +22811 ' zip' 4 +22812 ' zon' 4 +22813 ' zoo' 4 +22814 ' zug' 4 +22815 ' zum' 4 +22816 ' zur' 4 +22817 ' zwe' 4 +22818 ' zá' 4 +22819 ' zł' 4 +22820 ' {},' 4 +22821 ' {};' 4 +22822 ' {¶' 4 +22823 ' }).' 4 +22824 ' });' 4 +22825 ' »,' 4 +22826 ' ».' 
4 +22827 ' Ál' 4 +22828 ' Éd' 4 +22829 ' És' 4 +22830 ' Ét' 4 +22831 ' În' 4 +22832 ' às' 4 +22833 ' ál' 4 +22834 ' ár' 4 +22835 ' át' 4 +22836 ' äl' 4 +22837 ' än' 4 +22838 ' är' 4 +22839 ' år' 4 +22840 ' ça' 4 +22841 ' éc' 4 +22842 ' éd' 4 +22843 ' ég' 4 +22844 ' él' 4 +22845 ' én' 4 +22846 ' ép' 4 +22847 ' ér' 4 +22848 ' és' 4 +22849 ' ét' 4 +22850 ' év' 4 +22851 ' éx' 4 +22852 ' în' 4 +22853 ' ór' 4 +22854 ' ön' 4 +22855 ' új' 4 +22856 ' ún' 4 +22857 ' će' 4 +22858 ' či' 4 +22859 ' đi' 4 +22860 ' św' 4 +22861 ' şi' 4 +22862 ' że' 4 +22863 ' ży' 4 +22864 ' že' 4 +22865 ' și' 4 +22866 ' μL' 4 +22867 ' μM' 4 +22868 ' μg' 4 +22869 ' μl' 4 +22870 ' μm' 4 +22871 ' μs' 4 +22872 ' अ' 4 +22873 ' आ' 4 +22874 ' क' 4 +22875 ' ज' 4 +22876 ' त' 4 +22877 ' द' 4 +22878 ' न' 4 +22879 ' प' 4 +22880 ' ब' 4 +22881 ' म' 4 +22882 ' र' 4 +22883 ' ल' 4 +22884 ' व' 4 +22885 ' स' 4 +22886 ' ह' 4 +22887 ' ক' 4 +22888 ' ਦ' 4 +22889 ' ਸ' 4 +22890 ' ப' 4 +22891 ' เ' 4 +22892 ' ở' 4 +22893 ' ἀ' 4 +22894 ' ἐ' 4 +22895 ' \u200b' 4 +22896 ' \u200e' 4 +22897 ' –' 4 +22898 ' —' 4 +22899 ' ―' 4 +22900 ' ‖' 4 +22901 ' ‘' 4 +22902 ' ’' 4 +22903 ' “' 4 +22904 ' ”' 4 +22905 ' „' 4 +22906 ' †' 4 +22907 ' •' 4 +22908 ' …' 4 +22909 ' ′' 4 +22910 ' ›' 4 +22911 ' €' 4 +22912 ' ₹' 4 +22913 ' №' 4 +22914 ' ←' 4 +22915 ' ↑' 4 +22916 ' →' 4 +22917 ' ↓' 4 +22918 ' ↔' 4 +22919 ' ⇒' 4 +22920 ' ⇔' 4 +22921 ' ∀' 4 +22922 ' ∂' 4 +22923 ' ∃' 4 +22924 ' ∅' 4 +22925 ' ∆' 4 +22926 ' ∇' 4 +22927 ' ∈' 4 +22928 ' ∑' 4 +22929 ' −' 4 +22930 ' ∗' 4 +22931 ' ∘' 4 +22932 ' √' 4 +22933 ' ∞' 4 +22934 ' ∧' 4 +22935 ' ∨' 4 +22936 ' ∩' 4 +22937 ' ∪' 4 +22938 ' ∫' 4 +22939 ' ∼' 4 +22940 ' ≃' 4 +22941 ' ≈' 4 +22942 ' ≠' 4 +22943 ' ≡' 4 +22944 ' ≤' 4 +22945 ' ≥' 4 +22946 ' ⊂' 4 +22947 ' ⊆' 4 +22948 ' ⊕' 4 +22949 ' ⊗' 4 +22950 ' ⊥' 4 +22951 ' ⋅' 4 +22952 ' ⋯' 4 +22953 ' │' 4 +22954 ' ├' 4 +22955 ' ╚' 4 +22956 ' █' 4 +22957 ' ░' 4 +22958 ' ■' 4 +22959 ' ►' 4 +22960 ' ●' 4 +22961 ' ★' 4 +22962 ' ♥' 4 +22963 ' ♦' 4 +22964 ' ♪' 4 +22965 ' ✓' 4 +22966 ' ✔' 4 +22967 ' ❤' 4 +22968 ' ⟨' 4 +22969 ' ⟩' 4 +22970 ' 。' 4 +22971 ' 〈' 4 +22972 ' 「' 4 +22973 ' 【' 4 +22974 ' 가' 4 +22975 ' 각' 4 +22976 ' 간' 4 +22977 ' 감' 4 +22978 ' 강' 4 +22979 ' 같' 4 +22980 ' 개' 4 +22981 ' 거' 4 +22982 ' 건' 4 +22983 ' 걸' 4 +22984 ' 검' 4 +22985 ' 것' 4 +22986 ' 게' 4 +22987 ' 결' 4 +22988 ' 경' 4 +22989 ' 계' 4 +22990 ' 고' 4 +22991 ' 공' 4 +22992 ' 과' 4 +22993 ' 관' 4 +22994 ' 광' 4 +22995 ' 교' 4 +22996 ' 구' 4 +22997 ' 국' 4 +22998 ' 군' 4 +22999 ' 권' 4 +23000 ' 규' 4 +23001 ' 그' 4 +23002 ' 근' 4 +23003 ' 금' 4 +23004 ' 기' 4 +23005 ' 김' 4 +23006 ' 나' 4 +23007 ' 날' 4 +23008 ' 남' 4 +23009 ' 내' 4 +23010 ' 네' 4 +23011 ' 노' 4 +23012 ' 높' 4 +23013 ' 누' 4 +23014 ' 눈' 4 +23015 ' 다' 4 +23016 ' 단' 4 +23017 ' 달' 4 +23018 ' 당' 4 +23019 ' 대' 4 +23020 ' 더' 4 +23021 ' 덤' 4 +23022 ' 데' 4 +23023 ' 도' 4 +23024 ' 독' 4 +23025 ' 돌' 4 +23026 ' 동' 4 +23027 ' 되' 4 +23028 ' 된' 4 +23029 ' 두' 4 +23030 ' 뒤' 4 +23031 ' 드' 4 +23032 ' 들' 4 +23033 ' 등' 4 +23034 ' 디' 4 +23035 ' 따' 4 +23036 ' 때' 4 +23037 ' 또' 4 +23038 ' 라' 4 +23039 ' 레' 4 +23040 ' 로' 4 +23041 ' 루' 4 +23042 ' 리' 4 +23043 ' 링' 4 +23044 ' 마' 4 +23045 ' 만' 4 +23046 ' 많' 4 +23047 ' 말' 4 +23048 ' 맞' 4 +23049 ' 매' 4 +23050 ' 메' 4 +23051 ' 명' 4 +23052 ' 모' 4 +23053 ' 목' 4 +23054 ' 못' 4 +23055 ' 무' 4 +23056 ' 문' 4 +23057 ' 물' 4 +23058 ' 미' 4 +23059 ' 민' 4 +23060 ' 및' 4 +23061 ' 바' 4 +23062 ' 박' 4 +23063 ' 반' 4 +23064 ' 받' 4 +23065 ' 발' 4 +23066 ' 밝' 4 +23067 ' 방' 4 +23068 ' 배' 4 +23069 ' 백' 4 +23070 ' 버' 4 +23071 ' 번' 4 +23072 ' 법' 4 +23073 ' 베' 4 +23074 ' 변' 4 +23075 ' 병' 4 +23076 ' 보' 
4 +23077 ' 복' 4 +23078 ' 본' 4 +23079 ' 부' 4 +23080 ' 북' 4 +23081 ' 분' 4 +23082 ' 불' 4 +23083 ' 브' 4 +23084 ' 비' 4 +23085 ' 사' 4 +23086 ' 산' 4 +23087 ' 살' 4 +23088 ' 삼' 4 +23089 ' 상' 4 +23090 ' 새' 4 +23091 ' 생' 4 +23092 ' 서' 4 +23093 ' 선' 4 +23094 ' 설' 4 +23095 ' 성' 4 +23096 ' 세' 4 +23097 ' 소' 4 +23098 ' 속' 4 +23099 ' 손' 4 +23100 ' 수' 4 +23101 ' 순' 4 +23102 ' 스' 4 +23103 ' 승' 4 +23104 ' 시' 4 +23105 ' 신' 4 +23106 ' 실' 4 +23107 ' 심' 4 +23108 ' 아' 4 +23109 ' 안' 4 +23110 ' 않' 4 +23111 ' 알' 4 +23112 ' 앞' 4 +23113 ' 애' 4 +23114 ' 야' 4 +23115 ' 약' 4 +23116 ' 양' 4 +23117 ' 어' 4 +23118 ' 언' 4 +23119 ' 얼' 4 +23120 ' 업' 4 +23121 ' 없' 4 +23122 ' 에' 4 +23123 ' 여' 4 +23124 ' 역' 4 +23125 ' 연' 4 +23126 ' 열' 4 +23127 ' 영' 4 +23128 ' 예' 4 +23129 ' 오' 4 +23130 ' 온' 4 +23131 ' 올' 4 +23132 ' 완' 4 +23133 ' 왕' 4 +23134 ' 외' 4 +23135 ' 요' 4 +23136 ' 용' 4 +23137 ' 우' 4 +23138 ' 운' 4 +23139 ' 원' 4 +23140 ' 월' 4 +23141 ' 위' 4 +23142 ' 유' 4 +23143 ' 음' 4 +23144 ' 의' 4 +23145 ' 이' 4 +23146 ' 인' 4 +23147 ' 일' 4 +23148 ' 임' 4 +23149 ' 입' 4 +23150 ' 있' 4 +23151 ' 자' 4 +23152 ' 작' 4 +23153 ' 잘' 4 +23154 ' 장' 4 +23155 ' 재' 4 +23156 ' 저' 4 +23157 ' 적' 4 +23158 ' 전' 4 +23159 ' 점' 4 +23160 ' 정' 4 +23161 ' 제' 4 +23162 ' 조' 4 +23163 ' 존' 4 +23164 ' 종' 4 +23165 ' 좋' 4 +23166 ' 주' 4 +23167 ' 죽' 4 +23168 ' 준' 4 +23169 ' 중' 4 +23170 ' 증' 4 +23171 ' 지' 4 +23172 ' 직' 4 +23173 ' 진' 4 +23174 ' 집' 4 +23175 ' 차' 4 +23176 ' 참' 4 +23177 ' 창' 4 +23178 ' 찾' 4 +23179 ' 채' 4 +23180 ' 책' 4 +23181 ' 처' 4 +23182 ' 천' 4 +23183 ' 철' 4 +23184 ' 첫' 4 +23185 ' 청' 4 +23186 ' 체' 4 +23187 ' 초' 4 +23188 ' 총' 4 +23189 ' 최' 4 +23190 ' 추' 4 +23191 ' 축' 4 +23192 ' 출' 4 +23193 ' 충' 4 +23194 ' 취' 4 +23195 ' 치' 4 +23196 ' 친' 4 +23197 ' 카' 4 +23198 ' 코' 4 +23199 ' 크' 4 +23200 ' 클' 4 +23201 ' 타' 4 +23202 ' 태' 4 +23203 ' 테' 4 +23204 ' 토' 4 +23205 ' 통' 4 +23206 ' 투' 4 +23207 ' 트' 4 +23208 ' 특' 4 +23209 ' 팀' 4 +23210 ' 파' 4 +23211 ' 판' 4 +23212 ' 패' 4 +23213 ' 페' 4 +23214 ' 편' 4 +23215 ' 평' 4 +23216 ' 포' 4 +23217 ' 표' 4 +23218 ' 프' 4 +23219 ' 플' 4 +23220 ' 피' 4 +23221 ' 필' 4 +23222 ' 하' 4 +23223 ' 학' 4 +23224 ' 한' 4 +23225 ' 할' 4 +23226 ' 함' 4 +23227 ' 합' 4 +23228 ' 항' 4 +23229 ' 해' 4 +23230 ' 했' 4 +23231 ' 행' 4 +23232 ' 현' 4 +23233 ' 형' 4 +23234 ' 호' 4 +23235 ' 화' 4 +23236 ' 확' 4 +23237 ' 환' 4 +23238 ' 활' 4 +23239 ' 황' 4 +23240 ' 회' 4 +23241 ' 후' 4 +23242 ' 히' 4 +23243 ' \ufeff' 4 +23244 ' (' 4 +23245 ' ,' 4 +23246 ' :' 4 +23247 ' �' 4 +23248 '!!!!' 4 +23249 '!");' 4 +23250 '!’' 4 +23251 '!”' 4 +23252 '""""' 4 +23253 '")))' 4 +23254 '")),' 4 +23255 '"));' 4 +23256 '"...' 4 +23257 '"/><' 4 +23258 '":["' 4 +23259 '":{"' 4 +23260 '">' 4 +23264 '"]["' 4 +23265 '"—' 4 +23266 '####' 4 +23267 '$$$$' 4 +23268 '$’' 4 +23269 '%%%%' 4 +23270 "')))" 4 +23271 "'))," 4 +23272 "'))." 4 +23273 "'));" 4 +23274 "')->" 4 +23275 "']))" 4 +23276 "'])," 4 +23277 "'])." 4 +23278 "']):" 4 +23279 "']);" 4 +23280 "']==" 4 +23281 "']['" 4 +23282 "']]," 4 +23283 '("--' 4 +23284 '("./' 4 +23285 "(''," 4 +23286 "('--" 4 +23287 "('./" 4 +23288 '()))' 4 +23289 '()),' 4 +23290 '()).' 4 +23291 '()):' 4 +23292 '());' 4 +23293 '()->' 4 +23294 '()' 4 +23340 '="${' 4 +23341 '="@+' 4 +23342 "='')" 4 +23343 '=-=-' 4 +23344 '====' 4 +23345 '=”' 4 +23346 '>();' 4 +23347 '>>>>' 4 +23348 '?,?,' 4 +23349 '????' 
4 +23350 '?’' 4 +23351 '?”' 4 +23352 '@@@@' 4 +23353 'AAAA' 4 +23354 'ABEL' 4 +23355 'ABLE' 4 +23356 'ACES' 4 +23357 'ACHE' 4 +23358 'ADDR' 4 +23359 'ADER' 4 +23360 'AGES' 4 +23361 'AIDS' 4 +23362 'ALLY' 4 +23363 'ALOG' 4 +23364 'ALSE' 4 +23365 'ALTH' 4 +23366 'AMES' 4 +23367 'ANCE' 4 +23368 'ANGE' 4 +23369 'ANGO' 4 +23370 'ANTS' 4 +23371 'ARCH' 4 +23372 'ARGS' 4 +23373 'ATAL' 4 +23374 'ATCH' 4 +23375 'ATED' 4 +23376 'ATEG' 4 +23377 'ATER' 4 +23378 'ATES' 4 +23379 'ATIC' 4 +23380 'ATOM' 4 +23381 'ATOR' 4 +23382 'ATTR' 4 +23383 'AUTH' 4 +23384 'AUTO' 4 +23385 'Adam' 4 +23386 'Addr' 4 +23387 'Alan' 4 +23388 'Alex' 4 +23389 'Also' 4 +23390 'Anal' 4 +23391 'Andy' 4 +23392 'Anim' 4 +23393 'Anna' 4 +23394 'Anne' 4 +23395 'Anth' 4 +23396 'Anti' 4 +23397 'Appe' 4 +23398 'Apps' 4 +23399 'Arab' 4 +23400 'Arch' 4 +23401 'Area' 4 +23402 'Args' 4 +23403 'Asia' 4 +23404 'Atom' 4 +23405 'Attr' 4 +23406 'Auth' 4 +23407 'Auto' 4 +23408 'Axes' 4 +23409 'Axis' 4 +23410 'BACK' 4 +23411 'BASE' 4 +23412 'BERT' 4 +23413 'BITS' 4 +23414 'BLUE' 4 +23415 'BOOK' 4 +23416 'BOOL' 4 +23417 'BUFF' 4 +23418 'BYTE' 4 +23419 'Baby' 4 +23420 'Back' 4 +23421 'Ball' 4 +23422 'Band' 4 +23423 'Bang' 4 +23424 'Bank' 4 +23425 'Base' 4 +23426 'Beam' 4 +23427 'Bean' 4 +23428 'Beat' 4 +23429 'Bell' 4 +23430 'Bern' 4 +23431 'Bert' 4 +23432 'Best' 4 +23433 'Beta' 4 +23434 'Bias' 4 +23435 'Bill' 4 +23436 'Bind' 4 +23437 'Bits' 4 +23438 'Blob' 4 +23439 'Blog' 4 +23440 'Blue' 4 +23441 'Blur' 4 +23442 'Body' 4 +23443 'Bold' 4 +23444 'Book' 4 +23445 'Bool' 4 +23446 'Boot' 4 +23447 'Born' 4 +23448 'Boss' 4 +23449 'Both' 4 +23450 'Brad' 4 +23451 'Brit' 4 +23452 'Bron' 4 +23453 'Buff' 4 +23454 'Burn' 4 +23455 'ById' 4 +23456 'Byte' 4 +23457 'CADE' 4 +23458 'CALL' 4 +23459 'CASE' 4 +23460 'CAST' 4 +23461 'CCCC' 4 +23462 'CENT' 4 +23463 'CEPT' 4 +23464 'CHAR' 4 +23465 'CLUD' 4 +23466 'CLUS' 4 +23467 'CODE' 4 +23468 'COMM' 4 +23469 'COMP' 4 +23470 'COND' 4 +23471 'CONF' 4 +23472 'CONN' 4 +23473 'CONT' 4 +23474 'COPY' 4 +23475 'CORE' 4 +23476 'COUN' 4 +23477 'CTOR' 4 +23478 'CTRL' 4 +23479 'CUDA' 4 +23480 'Calc' 4 +23481 'Call' 4 +23482 'Camb' 4 +23483 'Camp' 4 +23484 'Cand' 4 +23485 'Capt' 4 +23486 'Card' 4 +23487 'Care' 4 +23488 'Carl' 4 +23489 'Cart' 4 +23490 'Case' 4 +23491 'Cash' 4 +23492 'Cast' 4 +23493 'Cath' 4 +23494 'Cell' 4 +23495 'Cent' 4 +23496 'Cert' 4 +23497 'Chan' 4 +23498 'Chap' 4 +23499 'Char' 4 +23500 'Chat' 4 +23501 'Chem' 4 +23502 'Chen' 4 +23503 'Chip' 4 +23504 'Circ' 4 +23505 'City' 4 +23506 'Clar' 4 +23507 'Clip' 4 +23508 'Club' 4 +23509 'Code' 4 +23510 'Coin' 4 +23511 'Cold' 4 +23512 'Cole' 4 +23513 'Coll' 4 +23514 'Cols' 4 +23515 'Comb' 4 +23516 'Come' 4 +23517 'Comm' 4 +23518 'Comp' 4 +23519 'Cond' 4 +23520 'Conf' 4 +23521 'Cong' 4 +23522 'Conn' 4 +23523 'Cons' 4 +23524 'Cont' 4 +23525 'Conv' 4 +23526 'Cook' 4 +23527 'Cool' 4 +23528 'Copy' 4 +23529 'Core' 4 +23530 'Corn' 4 +23531 'Corp' 4 +23532 'Cost' 4 +23533 'Cour' 4 +23534 'Cred' 4 +23535 'Crit' 4 +23536 'Crop' 4 +23537 'Ctrl' 4 +23538 'Cube' 4 +23539 'Curr' 4 +23540 'DATA' 4 +23541 'DATE' 4 +23542 'DECL' 4 +23543 'DESC' 4 +23544 'DIFF' 4 +23545 'DIST' 4 +23546 'DONE' 4 +23547 'DOWN' 4 +23548 'DRAW' 4 +23549 'DROP' 4 +23550 'Damn' 4 +23551 'Dark' 4 +23552 'Dash' 4 +23553 'Data' 4 +23554 'Date' 4 +23555 'Dave' 4 +23556 'Days' 4 +23557 'Dead' 4 +23558 'Dear' 4 +23559 'Decl' 4 +23560 'Deep' 4 +23561 'Dele' 4 +23562 'Demo' 4 +23563 'Desc' 4 +23564 'Dest' 4 +23565 'Diam' 4 +23566 'Dick' 4 +23567 'Dict' 4 +23568 'Diff' 4 +23569 'Dire' 4 +23570 'Disc' 4 +23571 'Disk' 4 
+23572 'Disp' 4 +23573 'Dist' 4 +23574 'Dock' 4 +23575 'Docs' 4 +23576 'Does' 4 +23577 'Done' 4 +23578 'Door' 4 +23579 'Doug' 4 +23580 'Down' 4 +23581 'Drag' 4 +23582 'Draw' 4 +23583 'Drop' 4 +23584 'Drug' 4 +23585 'Dump' 4 +23586 'EDIT' 4 +23587 'EEEE' 4 +23588 'EGIN' 4 +23589 'EMPL' 4 +23590 'ENCE' 4 +23591 'ENCY' 4 +23592 'ENER' 4 +23593 'ENSE' 4 +23594 'ENTS' 4 +23595 'ERIC' 4 +23596 'ESCO' 4 +23597 'EXEC' 4 +23598 'EXIT' 4 +23599 'Each' 4 +23600 'East' 4 +23601 'Easy' 4 +23602 'Echo' 4 +23603 'Edge' 4 +23604 'Edit' 4 +23605 'Educ' 4 +23606 'Elem' 4 +23607 'Else' 4 +23608 'Emer' 4 +23609 'Emit' 4 +23610 'Enum' 4 +23611 'Eric' 4 +23612 'Euro' 4 +23613 'Eval' 4 +23614 'Even' 4 +23615 'Ever' 4 +23616 'Exec' 4 +23617 'Exit' 4 +23618 'Expl' 4 +23619 'Expr' 4 +23620 'FACE' 4 +23621 'FAIL' 4 +23622 'FAST' 4 +23623 'FFER' 4 +23624 'FFFF' 4 +23625 'FILE' 4 +23626 'FLAG' 4 +23627 'FLOW' 4 +23628 'FONT' 4 +23629 'FORE' 4 +23630 'FORM' 4 +23631 'FREE' 4 +23632 'FROM' 4 +23633 'FULL' 4 +23634 'FUNC' 4 +23635 'Face' 4 +23636 'Fact' 4 +23637 'Fail' 4 +23638 'Fair' 4 +23639 'Fake' 4 +23640 'Fall' 4 +23641 'Farm' 4 +23642 'Fast' 4 +23643 'Feed' 4 +23644 'Feel' 4 +23645 'File' 4 +23646 'Fill' 4 +23647 'Film' 4 +23648 'Find' 4 +23649 'Fine' 4 +23650 'Fire' 4 +23651 'Fish' 4 +23652 'Five' 4 +23653 'Flag' 4 +23654 'Flat' 4 +23655 'Flex' 4 +23656 'Flip' 4 +23657 'Flor' 4 +23658 'Flow' 4 +23659 'Fold' 4 +23660 'Font' 4 +23661 'Food' 4 +23662 'Foot' 4 +23663 'Ford' 4 +23664 'Fore' 4 +23665 'Form' 4 +23666 'Fort' 4 +23667 'Four' 4 +23668 'Frag' 4 +23669 'Fran' 4 +23670 'Fred' 4 +23671 'Free' 4 +23672 'From' 4 +23673 'Fuck' 4 +23674 'Full' 4 +23675 'Func' 4 +23676 'Fund' 4 +23677 'Für' 4 +23678 'GPIO' 4 +23679 'GRAM' 4 +23680 'GUID' 4 +23681 'Gain' 4 +23682 'Game' 4 +23683 'Gary' 4 +23684 'Gate' 4 +23685 'Gene' 4 +23686 'Geom' 4 +23687 'Germ' 4 +23688 'Gest' 4 +23689 'Girl' 4 +23690 'Give' 4 +23691 'Glob' 4 +23692 'Goal' 4 +23693 'Gold' 4 +23694 'Good' 4 +23695 'Grab' 4 +23696 'Grad' 4 +23697 'Gram' 4 +23698 'Gran' 4 +23699 'Gray' 4 +23700 'Greg' 4 +23701 'Grid' 4 +23702 'Grow' 4 +23703 'Guid' 4 +23704 'HAND' 4 +23705 'HASH' 4 +23706 'HEAD' 4 +23707 'HERE' 4 +23708 'HIGH' 4 +23709 'HOME' 4 +23710 'HOST' 4 +23711 'HOUT' 4 +23712 'HTML' 4 +23713 'HTTP' 4 +23714 'Half' 4 +23715 'Hall' 4 +23716 'Hand' 4 +23717 'Hang' 4 +23718 'Hard' 4 +23719 'Hart' 4 +23720 'Hash' 4 +23721 'Have' 4 +23722 'Head' 4 +23723 'Heap' 4 +23724 'Heat' 4 +23725 'Hell' 4 +23726 'Help' 4 +23727 'Here' 4 +23728 'Hero' 4 +23729 'Hide' 4 +23730 'High' 4 +23731 'Hill' 4 +23732 'Hint' 4 +23733 'Hist' 4 +23734 'Hold' 4 +23735 'Holy' 4 +23736 'Home' 4 +23737 'Hong' 4 +23738 'Hook' 4 +23739 'Hope' 4 +23740 'Host' 4 +23741 'Hour' 4 +23742 'Html' 4 +23743 'Http' 4 +23744 'Hung' 4 +23745 'IBLE' 4 +23746 'IBUT' 4 +23747 'ICAL' 4 +23748 'ICAg' 4 +23749 'ICES' 4 +23750 'ICLE' 4 +23751 'ICON' 4 +23752 'IDER' 4 +23753 'IDTH' 4 +23754 'IEEE' 4 +23755 'IENT' 4 +23756 'IFIC' 4 +23757 'IGHT' 4 +23758 'ILED' 4 +23759 'ILLE' 4 +23760 'IMAL' 4 +23761 'IMIT' 4 +23762 'INCT' 4 +23763 'INES' 4 +23764 'INFO' 4 +23765 'INGS' 4 +23766 'INIT' 4 +23767 'INST' 4 +23768 'IONS' 4 +23769 'IOUS' 4 +23770 'IRED' 4 +23771 'IRST' 4 +23772 'ISBN' 4 +23773 'ISON' 4 +23774 'ISTR' 4 +23775 'ISTS' 4 +23776 'ITAL' 4 +23777 'ITCH' 4 +23778 'ITED' 4 +23779 'ITEM' 4 +23780 'ITER' 4 +23781 'ITES' 4 +23782 'ITLE' 4 +23783 'ITOR' 4 +23784 'IVER' 4 +23785 'IZED' 4 +23786 'IZER' 4 +23787 'Icon' 4 +23788 'Idle' 4 +23789 'Impl' 4 +23790 'Infl' 4 +23791 'Info' 4 +23792 'Init' 4 +23793 'Insp' 4 
+23794 'Inst' 4 +23795 'Into' 4 +23796 'Iran' 4 +23797 'Iron' 4 +23798 'Ital' 4 +23799 'Item' 4 +23800 'Iter' 4 +23801 'IÓN' 4 +23802 'JECT' 4 +23803 'JOIN' 4 +23804 'JSON' 4 +23805 'JUST' 4 +23806 'Jack' 4 +23807 'Jane' 4 +23808 'Java' 4 +23809 'Jean' 4 +23810 'Jeff' 4 +23811 'Jess' 4 +23812 'Jobs' 4 +23813 'John' 4 +23814 'Join' 4 +23815 'Jose' 4 +23816 'Josh' 4 +23817 'Json' 4 +23818 'July' 4 +23819 'Jump' 4 +23820 'June' 4 +23821 'Just' 4 +23822 'KEEP' 4 +23823 'Kate' 4 +23824 'Keep' 4 +23825 'Kenn' 4 +23826 'Keys' 4 +23827 'Kill' 4 +23828 'Kind' 4 +23829 'King' 4 +23830 'Know' 4 +23831 'LAND' 4 +23832 'LANG' 4 +23833 'LAST' 4 +23834 'LDAP' 4 +23835 'LEAN' 4 +23836 'LEAR' 4 +23837 'LECT' 4 +23838 'LEFT' 4 +23839 'LETE' 4 +23840 'LINE' 4 +23841 'LINK' 4 +23842 'LIST' 4 +23843 'LOAD' 4 +23844 'LOAT' 4 +23845 'LOCK' 4 +23846 'LONG' 4 +23847 'LOOP' 4 +23848 'LSTM' 4 +23849 'Lady' 4 +23850 'Lake' 4 +23851 'Land' 4 +23852 'Lang' 4 +23853 'Last' 4 +23854 'Late' 4 +23855 'Lazy' 4 +23856 'Lead' 4 +23857 'Leaf' 4 +23858 'Lean' 4 +23859 'Lear' 4 +23860 'Left' 4 +23861 'Leon' 4 +23862 'Less' 4 +23863 'Life' 4 +23864 'Like' 4 +23865 'Line' 4 +23866 'Link' 4 +23867 'List' 4 +23868 'Lite' 4 +23869 'Live' 4 +23870 'Load' 4 +23871 'Lock' 4 +23872 'Logo' 4 +23873 'Long' 4 +23874 'Look' 4 +23875 'Loop' 4 +23876 'Lord' 4 +23877 'Loss' 4 +23878 'Lost' 4 +23879 'Love' 4 +23880 'Luke' 4 +23881 'MAIL' 4 +23882 'MAIN' 4 +23883 'MAKE' 4 +23884 'MARK' 4 +23885 'MASK' 4 +23886 'MBOL' 4 +23887 'MENT' 4 +23888 'MENU' 4 +23889 'MESS' 4 +23890 'META' 4 +23891 'MISS' 4 +23892 'MMMM' 4 +23893 'MODE' 4 +23894 'MORE' 4 +23895 'MULT' 4 +23896 'Mach' 4 +23897 'Made' 4 +23898 'Magn' 4 +23899 'Mail' 4 +23900 'Main' 4 +23901 'Make' 4 +23902 'Male' 4 +23903 'Many' 4 +23904 'Maps' 4 +23905 'Marc' 4 +23906 'Marg' 4 +23907 'Mark' 4 +23908 'Mart' 4 +23909 'Mary' 4 +23910 'Mask' 4 +23911 'Mass' 4 +23912 'Math' 4 +23913 'Matt' 4 +23914 'Mean' 4 +23915 'Meet' 4 +23916 'Memo' 4 +23917 'Menu' 4 +23918 'Merc' 4 +23919 'Mesh' 4 +23920 'Mess' 4 +23921 'Meta' 4 +23922 'Mich' 4 +23923 'Mike' 4 +23924 'Mill' 4 +23925 'Mind' 4 +23926 'Mini' 4 +23927 'Misc' 4 +23928 'Miss' 4 +23929 'Mock' 4 +23930 'Mode' 4 +23931 'Mont' 4 +23932 'Moon' 4 +23933 'More' 4 +23934 'Most' 4 +23935 'Move' 4 +23936 'Much' 4 +23937 'Mult' 4 +23938 'Must' 4 +23939 'NAME' 4 +23940 'NASA' 4 +23941 'NECT' 4 +23942 'NESS' 4 +23943 'NEWS' 4 +23944 'NEXT' 4 +23945 'NING' 4 +23946 'NODE' 4 +23947 'NONE' 4 +23948 'NOTE' 4 +23949 'NULL' 4 +23950 'Name' 4 +23951 'Near' 4 +23952 'Need' 4 +23953 'Neil' 4 +23954 'News' 4 +23955 'Next' 4 +23956 'Nice' 4 +23957 'Nick' 4 +23958 'Node' 4 +23959 'Nome' 4 +23960 'None' 4 +23961 'Norm' 4 +23962 'Note' 4 +23963 'Nova' 4 +23964 'Null' 4 +23965 'Não' 4 +23966 'ONES' 4 +23967 'ONLY' 4 +23968 'OPEN' 4 +23969 'OPER' 4 +23970 'ORIZ' 4 +23971 'OTAL' 4 +23972 'OUND' 4 +23973 'OVER' 4 +23974 'OWER' 4 +23975 'Ohio' 4 +23976 'Okay' 4 +23977 'Once' 4 +23978 'Only' 4 +23979 'Oops' 4 +23980 'Open' 4 +23981 'Oper' 4 +23982 'Opts' 4 +23983 'Orig' 4 +23984 'Over' 4 +23985 'PACK' 4 +23986 'PAGE' 4 +23987 'PART' 4 +23988 'PASS' 4 +23989 'PATH' 4 +23990 'PECT' 4 +23991 'PING' 4 +23992 'PLAY' 4 +23993 'PORT' 4 +23994 'POSE' 4 +23995 'POST' 4 +23996 'PRES' 4 +23997 'PROC' 4 +23998 'PROP' 4 +23999 'PUBL' 4 +24000 'Pack' 4 +24001 'Page' 4 +24002 'Pain' 4 +24003 'Pair' 4 +24004 'Pane' 4 +24005 'Para' 4 +24006 'Park' 4 +24007 'Part' 4 +24008 'Pass' 4 +24009 'Past' 4 +24010 'Path' 4 +24011 'Paul' 4 +24012 'Pear' 4 +24013 'Peer' 4 +24014 'Perm' 4 +24015 'Pers' 4 
+24016 'Phil' 4 +24017 'Phot' 4 +24018 'Phys' 4 +24019 'Pick' 4 +24020 'Pier' 4 +24021 'Ping' 4 +24022 'Pipe' 4 +24023 'Plan' 4 +24024 'Play' 4 +24025 'Plot' 4 +24026 'Plug' 4 +24027 'Plus' 4 +24028 'Poll' 4 +24029 'Poly' 4 +24030 'Pont' 4 +24031 'Pool' 4 +24032 'Poor' 4 +24033 'Port' 4 +24034 'Pose' 4 +24035 'Poss' 4 +24036 'Post' 4 +24037 'Pour' 4 +24038 'Prec' 4 +24039 'Pred' 4 +24040 'Pref' 4 +24041 'Prem' 4 +24042 'Prep' 4 +24043 'Pres' 4 +24044 'Prev' 4 +24045 'Prim' 4 +24046 'Priv' 4 +24047 'Prob' 4 +24048 'Proc' 4 +24049 'Prod' 4 +24050 'Prof' 4 +24051 'Prog' 4 +24052 'Proj' 4 +24053 'Prom' 4 +24054 'Prop' 4 +24055 'Pros' 4 +24056 'Prot' 4 +24057 'Prov' 4 +24058 'Pull' 4 +24059 'Pure' 4 +24060 'Push' 4 +24061 'QUAL' 4 +24062 'Quad' 4 +24063 'Qual' 4 +24064 'Quit' 4 +24065 'Qué' 4 +24066 'RATE' 4 +24067 'READ' 4 +24068 'REAL' 4 +24069 'REAM' 4 +24070 'RECT' 4 +24071 'RENT' 4 +24072 'REPL' 4 +24073 'REQU' 4 +24074 'RESH' 4 +24075 'RESS' 4 +24076 'REST' 4 +24077 'RGBA' 4 +24078 'RIPT' 4 +24079 'RNAs' 4 +24080 'ROLE' 4 +24081 'ROLL' 4 +24082 'ROOT' 4 +24083 'ROUP' 4 +24084 'ROUT' 4 +24085 'Race' 4 +24086 'Radi' 4 +24087 'Rail' 4 +24088 'Rain' 4 +24089 'Rand' 4 +24090 'Rank' 4 +24091 'Rate' 4 +24092 'ReLU' 4 +24093 'Read' 4 +24094 'Real' 4 +24095 'Rece' 4 +24096 'Rect' 4 +24097 'Repo' 4 +24098 'Resp' 4 +24099 'Rest' 4 +24100 'Rich' 4 +24101 'Rick' 4 +24102 'Ring' 4 +24103 'Risk' 4 +24104 'Road' 4 +24105 'Rock' 4 +24106 'Role' 4 +24107 'Roll' 4 +24108 'Room' 4 +24109 'Root' 4 +24110 'Rose' 4 +24111 'Ross' 4 +24112 'Rout' 4 +24113 'Rows' 4 +24114 'Ruby' 4 +24115 'Rule' 4 +24116 'Russ' 4 +24117 'Ryan' 4 +24118 'SAME' 4 +24119 'SCAN' 4 +24120 'SELF' 4 +24121 'SENT' 4 +24122 'SEQU' 4 +24123 'SHOT' 4 +24124 'SIGN' 4 +24125 'SION' 4 +24126 'SIZE' 4 +24127 'SKIP' 4 +24128 'SMTP' 4 +24129 'SPEC' 4 +24130 'STAR' 4 +24131 'STAT' 4 +24132 'STEM' 4 +24133 'STEP' 4 +24134 'STER' 4 +24135 'STIT' 4 +24136 'STOP' 4 +24137 'STRU' 4 +24138 'Safe' 4 +24139 'Sale' 4 +24140 'Salt' 4 +24141 'Same' 4 +24142 'Sand' 4 +24143 'Sans' 4 +24144 'Save' 4 +24145 'Scal' 4 +24146 'Scan' 4 +24147 'Sche' 4 +24148 'Seed' 4 +24149 'Seek' 4 +24150 'Self' 4 +24151 'Sell' 4 +24152 'Send' 4 +24153 'Sent' 4 +24154 'Sept' 4 +24155 'Sequ' 4 +24156 'Serv' 4 +24157 'Sets' 4 +24158 'Shar' 4 +24159 'Sher' 4 +24160 'Ship' 4 +24161 'Shop' 4 +24162 'Shot' 4 +24163 'Show' 4 +24164 'Side' 4 +24165 'Sign' 4 +24166 'Sing' 4 +24167 'Sink' 4 +24168 'Site' 4 +24169 'Size' 4 +24170 'Skin' 4 +24171 'Skip' 4 +24172 'Slot' 4 +24173 'Slow' 4 +24174 'Snap' 4 +24175 'Snow' 4 +24176 'Soft' 4 +24177 'Sold' 4 +24178 'Some' 4 +24179 'Song' 4 +24180 'Sony' 4 +24181 'Soon' 4 +24182 'Sort' 4 +24183 'Soup' 4 +24184 'Span' 4 +24185 'Spec' 4 +24186 'Spin' 4 +24187 'Spot' 4 +24188 'Stan' 4 +24189 'Star' 4 +24190 'Stat' 4 +24191 'Stay' 4 +24192 'Step' 4 +24193 'Stmt' 4 +24194 'Stop' 4 +24195 'Stra' 4 +24196 'Stre' 4 +24197 'Stub' 4 +24198 'Stud' 4 +24199 'Such' 4 +24200 'Suit' 4 +24201 'Supp' 4 +24202 'Sure' 4 +24203 'Swap' 4 +24204 'Sync' 4 +24205 'TAIN' 4 +24206 'TASK' 4 +24207 'TEMP' 4 +24208 'TERN' 4 +24209 'TEST' 4 +24210 'TEXT' 4 +24211 'THER' 4 +24212 'THIS' 4 +24213 'THON' 4 +24214 'TIME' 4 +24215 'TING' 4 +24216 'TION' 4 +24217 'TODO' 4 +24218 'TOOL' 4 +24219 'TRAN' 4 +24220 'TRUE' 4 +24221 'TYPE' 4 +24222 'Tabs' 4 +24223 'Tags' 4 +24224 'Tail' 4 +24225 'Take' 4 +24226 'Talk' 4 +24227 'Tang' 4 +24228 'Task' 4 +24229 'Team' 4 +24230 'Tech' 4 +24231 'Tele' 4 +24232 'Tell' 4 +24233 'Temp' 4 +24234 'Term' 4 +24235 'Test' 4 +24236 'Text' 4 +24237 'Than' 4 
+24238 'That' 4 +24239 'Then' 4 +24240 'Ther' 4 +24241 'They' 4 +24242 'This' 4 +24243 'Thus' 4 +24244 'Tick' 4 +24245 'Tile' 4 +24246 'Time' 4 +24247 'Tipo' 4 +24248 'Tips' 4 +24249 'Todo' 4 +24250 'Tony' 4 +24251 'Tool' 4 +24252 'Tour' 4 +24253 'Town' 4 +24254 'Trad' 4 +24255 'Tree' 4 +24256 'Trim' 4 +24257 'Trip' 4 +24258 'True' 4 +24259 'Tube' 4 +24260 'Turn' 4 +24261 'Type' 4 +24262 'Tên' 4 +24263 'UBLE' 4 +24264 'UILD' 4 +24265 'UINT' 4 +24266 'UInt' 4 +24267 'ULAR' 4 +24268 'UNIT' 4 +24269 'URAL' 4 +24270 'URES' 4 +24271 'USED' 4 +24272 'USER' 4 +24273 'UUID' 4 +24274 'Uint' 4 +24275 'Undo' 4 +24276 'Unit' 4 +24277 'Unix' 4 +24278 'Upon' 4 +24279 'Urls' 4 +24280 'Used' 4 +24281 'User' 4 +24282 'Util' 4 +24283 'VARI' 4 +24284 'VENT' 4 +24285 'VERS' 4 +24286 'VERT' 4 +24287 'VICE' 4 +24288 'VIEW' 4 +24289 'Vari' 4 +24290 'Vars' 4 +24291 'Verb' 4 +24292 'Vers' 4 +24293 'Vert' 4 +24294 'Very' 4 +24295 'Vict' 4 +24296 'Viet' 4 +24297 'View' 4 +24298 'Vill' 4 +24299 'Viol' 4 +24300 'Void' 4 +24301 'Vote' 4 +24302 'Vous' 4 +24303 'WAIT' 4 +24304 'WARD' 4 +24305 'WARE' 4 +24306 'WARN' 4 +24307 'WAYS' 4 +24308 'WEEN' 4 +24309 'WHAT' 4 +24310 'WISE' 4 +24311 'WITH' 4 +24312 'WORD' 4 +24313 'WORK' 4 +24314 'Wait' 4 +24315 'Walk' 4 +24316 'Wall' 4 +24317 'Wang' 4 +24318 'Want' 4 +24319 'Warn' 4 +24320 'Wave' 4 +24321 'Weak' 4 +24322 'Week' 4 +24323 'Well' 4 +24324 'Were' 4 +24325 'West' 4 +24326 'What' 4 +24327 'When' 4 +24328 'Whit' 4 +24329 'Wide' 4 +24330 'Wiki' 4 +24331 'Wild' 4 +24332 'Will' 4 +24333 'Wind' 4 +24334 'Wire' 4 +24335 'With' 4 +24336 'Wolf' 4 +24337 'Wood' 4 +24338 'Word' 4 +24339 'Work' 4 +24340 'Wrap' 4 +24341 'Writ' 4 +24342 'XXXX' 4 +24343 'YEAR' 4 +24344 'YYYY' 4 +24345 'Yang' 4 +24346 'Yeah' 4 +24347 'Year' 4 +24348 'York' 4 +24349 'Your' 4 +24350 'ZERO' 4 +24351 'ZONE' 4 +24352 'Zero' 4 +24353 'Zone' 4 +24354 'Zoom' 4 +24355 '\\\\\\\\' 4 +24356 '])))' 4 +24357 '])),' 4 +24358 ']));' 4 +24359 '^^^^' 4 +24360 '^−' 4 +24361 '__("' 4 +24362 '__()' 4 +24363 '____' 4 +24364 'aaaa' 4 +24365 'abad' 4 +24366 'abal' 4 +24367 'aban' 4 +24368 'abar' 4 +24369 'abbr' 4 +24370 'abcd' 4 +24371 'abei' 4 +24372 'abel' 4 +24373 'aben' 4 +24374 'aber' 4 +24375 'abet' 4 +24376 'abil' 4 +24377 'abin' 4 +24378 'abis' 4 +24379 'abit' 4 +24380 'abla' 4 +24381 'able' 4 +24382 'ablo' 4 +24383 'ably' 4 +24384 'abol' 4 +24385 'abor' 4 +24386 'abul' 4 +24387 'abus' 4 +24388 'abwe' 4 +24389 'acao' 4 +24390 'acas' 4 +24391 'acci' 4 +24392 'acco' 4 +24393 'acea' 4 +24394 'aced' 4 +24395 'acer' 4 +24396 'aces' 4 +24397 'acet' 4 +24398 'acey' 4 +24399 'acha' 4 +24400 'ache' 4 +24401 'achi' 4 +24402 'acho' 4 +24403 'acht' 4 +24404 'achu' 4 +24405 'achy' 4 +24406 'acia' 4 +24407 'acic' 4 +24408 'acid' 4 +24409 'acin' 4 +24410 'acio' 4 +24411 'acks' 4 +24412 'acle' 4 +24413 'acon' 4 +24414 'acos' 4 +24415 'acre' 4 +24416 'acro' 4 +24417 'acts' 4 +24418 'acus' 4 +24419 'ací' 4 +24420 'adal' 4 +24421 'adam' 4 +24422 'adan' 4 +24423 'adas' 4 +24424 'aday' 4 +24425 'addr' 4 +24426 'addy' 4 +24427 'aded' 4 +24428 'adel' 4 +24429 'adem' 4 +24430 'aden' 4 +24431 'ader' 4 +24432 'ades' 4 +24433 'adia' 4 +24434 'adic' 4 +24435 'adin' 4 +24436 'adir' 4 +24437 'adoc' 4 +24438 'ador' 4 +24439 'ados' 4 +24440 'adow' 4 +24441 'adó' 4 +24442 'aeda' 4 +24443 'afen' 4 +24444 'affe' 4 +24445 'afia' 4 +24446 'afka' 4 +24447 'afé' 4 +24448 'agan' 4 +24449 'agar' 4 +24450 'agas' 4 +24451 'aged' 4 +24452 'agem' 4 +24453 'agen' 4 +24454 'ager' 4 +24455 'ages' 4 +24456 'agic' 4 +24457 'agin' 4 +24458 'agit' 4 +24459 'agle' 4 
+24460 'agli' 4 +24461 'agma' 4 +24462 'agna' 4 +24463 'agne' 4 +24464 'agog' 4 +24465 'agon' 4 +24466 'agos' 4 +24467 'agra' 4 +24468 'agua' 4 +24469 'ague' 4 +24470 'agus' 4 +24471 'ahan' 4 +24472 'ahoo' 4 +24473 'aign' 4 +24474 'ails' 4 +24475 'aily' 4 +24476 'aina' 4 +24477 'aine' 4 +24478 'ains' 4 +24479 'aint' 4 +24480 'aird' 4 +24481 'aire' 4 +24482 'airo' 4 +24483 'airs' 4 +24484 'airy' 4 +24485 'aise' 4 +24486 'aisy' 4 +24487 'ajan' 4 +24488 'ajas' 4 +24489 'ajax' 4 +24490 'ajes' 4 +24491 'ajor' 4 +24492 'ają' 4 +24493 'akan' 4 +24494 'aked' 4 +24495 'aken' 4 +24496 'aker' 4 +24497 'akes' 4 +24498 'akia' 4 +24499 'akin' 4 +24500 'akis' 4 +24501 'akov' 4 +24502 'alam' 4 +24503 'alan' 4 +24504 'alar' 4 +24505 'aldi' 4 +24506 'aldo' 4 +24507 'aleb' 4 +24508 'aled' 4 +24509 'alem' 4 +24510 'alen' 4 +24511 'aler' 4 +24512 'ales' 4 +24513 'alex' 4 +24514 'aley' 4 +24515 'alez' 4 +24516 'algo' 4 +24517 'alia' 4 +24518 'alin' 4 +24519 'alis' 4 +24520 'alla' 4 +24521 'alle' 4 +24522 'alli' 4 +24523 'allo' 4 +24524 'alls' 4 +24525 'ally' 4 +24526 'alog' 4 +24527 'alom' 4 +24528 'alon' 4 +24529 'alph' 4 +24530 'alsa' 4 +24531 'alse' 4 +24532 'also' 4 +24533 'alta' 4 +24534 'alth' 4 +24535 'alty' 4 +24536 'alus' 4 +24537 'amac' 4 +24538 'aman' 4 +24539 'amar' 4 +24540 'amas' 4 +24541 'amat' 4 +24542 'amaz' 4 +24543 'amba' 4 +24544 'ambi' 4 +24545 'ambo' 4 +24546 'amed' 4 +24547 'amel' 4 +24548 'amen' 4 +24549 'amer' 4 +24550 'ames' 4 +24551 'amic' 4 +24552 'amil' 4 +24553 'amin' 4 +24554 'amis' 4 +24555 'amma' 4 +24556 'amon' 4 +24557 'amos' 4 +24558 'ampa' 4 +24559 'ampl' 4 +24560 'amps' 4 +24561 'amus' 4 +24562 'anal' 4 +24563 'anan' 4 +24564 'anas' 4 +24565 'anca' 4 +24566 'ance' 4 +24567 'anch' 4 +24568 'anco' 4 +24569 'ancy' 4 +24570 'anda' 4 +24571 'ande' 4 +24572 'andi' 4 +24573 'ando' 4 +24574 'andr' 4 +24575 'ands' 4 +24576 'andy' 4 +24577 'aned' 4 +24578 'anel' 4 +24579 'anes' 4 +24580 'aney' 4 +24581 'anga' 4 +24582 'ange' 4 +24583 'angi' 4 +24584 'ango' 4 +24585 'angs' 4 +24586 'angu' 4 +24587 'ania' 4 +24588 'anic' 4 +24589 'anie' 4 +24590 'anim' 4 +24591 'anja' 4 +24592 'anje' 4 +24593 'anka' 4 +24594 'anke' 4 +24595 'anks' 4 +24596 'anna' 4 +24597 'anne' 4 +24598 'anni' 4 +24599 'anno' 4 +24600 'anny' 4 +24601 'anol' 4 +24602 'anon' 4 +24603 'anor' 4 +24604 'anos' 4 +24605 'anse' 4 +24606 'ansi' 4 +24607 'ansk' 4 +24608 'anst' 4 +24609 'answ' 4 +24610 'anta' 4 +24611 'ante' 4 +24612 'anth' 4 +24613 'anti' 4 +24614 'anto' 4 +24615 'ants' 4 +24616 'antz' 4 +24617 'anus' 4 +24618 'anut' 4 +24619 'anya' 4 +24620 'anye' 4 +24621 'anyl' 4 +24622 'anza' 4 +24623 'anç' 4 +24624 'apan' 4 +24625 'apat' 4 +24626 'aped' 4 +24627 'aper' 4 +24628 'apes' 4 +24629 'apid' 4 +24630 'apis' 4 +24631 'apon' 4 +24632 'apor' 4 +24633 'appa' 4 +24634 'appe' 4 +24635 'appl' 4 +24636 'apps' 4 +24637 'appy' 4 +24638 'apro' 4 +24639 'apse' 4 +24640 'apur' 4 +24641 'aque' 4 +24642 'arak' 4 +24643 'aram' 4 +24644 'aran' 4 +24645 'aras' 4 +24646 'arat' 4 +24647 'arch' 4 +24648 'arda' 4 +24649 'arde' 4 +24650 'ardi' 4 +24651 'ardo' 4 +24652 'ards' 4 +24653 'area' 4 +24654 'ared' 4 +24655 'arel' 4 +24656 'arem' 4 +24657 'aren' 4 +24658 'arer' 4 +24659 'ares' 4 +24660 'aret' 4 +24661 'arez' 4 +24662 'arga' 4 +24663 'arge' 4 +24664 'argo' 4 +24665 'args' 4 +24666 'argv' 4 +24667 'aria' 4 +24668 'arie' 4 +24669 'arin' 4 +24670 'ario' 4 +24671 'aris' 4 +24672 'arks' 4 +24673 'arlo' 4 +24674 'arly' 4 +24675 'arma' 4 +24676 'arms' 4 +24677 'arna' 4 +24678 'aron' 4 +24679 'aroo' 4 +24680 'arra' 4 +24681 'arri' 4 
+24682 'arro' 4 +24683 'arry' 4 +24684 'arse' 4 +24685 'arta' 4 +24686 'arte' 4 +24687 'arth' 4 +24688 'arti' 4 +24689 'arto' 4 +24690 'arts' 4 +24691 'arty' 4 +24692 'artz' 4 +24693 'arum' 4 +24694 'arus' 4 +24695 'arya' 4 +24696 'aryl' 4 +24697 'ará' 4 +24698 'aré' 4 +24699 'arı' 4 +24700 'asan' 4 +24701 'asar' 4 +24702 'asci' 4 +24703 'asco' 4 +24704 'ased' 4 +24705 'aser' 4 +24706 'ases' 4 +24707 'aset' 4 +24708 'asha' 4 +24709 'ashi' 4 +24710 'asia' 4 +24711 'asic' 4 +24712 'asin' 4 +24713 'asio' 4 +24714 'asis' 4 +24715 'aska' 4 +24716 'asks' 4 +24717 'asma' 4 +24718 'ason' 4 +24719 'aspx' 4 +24720 'assa' 4 +24721 'asse' 4 +24722 'assi' 4 +24723 'asso' 4 +24724 'assy' 4 +24725 'asta' 4 +24726 'aste' 4 +24727 'asti' 4 +24728 'asto' 4 +24729 'astr' 4 +24730 'asts' 4 +24731 'asty' 4 +24732 'asus' 4 +24733 'atal' 4 +24734 'atan' 4 +24735 'atar' 4 +24736 'atas' 4 +24737 'atch' 4 +24738 'ated' 4 +24739 'ateg' 4 +24740 'atel' 4 +24741 'atem' 4 +24742 'aten' 4 +24743 'ater' 4 +24744 'ates' 4 +24745 'atex' 4 +24746 'atha' 4 +24747 'athe' 4 +24748 'athi' 4 +24749 'aths' 4 +24750 'athy' 4 +24751 'atia' 4 +24752 'atic' 4 +24753 'atie' 4 +24754 'atif' 4 +24755 'atin' 4 +24756 'atio' 4 +24757 'atis' 4 +24758 'ativ' 4 +24759 'atol' 4 +24760 'atom' 4 +24761 'aton' 4 +24762 'ator' 4 +24763 'atos' 4 +24764 'atra' 4 +24765 'atre' 4 +24766 'atri' 4 +24767 'atro' 4 +24768 'atsu' 4 +24769 'atta' 4 +24770 'atte' 4 +24771 'atti' 4 +24772 'attn' 4 +24773 'atto' 4 +24774 'attr' 4 +24775 'atts' 4 +24776 'atum' 4 +24777 'atur' 4 +24778 'atus' 4 +24779 'ató' 4 +24780 'ată' 4 +24781 'auch' 4 +24782 'audi' 4 +24783 'auer' 4 +24784 'auff' 4 +24785 'auge' 4 +24786 'augh' 4 +24787 'ault' 4 +24788 'aupt' 4 +24789 'aura' 4 +24790 'ause' 4 +24791 'auss' 4 +24792 'auth' 4 +24793 'auto' 4 +24794 'aval' 4 +24795 'avan' 4 +24796 'avar' 4 +24797 'avas' 4 +24798 'aved' 4 +24799 'avel' 4 +24800 'aven' 4 +24801 'aver' 4 +24802 'aves' 4 +24803 'avez' 4 +24804 'avia' 4 +24805 'avid' 4 +24806 'avig' 4 +24807 'avin' 4 +24808 'avis' 4 +24809 'avor' 4 +24810 'away' 4 +24811 'awks' 4 +24812 'axes' 4 +24813 'axis' 4 +24814 'axon' 4 +24815 'ayan' 4 +24816 'ayed' 4 +24817 'ayer' 4 +24818 'azar' 4 +24819 'azed' 4 +24820 'azer' 4 +24821 'azon' 4 +24822 'azzi' 4 +24823 'azzo' 4 +24824 'ază' 4 +24825 'aña' 4 +24826 'ała' 4 +24827 'ało' 4 +24828 'ały' 4 +24829 'baby' 4 +24830 'bach' 4 +24831 'back' 4 +24832 'bage' 4 +24833 'bags' 4 +24834 'ball' 4 +24835 'band' 4 +24836 'bane' 4 +24837 'bang' 4 +24838 'bank' 4 +24839 'bara' 4 +24840 'bard' 4 +24841 'bare' 4 +24842 'bars' 4 +24843 'bart' 4 +24844 'base' 4 +24845 'bash' 4 +24846 'bast' 4 +24847 'bath' 4 +24848 'baum' 4 +24849 'bbbb' 4 +24850 'bben' 4 +24851 'bbox' 4 +24852 'beam' 4 +24853 'bean' 4 +24854 'bear' 4 +24855 'beat' 4 +24856 'beck' 4 +24857 'been' 4 +24858 'beer' 4 +24859 'beit' 4 +24860 'bell' 4 +24861 'belt' 4 +24862 'bere' 4 +24863 'berg' 4 +24864 'bern' 4 +24865 'bers' 4 +24866 'bert' 4 +24867 'bery' 4 +24868 'best' 4 +24869 'beta' 4 +24870 'beth' 4 +24871 'bial' 4 +24872 'bian' 4 +24873 'bias' 4 +24874 'bies' 4 +24875 'bigg' 4 +24876 'bike' 4 +24877 'bild' 4 +24878 'bill' 4 +24879 'bilt' 4 +24880 'bind' 4 +24881 'bing' 4 +24882 'bins' 4 +24883 'bios' 4 +24884 'bird' 4 +24885 'bish' 4 +24886 'bits' 4 +24887 'bió' 4 +24888 'blah' 4 +24889 'bled' 4 +24890 'blem' 4 +24891 'bler' 4 +24892 'bles' 4 +24893 'blic' 4 +24894 'blob' 4 +24895 'blog' 4 +24896 'blue' 4 +24897 'blur' 4 +24898 'boat' 4 +24899 'body' 4 +24900 'bold' 4 +24901 'bole' 4 +24902 'bolt' 4 +24903 'bomb' 4 +24904 'bond' 
4 +24905 'bone' 4 +24906 'bons' 4 +24907 'book' 4 +24908 'bool' 4 +24909 'boot' 4 +24910 'borg' 4 +24911 'born' 4 +24912 'boro' 4 +24913 'bose' 4 +24914 'boss' 4 +24915 'both' 4 +24916 'bour' 4 +24917 'bove' 4 +24918 'bows' 4 +24919 'boys' 4 +24920 'bral' 4 +24921 'bran' 4 +24922 'bras' 4 +24923 'bred' 4 +24924 'brew' 4 +24925 'brid' 4 +24926 'bris' 4 +24927 'brit' 4 +24928 'bron' 4 +24929 'brow' 4 +24930 'buch' 4 +24931 'buck' 4 +24932 'buff' 4 +24933 'bugs' 4 +24934 'bulk' 4 +24935 'bull' 4 +24936 'bund' 4 +24937 'burg' 4 +24938 'burn' 4 +24939 'bury' 4 +24940 'busy' 4 +24941 'byte' 4 +24942 'ból' 4 +24943 'cade' 4 +24944 'cake' 4 +24945 'calc' 4 +24946 'cale' 4 +24947 'call' 4 +24948 'came' 4 +24949 'camp' 4 +24950 'cano' 4 +24951 'cant' 4 +24952 'cape' 4 +24953 'caps' 4 +24954 'capt' 4 +24955 'carb' 4 +24956 'card' 4 +24957 'care' 4 +24958 'cars' 4 +24959 'cart' 4 +24960 'case' 4 +24961 'cash' 4 +24962 'cast' 4 +24963 'cate' 4 +24964 'cats' 4 +24965 'cccc' 4 +24966 'cdot' 4 +24967 'cean' 4 +24968 'ceed' 4 +24969 'ceil' 4 +24970 'cele' 4 +24971 'cell' 4 +24972 'cent' 4 +24973 'cept' 4 +24974 'cern' 4 +24975 'cers' 4 +24976 'cert' 4 +24977 'cery' 4 +24978 'ceso' 4 +24979 'cess' 4 +24980 'chal' 4 +24981 'chan' 4 +24982 'chap' 4 +24983 'char' 4 +24984 'chas' 4 +24985 'chat' 4 +24986 'ched' 4 +24987 'chel' 4 +24988 'chem' 4 +24989 'chen' 4 +24990 'cher' 4 +24991 'ches' 4 +24992 'chet' 4 +24993 'chev' 4 +24994 'chez' 4 +24995 'chia' 4 +24996 'chie' 4 +24997 'chin' 4 +24998 'chio' 4 +24999 'chip' 4 +25000 'chor' 4 +25001 'chos' 4 +25002 'chte' 4 +25003 'chts' 4 +25004 'chus' 4 +25005 'ché' 4 +25006 'cial' 4 +25007 'cias' 4 +25008 'cido' 4 +25009 'cies' 4 +25010 'cing' 4 +25011 'cion' 4 +25012 'cipl' 4 +25013 'circ' 4 +25014 'cite' 4 +25015 'city' 4 +25016 'cium' 4 +25017 'ció' 4 +25018 'cker' 4 +25019 'cket' 4 +25020 'ckpt' 4 +25021 'clam' 4 +25022 'clar' 4 +25023 'clas' 4 +25024 'cler' 4 +25025 'cles' 4 +25026 'clic' 4 +25027 'clin' 4 +25028 'clip' 4 +25029 'clos' 4 +25030 'club' 4 +25031 'clud' 4 +25032 'clus' 4 +25033 'coal' 4 +25034 'coat' 4 +25035 'cock' 4 +25036 'code' 4 +25037 'coef' 4 +25038 'coin' 4 +25039 'cola' 4 +25040 'cold' 4 +25041 'cole' 4 +25042 'coli' 4 +25043 'coll' 4 +25044 'colm' 4 +25045 'colo' 4 +25046 'cols' 4 +25047 'coma' 4 +25048 'comb' 4 +25049 'come' 4 +25050 'comm' 4 +25051 'como' 4 +25052 'comp' 4 +25053 'conc' 4 +25054 'cond' 4 +25055 'cone' 4 +25056 'conf' 4 +25057 'cong' 4 +25058 'coni' 4 +25059 'conj' 4 +25060 'conn' 4 +25061 'cono' 4 +25062 'cons' 4 +25063 'cont' 4 +25064 'conv' 4 +25065 'cook' 4 +25066 'cool' 4 +25067 'cope' 4 +25068 'copy' 4 +25069 'cord' 4 +25070 'core' 4 +25071 'corn' 4 +25072 'corp' 4 +25073 'corr' 4 +25074 'cost' 4 +25075 'cott' 4 +25076 'cour' 4 +25077 'cout' 4 +25078 'cred' 4 +25079 'cret' 4 +25080 'crib' 4 +25081 'crit' 4 +25082 'cron' 4 +25083 'crop' 4 +25084 'crow' 4 +25085 'csrf' 4 +25086 'ctic' 4 +25087 'ctor' 4 +25088 'ctrl' 4 +25089 'cube' 4 +25090 'cuda' 4 +25091 'cule' 4 +25092 'culo' 4 +25093 'cult' 4 +25094 'curl' 4 +25095 'curr' 4 +25096 'cuts' 4 +25097 'cyan' 4 +25098 'cycl' 4 +25099 'ców' 4 +25100 'dade' 4 +25101 'dain' 4 +25102 'dale' 4 +25103 'damn' 4 +25104 'dark' 4 +25105 'dash' 4 +25106 'data' 4 +25107 'date' 4 +25108 'days' 4 +25109 'dddd' 4 +25110 'dden' 4 +25111 'dead' 4 +25112 'deal' 4 +25113 'deck' 4 +25114 'decl' 4 +25115 'deen' 4 +25116 'deep' 4 +25117 'demo' 4 +25118 'dens' 4 +25119 'dent' 4 +25120 'dept' 4 +25121 'dera' 4 +25122 'dere' 4 +25123 'dern' 4 +25124 'derr' 4 +25125 'ders' 4 +25126 'desc' 4 
+25127 'desk' 4 +25128 'dess' 4 +25129 'dest' 4 +25130 'diag' 4 +25131 'dial' 4 +25132 'dian' 4 +25133 'dice' 4 +25134 'dict' 4 +25135 'dies' 4 +25136 'diff' 4 +25137 'digo' 4 +25138 'dims' 4 +25139 'ding' 4 +25140 'dire' 4 +25141 'disc' 4 +25142 'disk' 4 +25143 'disp' 4 +25144 'diss' 4 +25145 'dist' 4 +25146 'doch' 4 +25147 'dock' 4 +25148 'docs' 4 +25149 'does' 4 +25150 'dogs' 4 +25151 'done' 4 +25152 'dong' 4 +25153 'dont' 4 +25154 'door' 4 +25155 'dorf' 4 +25156 'dose' 4 +25157 'dots' 4 +25158 'down' 4 +25159 'drag' 4 +25160 'draw' 4 +25161 'drop' 4 +25162 'drug' 4 +25163 'dual' 4 +25164 'duce' 4 +25165 'duct' 4 +25166 'duit' 4 +25167 'dule' 4 +25168 'dump' 4 +25169 'dust' 4 +25170 'duty' 4 +25171 'each' 4 +25172 'ears' 4 +25173 'east' 4 +25174 'easy' 4 +25175 'ebra' 4 +25176 'ecal' 4 +25177 'eced' 4 +25178 'eces' 4 +25179 'echa' 4 +25180 'echo' 4 +25181 'ects' 4 +25182 'edad' 4 +25183 'edar' 4 +25184 'eday' 4 +25185 'eded' 4 +25186 'edef' 4 +25187 'eden' 4 +25188 'eder' 4 +25189 'edes' 4 +25190 'edge' 4 +25191 'edia' 4 +25192 'edic' 4 +25193 'edin' 4 +25194 'edit' 4 +25195 'edly' 4 +25196 'edom' 4 +25197 'edor' 4 +25198 'educ' 4 +25199 'eeee' 4 +25200 'eful' 4 +25201 'egal' 4 +25202 'egan' 4 +25203 'egen' 4 +25204 'eger' 4 +25205 'egin' 4 +25206 'eing' 4 +25207 'eken' 4 +25208 'eker' 4 +25209 'eled' 4 +25210 'elem' 4 +25211 'elen' 4 +25212 'eler' 4 +25213 'eles' 4 +25214 'elia' 4 +25215 'elic' 4 +25216 'elif' 4 +25217 'elig' 4 +25218 'elim' 4 +25219 'elin' 4 +25220 'ella' 4 +25221 'elle' 4 +25222 'elli' 4 +25223 'ello' 4 +25224 'ells' 4 +25225 'ellt' 4 +25226 'elly' 4 +25227 'elon' 4 +25228 'elor' 4 +25229 'else' 4 +25230 'elta' 4 +25231 'elve' 4 +25232 'eman' 4 +25233 'emas' 4 +25234 'emat' 4 +25235 'emed' 4 +25236 'emen' 4 +25237 'emer' 4 +25238 'emes' 4 +25239 'emet' 4 +25240 'emia' 4 +25241 'emic' 4 +25242 'emin' 4 +25243 'emis' 4 +25244 'emit' 4 +25245 'emon' 4 +25246 'emos' 4 +25247 'empl' 4 +25248 'empt' 4 +25249 'enas' 4 +25250 'ence' 4 +25251 'ench' 4 +25252 'enci' 4 +25253 'ency' 4 +25254 'enda' 4 +25255 'ende' 4 +25256 'endi' 4 +25257 'endl' 4 +25258 'endo' 4 +25259 'ends' 4 +25260 'ened' 4 +25261 'eneg' 4 +25262 'enem' 4 +25263 'enen' 4 +25264 'ener' 4 +25265 'enes' 4 +25266 'enet' 4 +25267 'enez' 4 +25268 'enge' 4 +25269 'engl' 4 +25270 'engo' 4 +25271 'engu' 4 +25272 'enia' 4 +25273 'enic' 4 +25274 'enig' 4 +25275 'enis' 4 +25276 'enix' 4 +25277 'enko' 4 +25278 'enna' 4 +25279 'enne' 4 +25280 'enny' 4 +25281 'enos' 4 +25282 'ensa' 4 +25283 'ense' 4 +25284 'enso' 4 +25285 'enta' 4 +25286 'ente' 4 +25287 'enth' 4 +25288 'enti' 4 +25289 'ento' 4 +25290 'entr' 4 +25291 'ents' 4 +25292 'enty' 4 +25293 'enum' 4 +25294 'enza' 4 +25295 'enç' 4 +25296 'ení' 4 +25297 'eous' 4 +25298 'epad' 4 +25299 'eper' 4 +25300 'eral' 4 +25301 'eras' 4 +25302 'erca' 4 +25303 'erce' 4 +25304 'erea' 4 +25305 'ered' 4 +25306 'eree' 4 +25307 'ereg' 4 +25308 'erek' 4 +25309 'eren' 4 +25310 'erer' 4 +25311 'eres' 4 +25312 'erez' 4 +25313 'erge' 4 +25314 'ergy' 4 +25315 'eria' 4 +25316 'eric' 4 +25317 'erie' 4 +25318 'ermo' 4 +25319 'erna' 4 +25320 'erne' 4 +25321 'erno' 4 +25322 'eron' 4 +25323 'eros' 4 +25324 'erra' 4 +25325 'erre' 4 +25326 'erro' 4 +25327 'erry' 4 +25328 'erta' 4 +25329 'erte' 4 +25330 'erto' 4 +25331 'erts' 4 +25332 'erty' 4 +25333 'erva' 4 +25334 'erve' 4 +25335 'esan' 4 +25336 'esar' 4 +25337 'esch' 4 +25338 'esen' 4 +25339 'eses' 4 +25340 'esis' 4 +25341 'eson' 4 +25342 'essa' 4 +25343 'esse' 4 +25344 'esso' 4 +25345 'esta' 4 +25346 'este' 4 +25347 'esti' 4 +25348 'esto' 4 
+25349 'estr' 4 +25350 'ests' 4 +25351 'esty' 4 +25352 'etag' 4 +25353 'etal' 4 +25354 'etas' 4 +25355 'etch' 4 +25356 'eted' 4 +25357 'eten' 4 +25358 'eter' 4 +25359 'etes' 4 +25360 'ethe' 4 +25361 'etic' 4 +25362 'eton' 4 +25363 'etra' 4 +25364 'etro' 4 +25365 'etry' 4 +25366 'etta' 4 +25367 'ette' 4 +25368 'etti' 4 +25369 'etto' 4 +25370 'etur' 4 +25371 'etus' 4 +25372 'etzt' 4 +25373 'età' 4 +25374 'eurs' 4 +25375 'eval' 4 +25376 'even' 4 +25377 'ever' 4 +25378 'evil' 4 +25379 'evin' 4 +25380 'eway' 4 +25381 'exam' 4 +25382 'exec' 4 +25383 'exit' 4 +25384 'expl' 4 +25385 'expr' 4 +25386 'extr' 4 +25387 'eyed' 4 +25388 'eyer' 4 +25389 'face' 4 +25390 'fact' 4 +25391 'fade' 4 +25392 'fail' 4 +25393 'fair' 4 +25394 'fake' 4 +25395 'fall' 4 +25396 'fang' 4 +25397 'fant' 4 +25398 'fare' 4 +25399 'farm' 4 +25400 'fast' 4 +25401 'feas' 4 +25402 'feat' 4 +25403 'fect' 4 +25404 'feed' 4 +25405 'feel' 4 +25406 'feit' 4 +25407 'feld' 4 +25408 'felt' 4 +25409 'fern' 4 +25410 'fers' 4 +25411 'fert' 4 +25412 'fest' 4 +25413 'ffee' 4 +25414 'ffen' 4 +25415 'ffer' 4 +25416 'ffff' 4 +25417 'ffic' 4 +25418 'fica' 4 +25419 'fico' 4 +25420 'file' 4 +25421 'fill' 4 +25422 'film' 4 +25423 'find' 4 +25424 'fine' 4 +25425 'fire' 4 +25426 'firm' 4 +25427 'fish' 4 +25428 'fits' 4 +25429 'five' 4 +25430 'flag' 4 +25431 'flat' 4 +25432 'flex' 4 +25433 'flip' 4 +25434 'flix' 4 +25435 'flow' 4 +25436 'flux' 4 +25437 'foil' 4 +25438 'fois' 4 +25439 'fold' 4 +25440 'folk' 4 +25441 'fono' 4 +25442 'font' 4 +25443 'fony' 4 +25444 'food' 4 +25445 'foot' 4 +25446 'ford' 4 +25447 'fore' 4 +25448 'fork' 4 +25449 'form' 4 +25450 'fort' 4 +25451 'four' 4 +25452 'frac' 4 +25453 'frag' 4 +25454 'frak' 4 +25455 'fram' 4 +25456 'fred' 4 +25457 'free' 4 +25458 'freq' 4 +25459 'frey' 4 +25460 'from' 4 +25461 'ften' 4 +25462 'fter' 4 +25463 'fuel' 4 +25464 'full' 4 +25465 'func' 4 +25466 'fund' 4 +25467 'furt' 4 +25468 'fusc' 4 +25469 'fuse' 4 +25470 'fér' 4 +25471 'för' 4 +25472 'füg' 4 +25473 'füh' 4 +25474 'für' 4 +25475 'gado' 4 +25476 'gage' 4 +25477 'gain' 4 +25478 'game' 4 +25479 'gang' 4 +25480 'gard' 4 +25481 'gart' 4 +25482 'gary' 4 +25483 'gate' 4 +25484 'gear' 4 +25485 'geme' 4 +25486 'gems' 4 +25487 'gend' 4 +25488 'gene' 4 +25489 'gens' 4 +25490 'gent' 4 +25491 'geom' 4 +25492 'geon' 4 +25493 'gers' 4 +25494 'gery' 4 +25495 'gest' 4 +25496 'getX' 4 +25497 'gets' 4 +25498 'gett' 4 +25499 'gger' 4 +25500 'ggle' 4 +25501 'ghan' 4 +25502 'gian' 4 +25503 'gift' 4 +25504 'ging' 4 +25505 'gins' 4 +25506 'ginx' 4 +25507 'girl' 4 +25508 'gium' 4 +25509 'give' 4 +25510 'glob' 4 +25511 'glut' 4 +25512 'goal' 4 +25513 'gold' 4 +25514 'gone' 4 +25515 'good' 4 +25516 'goog' 4 +25517 'goto' 4 +25518 'gpio' 4 +25519 'grab' 4 +25520 'grad' 4 +25521 'gram' 4 +25522 'gran' 4 +25523 'grat' 4 +25524 'grav' 4 +25525 'gray' 4 +25526 'gree' 4 +25527 'greg' 4 +25528 'gren' 4 +25529 'grep' 4 +25530 'gres' 4 +25531 'grey' 4 +25532 'grid' 4 +25533 'grow' 4 +25534 'gré' 4 +25535 'gså' 4 +25536 'guid' 4 +25537 'guns' 4 +25538 'gypt' 4 +25539 'gzip' 4 +25540 'habi' 4 +25541 'hack' 4 +25542 'haft' 4 +25543 'hair' 4 +25544 'halb' 4 +25545 'half' 4 +25546 'hall' 4 +25547 'halt' 4 +25548 'hand' 4 +25549 'hang' 4 +25550 'hani' 4 +25551 'hape' 4 +25552 'happ' 4 +25553 'haps' 4 +25554 'hard' 4 +25555 'hare' 4 +25556 'harm' 4 +25557 'hart' 4 +25558 'hash' 4 +25559 'hatt' 4 +25560 'haul' 4 +25561 'haus' 4 +25562 'have' 4 +25563 'havi' 4 +25564 'hbar' 4 +25565 'hbox' 4 +25566 'head' 4 +25567 'heal' 4 +25568 'heap' 4 +25569 'heat' 4 +25570 'heck' 4 +25571 
'heed' 4 +25572 'heel' 4 +25573 'heet' 4 +25574 'heid' 4 +25575 'heim' 4 +25576 'heit' 4 +25577 'held' 4 +25578 'helf' 4 +25579 'hell' 4 +25580 'helm' 4 +25581 'help' 4 +25582 'hend' 4 +25583 'hene' 4 +25584 'heng' 4 +25585 'hens' 4 +25586 'here' 4 +25587 'hern' 4 +25588 'hero' 4 +25589 'hers' 4 +25590 'hest' 4 +25591 'heur' 4 +25592 'hide' 4 +25593 'hift' 4 +25594 'high' 4 +25595 'hill' 4 +25596 'hind' 4 +25597 'hing' 4 +25598 'hint' 4 +25599 'hips' 4 +25600 'hire' 4 +25601 'hist' 4 +25602 'hive' 4 +25603 'hlen' 4 +25604 'hler' 4 +25605 'hoff' 4 +25606 'hold' 4 +25607 'hole' 4 +25608 'holm' 4 +25609 'home' 4 +25610 'hood' 4 +25611 'hook' 4 +25612 'hope' 4 +25613 'hora' 4 +25614 'horn' 4 +25615 'hors' 4 +25616 'hort' 4 +25617 'host' 4 +25618 'hots' 4 +25619 'hour' 4 +25620 'href' 4 +25621 'html' 4 +25622 'hton' 4 +25623 'http' 4 +25624 'hung' 4 +25625 'hydr' 4 +25626 'hyth' 4 +25627 'ház' 4 +25628 'hés' 4 +25629 'hör' 4 +25630 'iada' 4 +25631 'iage' 4 +25632 'iais' 4 +25633 'iale' 4 +25634 'ials' 4 +25635 'iami' 4 +25636 'iamo' 4 +25637 'iams' 4 +25638 'iana' 4 +25639 'iane' 4 +25640 'iang' 4 +25641 'iani' 4 +25642 'iano' 4 +25643 'ians' 4 +25644 'iant' 4 +25645 'iary' 4 +25646 'iasm' 4 +25647 'iate' 4 +25648 'iał' 4 +25649 'ibal' 4 +25650 'iban' 4 +25651 'ibel' 4 +25652 'iben' 4 +25653 'iber' 4 +25654 'ibia' 4 +25655 'ibil' 4 +25656 'ible' 4 +25657 'ibli' 4 +25658 'ibly' 4 +25659 'ibus' 4 +25660 'ical' 4 +25661 'ican' 4 +25662 'icar' 4 +25663 'icas' 4 +25664 'iced' 4 +25665 'icer' 4 +25666 'ices' 4 +25667 'icha' 4 +25668 'iche' 4 +25669 'ichi' 4 +25670 'icho' 4 +25671 'icht' 4 +25672 'icia' 4 +25673 'icio' 4 +25674 'icip' 4 +25675 'icit' 4 +25676 'icki' 4 +25677 'icks' 4 +25678 'icky' 4 +25679 'icle' 4 +25680 'icol' 4 +25681 'icon' 4 +25682 'icos' 4 +25683 'icro' 4 +25684 'icts' 4 +25685 'icul' 4 +25686 'icum' 4 +25687 'icus' 4 +25688 'icut' 4 +25689 'ică' 4 +25690 'idad' 4 +25691 'idae' 4 +25692 'idal' 4 +25693 'idan' 4 +25694 'idas' 4 +25695 'iday' 4 +25696 'iddy' 4 +25697 'idea' 4 +25698 'ided' 4 +25699 'idel' 4 +25700 'iden' 4 +25701 'ideo' 4 +25702 'ider' 4 +25703 'ides' 4 +25704 'idge' 4 +25705 'idia' 4 +25706 'idin' 4 +25707 'idis' 4 +25708 'idle' 4 +25709 'idor' 4 +25710 'idos' 4 +25711 'idth' 4 +25712 'idé' 4 +25713 'iece' 4 +25714 'iego' 4 +25715 'ield' 4 +25716 'iele' 4 +25717 'iels' 4 +25718 'iene' 4 +25719 'iens' 4 +25720 'ient' 4 +25721 'iera' 4 +25722 'iere' 4 +25723 'ieri' 4 +25724 'iero' 4 +25725 'iers' 4 +25726 'iert' 4 +25727 'iese' 4 +25728 'iest' 4 +25729 'iets' 4 +25730 'iety' 4 +25731 'ieur' 4 +25732 'ieux' 4 +25733 'ieve' 4 +25734 'ieß' 4 +25735 'ież' 4 +25736 'ifar' 4 +25737 'ifax' 4 +25738 'ifen' 4 +25739 'ifer' 4 +25740 'iffe' 4 +25741 'iffs' 4 +25742 'ific' 4 +25743 'ifie' 4 +25744 'ifik' 4 +25745 'ifle' 4 +25746 'ifth' 4 +25747 'ifts' 4 +25748 'ifty' 4 +25749 'iful' 4 +25750 'igan' 4 +25751 'igar' 4 +25752 'igen' 4 +25753 'iger' 4 +25754 'iges' 4 +25755 'ighb' 4 +25756 'ight' 4 +25757 'igin' 4 +25758 'igma' 4 +25759 'igne' 4 +25760 'igon' 4 +25761 'igor' 4 +25762 'igos' 4 +25763 'igua' 4 +25764 'igue' 4 +25765 'ihad' 4 +25766 'ikal' 4 +25767 'ikan' 4 +25768 'iked' 4 +25769 'ikel' 4 +25770 'iken' 4 +25771 'iker' 4 +25772 'ikes' 4 +25773 'ikit' 4 +25774 'ikon' 4 +25775 'ikov' 4 +25776 'ilar' 4 +25777 'ilda' 4 +25778 'ilde' 4 +25779 'iled' 4 +25780 'ilee' 4 +25781 'ilen' 4 +25782 'iler' 4 +25783 'iles' 4 +25784 'ilet' 4 +25785 'iley' 4 +25786 'ilia' 4 +25787 'ilib' 4 +25788 'ilic' 4 +25789 'ilin' 4 +25790 'ilio' 4 +25791 'ilis' 4 +25792 'ilit' 4 +25793 'illa' 4 
+25794 'ille' 4 +25795 'illi' 4 +25796 'illo' 4 +25797 'ills' 4 +25798 'illy' 4 +25799 'iloc' 4 +25800 'ilog' 4 +25801 'ilon' 4 +25802 'ilor' 4 +25803 'ilos' 4 +25804 'ilot' 4 +25805 'ilst' 4 +25806 'ilty' 4 +25807 'ilus' 4 +25808 'ilyn' 4 +25809 'ilà' 4 +25810 'imag' 4 +25811 'imal' 4 +25812 'iman' 4 +25813 'imap' 4 +25814 'imar' 4 +25815 'imas' 4 +25816 'imat' 4 +25817 'imed' 4 +25818 'imen' 4 +25819 'imer' 4 +25820 'imes' 4 +25821 'imet' 4 +25822 'imin' 4 +25823 'imir' 4 +25824 'imit' 4 +25825 'imon' 4 +25826 'imos' 4 +25827 'impl' 4 +25828 'imum' 4 +25829 'imus' 4 +25830 'inae' 4 +25831 'inal' 4 +25832 'inar' 4 +25833 'inas' 4 +25834 'ince' 4 +25835 'inch' 4 +25836 'inci' 4 +25837 'incl' 4 +25838 'inct' 4 +25839 'inda' 4 +25840 'inde' 4 +25841 'indi' 4 +25842 'indo' 4 +25843 'inds' 4 +25844 'indu' 4 +25845 'indy' 4 +25846 'inea' 4 +25847 'ined' 4 +25848 'inee' 4 +25849 'inel' 4 +25850 'inem' 4 +25851 'inen' 4 +25852 'iner' 4 +25853 'ines' 4 +25854 'inet' 4 +25855 'inez' 4 +25856 'infl' 4 +25857 'info' 4 +25858 'inge' 4 +25859 'ingo' 4 +25860 'ings' 4 +25861 'ingt' 4 +25862 'ingu' 4 +25863 'inha' 4 +25864 'inho' 4 +25865 'inia' 4 +25866 'inic' 4 +25867 'inin' 4 +25868 'inis' 4 +25869 'init' 4 +25870 'iniz' 4 +25871 'inja' 4 +25872 'inka' 4 +25873 'inki' 4 +25874 'inks' 4 +25875 'inky' 4 +25876 'inoa' 4 +25877 'inos' 4 +25878 'inqu' 4 +25879 'insi' 4 +25880 'insk' 4 +25881 'insn' 4 +25882 'insp' 4 +25883 'inst' 4 +25884 'inta' 4 +25885 'inte' 4 +25886 'inth' 4 +25887 'into' 4 +25888 'intr' 4 +25889 'ints' 4 +25890 'inue' 4 +25891 'inus' 4 +25892 'inux' 4 +25893 'iné' 4 +25894 'iona' 4 +25895 'ione' 4 +25896 'ioni' 4 +25897 'ions' 4 +25898 'iors' 4 +25899 'ioso' 4 +25900 'iota' 4 +25901 'iour' 4 +25902 'ious' 4 +25903 'ipal' 4 +25904 'iped' 4 +25905 'ipeg' 4 +25906 'ipel' 4 +25907 'iper' 4 +25908 'ipes' 4 +25909 'iple' 4 +25910 'ippi' 4 +25911 'ippy' 4 +25912 'ipro' 4 +25913 'ipse' 4 +25914 'ique' 4 +25915 'iral' 4 +25916 'iran' 4 +25917 'iras' 4 +25918 'irds' 4 +25919 'ired' 4 +25920 'iren' 4 +25921 'ires' 4 +25922 'irez' 4 +25923 'irie' 4 +25924 'iris' 4 +25925 'irit' 4 +25926 'irms' 4 +25927 'iron' 4 +25928 'iros' 4 +25929 'irse' 4 +25930 'irst' 4 +25931 'irth' 4 +25932 'irts' 4 +25933 'irty' 4 +25934 'irus' 4 +25935 'irá' 4 +25936 'isan' 4 +25937 'isas' 4 +25938 'isch' 4 +25939 'isco' 4 +25940 'ised' 4 +25941 'isel' 4 +25942 'isen' 4 +25943 'iser' 4 +25944 'ises' 4 +25945 'iset' 4 +25946 'isha' 4 +25947 'ishi' 4 +25948 'isia' 4 +25949 'isin' 4 +25950 'isis' 4 +25951 'iska' 4 +25952 'iske' 4 +25953 'isko' 4 +25954 'isks' 4 +25955 'isle' 4 +25956 'isma' 4 +25957 'isme' 4 +25958 'ismo' 4 +25959 'isms' 4 +25960 'isol' 4 +25961 'ison' 4 +25962 'isor' 4 +25963 'issa' 4 +25964 'isse' 4 +25965 'issy' 4 +25966 'ista' 4 +25967 'iste' 4 +25968 'isti' 4 +25969 'isto' 4 +25970 'istr' 4 +25971 'ists' 4 +25972 'isty' 4 +25973 'isé' 4 +25974 'ital' 4 +25975 'itan' 4 +25976 'itar' 4 +25977 'itas' 4 +25978 'itat' 4 +25979 'itch' 4 +25980 'ited' 4 +25981 'itel' 4 +25982 'item' 4 +25983 'iten' 4 +25984 'iter' 4 +25985 'ites' 4 +25986 'itet' 4 +25987 'ithe' 4 +25988 'itia' 4 +25989 'itic' 4 +25990 'itin' 4 +25991 'itis' 4 +25992 'itle' 4 +25993 'itol' 4 +25994 'iton' 4 +25995 'itor' 4 +25996 'itos' 4 +25997 'itro' 4 +25998 'itsu' 4 +25999 'itta' 4 +26000 'itte' 4 +26001 'itti' 4 +26002 'itto' 4 +26003 'itty' 4 +26004 'itud' 4 +26005 'itus' 4 +26006 'ità' 4 +26007 'itä' 4 +26008 'ité' 4 +26009 'ită' 4 +26010 'ival' 4 +26011 'ivan' 4 +26012 'ivar' 4 +26013 'ivas' 4 +26014 'ived' 4 +26015 'ivel' 4 +26016 
'iven' 4 +26017 'iver' 4 +26018 'ives' 4 +26019 'ivia' 4 +26020 'ivic' 4 +26021 'ivid' 4 +26022 'ivil' 4 +26023 'ivir' 4 +26024 'ivos' 4 +26025 'ivot' 4 +26026 'ixed' 4 +26027 'ixel' 4 +26028 'ixin' 4 +26029 'ixon' 4 +26030 'izar' 4 +26031 'ized' 4 +26032 'izen' 4 +26033 'izer' 4 +26034 'izes' 4 +26035 'izia' 4 +26036 'izin' 4 +26037 'izio' 4 +26038 'izon' 4 +26039 'izza' 4 +26040 'ião' 4 +26041 'iça' 4 +26042 'ién' 4 +26043 'ión' 4 +26044 'jack' 4 +26045 'jang' 4 +26046 'java' 4 +26047 'jdbc' 4 +26048 'ject' 4 +26049 'jest' 4 +26050 'jets' 4 +26051 'jian' 4 +26052 'jing' 4 +26053 'jira' 4 +26054 'jobs' 4 +26055 'john' 4 +26056 'join' 4 +26057 'jong' 4 +26058 'jour' 4 +26059 'jpeg' 4 +26060 'json' 4 +26061 'jump' 4 +26062 'jury' 4 +26063 'just' 4 +26064 'ják' 4 +26065 'ján' 4 +26066 'ját' 4 +26067 'jär' 4 +26068 'jön' 4 +26069 'jör' 4 +26070 'jąc' 4 +26071 'kań' 4 +26072 'keep' 4 +26073 'kees' 4 +26074 'kehr' 4 +26075 'keit' 4 +26076 'kern' 4 +26077 'kers' 4 +26078 'keys' 4 +26079 'kick' 4 +26080 'kids' 4 +26081 'kill' 4 +26082 'kind' 4 +26083 'king' 4 +26084 'kins' 4 +26085 'know' 4 +26086 'krit' 4 +26087 'ktop' 4 +26088 'ktor' 4 +26089 'któ' 4 +26090 'ków' 4 +26091 'lace' 4 +26092 'lage' 4 +26093 'laim' 4 +26094 'lain' 4 +26095 'lake' 4 +26096 'land' 4 +26097 'lane' 4 +26098 'lang' 4 +26099 'larg' 4 +26100 'lash' 4 +26101 'lass' 4 +26102 'last' 4 +26103 'late' 4 +26104 'laus' 4 +26105 'laws' 4 +26106 'lazy' 4 +26107 'ldap' 4 +26108 'lder' 4 +26109 'lead' 4 +26110 'leaf' 4 +26111 'lean' 4 +26112 'lear' 4 +26113 'leck' 4 +26114 'lect' 4 +26115 'leen' 4 +26116 'leep' 4 +26117 'leet' 4 +26118 'left' 4 +26119 'lege' 4 +26120 'lein' 4 +26121 'lems' 4 +26122 'lene' 4 +26123 'lens' 4 +26124 'leon' 4 +26125 'lers' 4 +26126 'lesh' 4 +26127 'less' 4 +26128 'lest' 4 +26129 'lete' 4 +26130 'lets' 4 +26131 'lett' 4 +26132 'leur' 4 +26133 'leys' 4 +26134 'libc' 4 +26135 'libs' 4 +26136 'lica' 4 +26137 'lice' 4 +26138 'lich' 4 +26139 'lick' 4 +26140 'lict' 4 +26141 'lied' 4 +26142 'lier' 4 +26143 'lies' 4 +26144 'life' 4 +26145 'lift' 4 +26146 'liga' 4 +26147 'ligt' 4 +26148 'like' 4 +26149 'lime' 4 +26150 'line' 4 +26151 'ling' 4 +26152 'link' 4 +26153 'lint' 4 +26154 'lion' 4 +26155 'liqu' 4 +26156 'lish' 4 +26157 'list' 4 +26158 'lite' 4 +26159 'live' 4 +26160 'ller' 4 +26161 'lles' 4 +26162 'llvm' 4 +26163 'load' 4 +26164 'loan' 4 +26165 'loat' 4 +26166 'lock' 4 +26167 'logo' 4 +26168 'logs' 4 +26169 'loid' 4 +26170 'long' 4 +26171 'lood' 4 +26172 'look' 4 +26173 'loop' 4 +26174 'loor' 4 +26175 'lord' 4 +26176 'lose' 4 +26177 'loss' 4 +26178 'lost' 4 +26179 'lots' 4 +26180 'love' 4 +26181 'loyd' 4 +26182 'luck' 4 +26183 'lund' 4 +26184 'lung' 4 +26185 'lymp' 4 +26186 'lyph' 4 +26187 'lán' 4 +26188 'lär' 4 +26189 'läu' 4 +26190 'lès' 4 +26191 'lés' 4 +26192 'lês' 4 +26193 'mach' 4 +26194 'made' 4 +26195 'mage' 4 +26196 'magn' 4 +26197 'maid' 4 +26198 'mail' 4 +26199 'main' 4 +26200 'make' 4 +26201 'male' 4 +26202 'mall' 4 +26203 'mana' 4 +26204 'mand' 4 +26205 'mani' 4 +26206 'mann' 4 +26207 'mans' 4 +26208 'mant' 4 +26209 'many' 4 +26210 'maps' 4 +26211 'mare' 4 +26212 'mark' 4 +26213 'mars' 4 +26214 'mart' 4 +26215 'mary' 4 +26216 'mask' 4 +26217 'mass' 4 +26218 'mast' 4 +26219 'mate' 4 +26220 'math' 4 +26221 'maze' 4 +26222 'mber' 4 +26223 'mbox' 4 +26224 'meal' 4 +26225 'mean' 4 +26226 'meas' 4 +26227 'medi' 4 +26228 'meet' 4 +26229 'mega' 4 +26230 'memb' 4 +26231 'memo' 4 +26232 'meno' 4 +26233 'mens' 4 +26234 'ment' 4 +26235 'menu' 4 +26236 'merc' 4 +26237 'mere' 4 +26238 'mers' 4 +26239 'mesh' 
4 +26240 'mess' 4 +26241 'meta' 4 +26242 'meth' 4 +26243 'midi' 4 +26244 'midt' 4 +26245 'mile' 4 +26246 'mill' 4 +26247 'mime' 4 +26248 'mina' 4 +26249 'mind' 4 +26250 'mine' 4 +26251 'ming' 4 +26252 'mini' 4 +26253 'mino' 4 +26254 'mins' 4 +26255 'mint' 4 +26256 'misc' 4 +26257 'mise' 4 +26258 'miss' 4 +26259 'mist' 4 +26260 'mite' 4 +26261 'mith' 4 +26262 'mits' 4 +26263 'mitt' 4 +26264 'mium' 4 +26265 'mlin' 4 +26266 'mock' 4 +26267 'mode' 4 +26268 'moil' 4 +26269 'mond' 4 +26270 'mong' 4 +26271 'mono' 4 +26272 'mons' 4 +26273 'mont' 4 +26274 'mony' 4 +26275 'moon' 4 +26276 'more' 4 +26277 'mort' 4 +26278 'most' 4 +26279 'move' 4 +26280 'mpeg' 4 +26281 'msgs' 4 +26282 'much' 4 +26283 'mult' 4 +26284 'mund' 4 +26285 'must' 4 +26286 'mute' 4 +26287 'nail' 4 +26288 'nals' 4 +26289 'nama' 4 +26290 'name' 4 +26291 'nant' 4 +26292 'nbsp' 4 +26293 'ncia' 4 +26294 'ndef' 4 +26295 'nder' 4 +26296 'ndim' 4 +26297 'near' 4 +26298 'neau' 4 +26299 'neck' 4 +26300 'nect' 4 +26301 'need' 4 +26302 'nego' 4 +26303 'nell' 4 +26304 'nels' 4 +26305 'nerg' 4 +26306 'ners' 4 +26307 'ness' 4 +26308 'nest' 4 +26309 'nets' 4 +26310 'nett' 4 +26311 'neum' 4 +26312 'neur' 4 +26313 'neut' 4 +26314 'news' 4 +26315 'next' 4 +26316 'neys' 4 +26317 'nger' 4 +26318 'nice' 4 +26319 'nick' 4 +26320 'nier' 4 +26321 'nine' 4 +26322 'ning' 4 +26323 'nist' 4 +26324 'nię' 4 +26325 'node' 4 +26326 'nome' 4 +26327 'none' 4 +26328 'noon' 4 +26329 'noop' 4 +26330 'norm' 4 +26331 'nose' 4 +26332 'nost' 4 +26333 'note' 4 +26334 'noun' 4 +26335 'nova' 4 +26336 'nown' 4 +26337 'nsic' 4 +26338 'nten' 4 +26339 'nton' 4 +26340 'null' 4 +26341 'nung' 4 +26342 'nuts' 4 +26343 'née' 4 +26344 'nés' 4 +26345 'ník' 4 +26346 'ním' 4 +26347 'oard' 4 +26348 'obal' 4 +26349 'obar' 4 +26350 'obby' 4 +26351 'ober' 4 +26352 'obia' 4 +26353 'obic' 4 +26354 'obil' 4 +26355 'oble' 4 +26356 'obox' 4 +26357 'obra' 4 +26358 'obre' 4 +26359 'obuf' 4 +26360 'ocal' 4 +26361 'ocar' 4 +26362 'occo' 4 +26363 'oche' 4 +26364 'ocks' 4 +26365 'ocoa' 4 +26366 'ocol' 4 +26367 'ocom' 4 +26368 'ocon' 4 +26369 'ocre' 4 +26370 'ocus' 4 +26371 'ocê' 4 +26372 'odal' 4 +26373 'oday' 4 +26374 'oded' 4 +26375 'odel' 4 +26376 'odem' 4 +26377 'oden' 4 +26378 'oder' 4 +26379 'odes' 4 +26380 'odge' 4 +26381 'odia' 4 +26382 'odic' 4 +26383 'odom' 4 +26384 'odon' 4 +26385 'odor' 4 +26386 'odos' 4 +26387 'odot' 4 +26388 'odox' 4 +26389 'odus' 4 +26390 'offs' 4 +26391 'ogan' 4 +26392 'ogel' 4 +26393 'ogen' 4 +26394 'ogle' 4 +26395 'ogly' 4 +26396 'ogne' 4 +26397 'ogon' 4 +26398 'ogra' 4 +26399 'ogue' 4 +26400 'ohan' 4 +26401 'oids' 4 +26402 'oine' 4 +26403 'oint' 4 +26404 'oire' 4 +26405 'oise' 4 +26406 'oked' 4 +26407 'oken' 4 +26408 'oker' 4 +26409 'okes' 4 +26410 'okia' 4 +26411 'okie' 4 +26412 'okin' 4 +26413 'olan' 4 +26414 'olar' 4 +26415 'olas' 4 +26416 'olds' 4 +26417 'oled' 4 +26418 'olem' 4 +26419 'olen' 4 +26420 'oler' 4 +26421 'oles' 4 +26422 'oley' 4 +26423 'olia' 4 +26424 'olic' 4 +26425 'olid' 4 +26426 'olin' 4 +26427 'olip' 4 +26428 'olis' 4 +26429 'olit' 4 +26430 'olla' 4 +26431 'ollo' 4 +26432 'olly' 4 +26433 'olog' 4 +26434 'olon' 4 +26435 'olor' 4 +26436 'olph' 4 +26437 'olta' 4 +26438 'olve' 4 +26439 'omal' 4 +26440 'oman' 4 +26441 'omas' 4 +26442 'omat' 4 +26443 'ombo' 4 +26444 'omed' 4 +26445 'omen' 4 +26446 'omer' 4 +26447 'omes' 4 +26448 'omet' 4 +26449 'omez' 4 +26450 'omic' 4 +26451 'omin' 4 +26452 'omit' 4 +26453 'omon' 4 +26454 'onal' 4 +26455 'onas' 4 +26456 'once' 4 +26457 'onda' 4 +26458 'onde' 4 +26459 'ondo' 4 +26460 'onds' 4 +26461 'oned' 4 +26462 
'onel' 4 +26463 'onen' 4 +26464 'oner' 4 +26465 'ones' 4 +26466 'onet' 4 +26467 'oney' 4 +26468 'onga' 4 +26469 'onge' 4 +26470 'ongo' 4 +26471 'ongs' 4 +26472 'onia' 4 +26473 'onic' 4 +26474 'onio' 4 +26475 'onis' 4 +26476 'only' 4 +26477 'onna' 4 +26478 'onne' 4 +26479 'onom' 4 +26480 'onse' 4 +26481 'onso' 4 +26482 'onte' 4 +26483 'onto' 4 +26484 'onym' 4 +26485 'ooks' 4 +26486 'ools' 4 +26487 'oons' 4 +26488 'oooo' 4 +26489 'oops' 4 +26490 'ooth' 4 +26491 'opal' 4 +26492 'oped' 4 +26493 'open' 4 +26494 'oper' 4 +26495 'opes' 4 +26496 'opez' 4 +26497 'ophe' 4 +26498 'ophy' 4 +26499 'opia' 4 +26500 'opic' 4 +26501 'opin' 4 +26502 'ople' 4 +26503 'opol' 4 +26504 'opor' 4 +26505 'opot' 4 +26506 'oppy' 4 +26507 'opro' 4 +26508 'opsy' 4 +26509 'opts' 4 +26510 'opus' 4 +26511 'oque' 4 +26512 'oral' 4 +26513 'oran' 4 +26514 'oras' 4 +26515 'orce' 4 +26516 'orch' 4 +26517 'orde' 4 +26518 'ordo' 4 +26519 'ords' 4 +26520 'orea' 4 +26521 'ored' 4 +26522 'orem' 4 +26523 'oren' 4 +26524 'orer' 4 +26525 'ores' 4 +26526 'oret' 4 +26527 'orge' 4 +26528 'oria' 4 +26529 'oric' 4 +26530 'orie' 4 +26531 'orig' 4 +26532 'orin' 4 +26533 'orio' 4 +26534 'oris' 4 +26535 'orks' 4 +26536 'orld' 4 +26537 'orna' 4 +26538 'orne' 4 +26539 'orno' 4 +26540 'orns' 4 +26541 'oron' 4 +26542 'orph' 4 +26543 'orro' 4 +26544 'orry' 4 +26545 'orse' 4 +26546 'orsi' 4 +26547 'orsk' 4 +26548 'orst' 4 +26549 'orta' 4 +26550 'orte' 4 +26551 'orth' 4 +26552 'orts' 4 +26553 'orum' 4 +26554 'orus' 4 +26555 'osal' 4 +26556 'osas' 4 +26557 'osed' 4 +26558 'osen' 4 +26559 'oser' 4 +26560 'oses' 4 +26561 'osex' 4 +26562 'oshi' 4 +26563 'osin' 4 +26564 'osis' 4 +26565 'osit' 4 +26566 'osos' 4 +26567 'osph' 4 +26568 'ossa' 4 +26569 'osse' 4 +26570 'osta' 4 +26571 'oste' 4 +26572 'osti' 4 +26573 'osto' 4 +26574 'otal' 4 +26575 'oted' 4 +26576 'oten' 4 +26577 'oter' 4 +26578 'otes' 4 +26579 'othe' 4 +26580 'otho' 4 +26581 'othy' 4 +26582 'otic' 4 +26583 'otin' 4 +26584 'otle' 4 +26585 'otom' 4 +26586 'oton' 4 +26587 'otor' 4 +26588 'otos' 4 +26589 'otta' 4 +26590 'otte' 4 +26591 'otti' 4 +26592 'otto' 4 +26593 'otyp' 4 +26594 'ouch' 4 +26595 'oufl' 4 +26596 'ough' 4 +26597 'ould' 4 +26598 'ound' 4 +26599 'ount' 4 +26600 'oupe' 4 +26601 'ourd' 4 +26602 'oure' 4 +26603 'ourg' 4 +26604 'ouri' 4 +26605 'ourn' 4 +26606 'ours' 4 +26607 'ourt' 4 +26608 'ouse' 4 +26609 'ouss' 4 +26610 'oust' 4 +26611 'oute' 4 +26612 'outh' 4 +26613 'outs' 4 +26614 'ouve' 4 +26615 'oval' 4 +26616 'ovan' 4 +26617 'oved' 4 +26618 'oven' 4 +26619 'over' 4 +26620 'oves' 4 +26621 'ovic' 4 +26622 'ovie' 4 +26623 'ová' 4 +26624 'ové' 4 +26625 'ový' 4 +26626 'ově' 4 +26627 'owan' 4 +26628 'owed' 4 +26629 'owel' 4 +26630 'ower' 4 +26631 'ową' 4 +26632 'oxel' 4 +26633 'oxic' 4 +26634 'oxid' 4 +26635 'oyal' 4 +26636 'oyer' 4 +26637 'oyle' 4 +26638 'pace' 4 +26639 'pack' 4 +26640 'page' 4 +26641 'paid' 4 +26642 'pain' 4 +26643 'pair' 4 +26644 'pand' 4 +26645 'para' 4 +26646 'pard' 4 +26647 'pare' 4 +26648 'park' 4 +26649 'pars' 4 +26650 'part' 4 +26651 'pass' 4 +26652 'past' 4 +26653 'path' 4 +26654 'pdev' 4 +26655 'peak' 4 +26656 'pear' 4 +26657 'peat' 4 +26658 'pect' 4 +26659 'peed' 4 +26660 'peek' 4 +26661 'peer' 4 +26662 'pell' 4 +26663 'pend' 4 +26664 'pent' 4 +26665 'perc' 4 +26666 'perf' 4 +26667 'peri' 4 +26668 'perl' 4 +26669 'perm' 4 +26670 'perp' 4 +26671 'pers' 4 +26672 'pert' 4 +26673 'phal' 4 +26674 'phan' 4 +26675 'phas' 4 +26676 'phen' 4 +26677 'pher' 4 +26678 'phia' 4 +26679 'phil' 4 +26680 'phin' 4 +26681 'phis' 4 +26682 'phon' 4 +26683 'phot' 4 +26684 'phys' 
4 +26685 'pick' 4 +26686 'pies' 4 +26687 'pile' 4 +26688 'pine' 4 +26689 'ping' 4 +26690 'pink' 4 +26691 'pins' 4 +26692 'pipe' 4 +26693 'pire' 4 +26694 'pite' 4 +26695 'plan' 4 +26696 'plat' 4 +26697 'play' 4 +26698 'pled' 4 +26699 'pler' 4 +26700 'ples' 4 +26701 'plet' 4 +26702 'plex' 4 +26703 'plic' 4 +26704 'plit' 4 +26705 'plot' 4 +26706 'ploy' 4 +26707 'plug' 4 +26708 'plus' 4 +26709 'pmod' 4 +26710 'poke' 4 +26711 'pole' 4 +26712 'poll' 4 +26713 'poly' 4 +26714 'pond' 4 +26715 'pone' 4 +26716 'pong' 4 +26717 'pons' 4 +26718 'pool' 4 +26719 'poon' 4 +26720 'pora' 4 +26721 'port' 4 +26722 'pose' 4 +26723 'poss' 4 +26724 'post' 4 +26725 'pour' 4 +26726 'pped' 4 +26727 'ppen' 4 +26728 'pper' 4 +26729 'prec' 4 +26730 'pred' 4 +26731 'pref' 4 +26732 'prem' 4 +26733 'prep' 4 +26734 'pres' 4 +26735 'pret' 4 +26736 'prev' 4 +26737 'pril' 4 +26738 'prim' 4 +26739 'prit' 4 +26740 'priv' 4 +26741 'prob' 4 +26742 'proc' 4 +26743 'prod' 4 +26744 'prof' 4 +26745 'prog' 4 +26746 'proj' 4 +26747 'prom' 4 +26748 'pron' 4 +26749 'prop' 4 +26750 'prot' 4 +26751 'prov' 4 +26752 'prox' 4 +26753 'prus' 4 +26754 'prü' 4 +26755 'pson' 4 +26756 'ptic' 4 +26757 'pton' 4 +26758 'publ' 4 +26759 'pull' 4 +26760 'punk' 4 +26761 'pure' 4 +26762 'push' 4 +26763 'pute' 4 +26764 'qing' 4 +26765 'quad' 4 +26766 'qual' 4 +26767 'quan' 4 +26768 'quar' 4 +26769 'quat' 4 +26770 'quee' 4 +26771 'quel' 4 +26772 'quer' 4 +26773 'ques' 4 +26774 'quet' 4 +26775 'quez' 4 +26776 'quia' 4 +26777 'quin' 4 +26778 'quir' 4 +26779 'quis' 4 +26780 'quit' 4 +26781 'quiz' 4 +26782 'quot' 4 +26783 'qué' 4 +26784 'race' 4 +26785 'rack' 4 +26786 'ract' 4 +26787 'rada' 4 +26788 'rade' 4 +26789 'radi' 4 +26790 'rado' 4 +26791 'rael' 4 +26792 'raft' 4 +26793 'rage' 4 +26794 'raid' 4 +26795 'rail' 4 +26796 'rain' 4 +26797 'rais' 4 +26798 'rait' 4 +26799 'rale' 4 +26800 'rama' 4 +26801 'rame' 4 +26802 'rams' 4 +26803 'rand' 4 +26804 'rane' 4 +26805 'rang' 4 +26806 'rank' 4 +26807 'rano' 4 +26808 'rans' 4 +26809 'rant' 4 +26810 'raph' 4 +26811 'rare' 4 +26812 'rary' 4 +26813 'rase' 4 +26814 'rast' 4 +26815 'rate' 4 +26816 'rats' 4 +26817 'raud' 4 +26818 'rawl' 4 +26819 'rawn' 4 +26820 'rays' 4 +26821 'read' 4 +26822 'reak' 4 +26823 'real' 4 +26824 'ream' 4 +26825 'reas' 4 +26826 'reat' 4 +26827 'rece' 4 +26828 'reci' 4 +26829 'reck' 4 +26830 'rect' 4 +26831 'recv' 4 +26832 'rede' 4 +26833 'redi' 4 +26834 'redo' 4 +26835 'redu' 4 +26836 'reed' 4 +26837 'reek' 4 +26838 'reen' 4 +26839 'rees' 4 +26840 'reet' 4 +26841 'refs' 4 +26842 'regn' 4 +26843 'regs' 4 +26844 'reib' 4 +26845 'rein' 4 +26846 'rell' 4 +26847 'rels' 4 +26848 'relu' 4 +26849 'reme' 4 +26850 'rena' 4 +26851 'rend' 4 +26852 'rene' 4 +26853 'reno' 4 +26854 'rens' 4 +26855 'rent' 4 +26856 'reon' 4 +26857 'repo' 4 +26858 'repr' 4 +26859 'requ' 4 +26860 'rera' 4 +26861 'rero' 4 +26862 'resa' 4 +26863 'rese' 4 +26864 'resh' 4 +26865 'reso' 4 +26866 'resp' 4 +26867 'ress' 4 +26868 'rest' 4 +26869 'reta' 4 +26870 'rete' 4 +26871 'rets' 4 +26872 'rett' 4 +26873 'reve' 4 +26874 'rgba' 4 +26875 'riad' 4 +26876 'rial' 4 +26877 'rian' 4 +26878 'rias' 4 +26879 'rica' 4 +26880 'rice' 4 +26881 'rich' 4 +26882 'rick' 4 +26883 'rico' 4 +26884 'rics' 4 +26885 'rict' 4 +26886 'ride' 4 +26887 'ried' 4 +26888 'rief' 4 +26889 'riel' 4 +26890 'rien' 4 +26891 'rier' 4 +26892 'ries' 4 +26893 'riet' 4 +26894 'rift' 4 +26895 'rika' 4 +26896 'rike' 4 +26897 'rile' 4 +26898 'rimp' 4 +26899 'rina' 4 +26900 'rine' 4 +26901 'ring' 4 +26902 'rink' 4 +26903 'rint' 4 +26904 'rior' 4 +26905 'rios' 4 +26906 'riot' 4 
+26907 'ripp' 4 +26908 'ript' 4 +26909 'rire' 4 +26910 'rise' 4 +26911 'rish' 4 +26912 'risk' 4 +26913 'rist' 4 +26914 'rite' 4 +26915 'rito' 4 +26916 'ritt' 4 +26917 'ritz' 4 +26918 'rium' 4 +26919 'rive' 4 +26920 'rió' 4 +26921 'road' 4 +26922 'robe' 4 +26923 'rock' 4 +26924 'rodu' 4 +26925 'roid' 4 +26926 'rois' 4 +26927 'roit' 4 +26928 'roke' 4 +26929 'role' 4 +26930 'roll' 4 +26931 'roma' 4 +26932 'rome' 4 +26933 'romy' 4 +26934 'rone' 4 +26935 'rong' 4 +26936 'rons' 4 +26937 'ront' 4 +26938 'room' 4 +26939 'root' 4 +26940 'roph' 4 +26941 'rops' 4 +26942 'ropy' 4 +26943 'rors' 4 +26944 'rose' 4 +26945 'ross' 4 +26946 'rost' 4 +26947 'rote' 4 +26948 'rots' 4 +26949 'rott' 4 +26950 'roup' 4 +26951 'rous' 4 +26952 'rout' 4 +26953 'rove' 4 +26954 'rown' 4 +26955 'rows' 4 +26956 'rror' 4 +26957 'ruby' 4 +26958 'ruce' 4 +26959 'ruck' 4 +26960 'ruct' 4 +26961 'ruit' 4 +26962 'rule' 4 +26963 'runs' 4 +26964 'rupt' 4 +26965 'rust' 4 +26966 'ryan' 4 +26967 'rypt' 4 +26968 'rás' 4 +26969 'rän' 4 +26970 'rès' 4 +26971 'rée' 4 +26972 'rés' 4 +26973 'rét' 4 +26974 'ría' 4 +26975 'ród' 4 +26976 'rón' 4 +26977 'safe' 4 +26978 'said' 4 +26979 'sale' 4 +26980 'salt' 4 +26981 'same' 4 +26982 'samp' 4 +26983 'sand' 4 +26984 'sans' 4 +26985 'save' 4 +26986 'scal' 4 +26987 'scan' 4 +26988 'scar' 4 +26989 'sche' 4 +26990 'scre' 4 +26991 'scri' 4 +26992 'seat' 4 +26993 'seau' 4 +26994 'sect' 4 +26995 'seed' 4 +26996 'seek' 4 +26997 'seen' 4 +26998 'sein' 4 +26999 'self' 4 +27000 'sell' 4 +27001 'semb' 4 +27002 'semi' 4 +27003 'send' 4 +27004 'sens' 4 +27005 'sent' 4 +27006 'sequ' 4 +27007 'sers' 4 +27008 'sert' 4 +27009 'serv' 4 +27010 'sess' 4 +27011 'sets' 4 +27012 'sett' 4 +27013 'seud' 4 +27014 'shal' 4 +27015 'shan' 4 +27016 'shaw' 4 +27017 'ship' 4 +27018 'shit' 4 +27019 'shop' 4 +27020 'shot' 4 +27021 'show' 4 +27022 'shut' 4 +27023 'side' 4 +27024 'sign' 4 +27025 'sime' 4 +27026 'simp' 4 +27027 'sing' 4 +27028 'sink' 4 +27029 'site' 4 +27030 'size' 4 +27031 'skin' 4 +27032 'skip' 4 +27033 'ská' 4 +27034 'ské' 4 +27035 'ský' 4 +27036 'ską' 4 +27037 'slot' 4 +27038 'slow' 4 +27039 'slug' 4 +27040 'smtp' 4 +27041 'snap' 4 +27042 'snow' 4 +27043 'soap' 4 +27044 'sock' 4 +27045 'soft' 4 +27046 'sold' 4 +27047 'sole' 4 +27048 'some' 4 +27049 'song' 4 +27050 'sono' 4 +27051 'soon' 4 +27052 'sort' 4 +27053 'soup' 4 +27054 'spam' 4 +27055 'span' 4 +27056 'spar' 4 +27057 'spec' 4 +27058 'spin' 4 +27059 'spir' 4 +27060 'spot' 4 +27061 'sqrt' 4 +27062 'sson' 4 +27063 'stab' 4 +27064 'stad' 4 +27065 'stag' 4 +27066 'stal' 4 +27067 'stan' 4 +27068 'star' 4 +27069 'stat' 4 +27070 'stay' 4 +27071 'sted' 4 +27072 'stem' 4 +27073 'sten' 4 +27074 'step' 4 +27075 'ster' 4 +27076 'stic' 4 +27077 'stim' 4 +27078 'stit' 4 +27079 'stmt' 4 +27080 'ston' 4 +27081 'stop' 4 +27082 'stor' 4 +27083 'stra' 4 +27084 'stre' 4 +27085 'stri' 4 +27086 'stro' 4 +27087 'stru' 4 +27088 'stry' 4 +27089 'stub' 4 +27090 'stud' 4 +27091 'stä' 4 +27092 'stå' 4 +27093 'subs' 4 +27094 'succ' 4 +27095 'such' 4 +27096 'sudo' 4 +27097 'suit' 4 +27098 'summ' 4 +27099 'supp' 4 +27100 'sure' 4 +27101 'surf' 4 +27102 'swap' 4 +27103 'swer' 4 +27104 'sync' 4 +27105 'ség' 4 +27106 'tabs' 4 +27107 'tage' 4 +27108 'tags' 4 +27109 'tail' 4 +27110 'tain' 4 +27111 'tait' 4 +27112 'take' 4 +27113 'talk' 4 +27114 'tang' 4 +27115 'tanh' 4 +27116 'tank' 4 +27117 'task' 4 +27118 'tawa' 4 +27119 'tał' 4 +27120 'team' 4 +27121 'tech' 4 +27122 'teen' 4 +27123 'tegr' 4 +27124 'teil' 4 +27125 'tein' 4 +27126 'tele' 4 +27127 'tell' 4 +27128 'temp' 4 +27129 'tent' 4 
+27130 'tera' 4 +27131 'tere' 4 +27132 'term' 4 +27133 'tern' 4 +27134 'tero' 4 +27135 'ters' 4 +27136 'tery' 4 +27137 'test' 4 +27138 'tesy' 4 +27139 'text' 4 +27140 'thal' 4 +27141 'than' 4 +27142 'that' 4 +27143 'thel' 4 +27144 'them' 4 +27145 'then' 4 +27146 'ther' 4 +27147 'thes' 4 +27148 'they' 4 +27149 'thin' 4 +27150 'this' 4 +27151 'thon' 4 +27152 'thor' 4 +27153 'thro' 4 +27154 'thur' 4 +27155 'thus' 4 +27156 'tica' 4 +27157 'tick' 4 +27158 'tico' 4 +27159 'tics' 4 +27160 'tier' 4 +27161 'ties' 4 +27162 'tiff' 4 +27163 'tikz' 4 +27164 'tile' 4 +27165 'time' 4 +27166 'ting' 4 +27167 'tiny' 4 +27168 'tion' 4 +27169 'tipo' 4 +27170 'tips' 4 +27171 'toBe' 4 +27172 'todo' 4 +27173 'tone' 4 +27174 'tons' 4 +27175 'took' 4 +27176 'tool' 4 +27177 'toon' 4 +27178 'tour' 4 +27179 'tout' 4 +27180 'town' 4 +27181 'trac' 4 +27182 'trad' 4 +27183 'trak' 4 +27184 'tran' 4 +27185 'trap' 4 +27186 'tras' 4 +27187 'tree' 4 +27188 'tres' 4 +27189 'trib' 4 +27190 'trie' 4 +27191 'trig' 4 +27192 'trim' 4 +27193 'trip' 4 +27194 'tron' 4 +27195 'true' 4 +27196 'ttes' 4 +27197 'tube' 4 +27198 'ture' 4 +27199 'turn' 4 +27200 'type' 4 +27201 'uala' 4 +27202 'uali' 4 +27203 'uant' 4 +27204 'uart' 4 +27205 'uary' 4 +27206 'uate' 4 +27207 'ubar' 4 +27208 'uben' 4 +27209 'uber' 4 +27210 'ubes' 4 +27211 'ubic' 4 +27212 'uble' 4 +27213 'ubre' 4 +27214 'ucci' 4 +27215 'uced' 4 +27216 'ucer' 4 +27217 'uces' 4 +27218 'ucha' 4 +27219 'uche' 4 +27220 'uchi' 4 +27221 'uchs' 4 +27222 'ucht' 4 +27223 'ucid' 4 +27224 'ucks' 4 +27225 'ucky' 4 +27226 'ucle' 4 +27227 'udad' 4 +27228 'uded' 4 +27229 'uden' 4 +27230 'uder' 4 +27231 'udes' 4 +27232 'udge' 4 +27233 'udio' 4 +27234 'udos' 4 +27235 'uego' 4 +27236 'ueil' 4 +27237 'uela' 4 +27238 'uels' 4 +27239 'uent' 4 +27240 'uers' 4 +27241 'uese' 4 +27242 'uest' 4 +27243 'ueur' 4 +27244 'ufen' 4 +27245 'uffs' 4 +27246 'uffy' 4 +27247 'ugal' 4 +27248 'ugar' 4 +27249 'ugby' 4 +27250 'ugen' 4 +27251 'ught' 4 +27252 'ugin' 4 +27253 'uild' 4 +27254 'uilt' 4 +27255 'uing' 4 +27256 'uins' 4 +27257 'uint' 4 +27258 'uish' 4 +27259 'uite' 4 +27260 'uits' 4 +27261 'uity' 4 +27262 'ují' 4 +27263 'ują' 4 +27264 'ukes' 4 +27265 'ular' 4 +27266 'ulas' 4 +27267 'uled' 4 +27268 'ulen' 4 +27269 'uler' 4 +27270 'ules' 4 +27271 'ulet' 4 +27272 'ulia' 4 +27273 'ulin' 4 +27274 'ulis' 4 +27275 'ulla' 4 +27276 'ulle' 4 +27277 'ulli' 4 +27278 'ulls' 4 +27279 'ully' 4 +27280 'ulos' 4 +27281 'ulpt' 4 +27282 'ulse' 4 +27283 'ulti' 4 +27284 'ults' 4 +27285 'ulty' 4 +27286 'ultz' 4 +27287 'ului' 4 +27288 'ulum' 4 +27289 'ulus' 4 +27290 'umab' 4 +27291 'uman' 4 +27292 'umar' 4 +27293 'umas' 4 +27294 'umat' 4 +27295 'umbn' 4 +27296 'umbo' 4 +27297 'umbs' 4 +27298 'umed' 4 +27299 'umen' 4 +27300 'umer' 4 +27301 'umes' 4 +27302 'umin' 4 +27303 'ummy' 4 +27304 'umni' 4 +27305 'umor' 4 +27306 'umph' 4 +27307 'umps' 4 +27308 'umpy' 4 +27309 'unal' 4 +27310 'unar' 4 +27311 'unas' 4 +27312 'unce' 4 +27313 'unch' 4 +27314 'unci' 4 +27315 'unct' 4 +27316 'unda' 4 +27317 'unde' 4 +27318 'undo' 4 +27319 'unds' 4 +27320 'undy' 4 +27321 'uned' 4 +27322 'uner' 4 +27323 'unes' 4 +27324 'unge' 4 +27325 'ungs' 4 +27326 'unic' 4 +27327 'unik' 4 +27328 'uniq' 4 +27329 'unit' 4 +27330 'unix' 4 +27331 'unks' 4 +27332 'unkt' 4 +27333 'unos' 4 +27334 'unta' 4 +27335 'unte' 4 +27336 'unto' 4 +27337 'unts' 4 +27338 'untu' 4 +27339 'unya' 4 +27340 'uous' 4 +27341 'upal' 4 +27342 'uper' 4 +27343 'upid' 4 +27344 'uple' 4 +27345 'upon' 4 +27346 'urai' 4 +27347 'ural' 4 +27348 'uran' 4 +27349 'uras' 4 +27350 'urch' 4 +27351 'urdy' 4 
+27352 'ured' 4 +27353 'uren' 4 +27354 'urer' 4 +27355 'ures' 4 +27356 'uria' 4 +27357 'uris' 4 +27358 'urls' 4 +27359 'uron' 4 +27360 'urop' 4 +27361 'urre' 4 +27362 'urry' 4 +27363 'urse' 4 +27364 'urst' 4 +27365 'urus' 4 +27366 'usal' 4 +27367 'usat' 4 +27368 'usch' 4 +27369 'used' 4 +27370 'user' 4 +27371 'uses' 4 +27372 'uset' 4 +27373 'ushi' 4 +27374 'usic' 4 +27375 'ussy' 4 +27376 'usta' 4 +27377 'usto' 4 +27378 'ustr' 4 +27379 'utan' 4 +27380 'utar' 4 +27381 'utch' 4 +27382 'uted' 4 +27383 'uten' 4 +27384 'uter' 4 +27385 'utes' 4 +27386 'util' 4 +27387 'utor' 4 +27388 'utos' 4 +27389 'utra' 4 +27390 'utta' 4 +27391 'utto' 4 +27392 'uuid' 4 +27393 'uvre' 4 +27394 'uzzi' 4 +27395 'uzzy' 4 +27396 'ués' 4 +27397 'vais' 4 +27398 'vale' 4 +27399 'vals' 4 +27400 'valu' 4 +27401 'vana' 4 +27402 'vant' 4 +27403 'vard' 4 +27404 'vare' 4 +27405 'vari' 4 +27406 'vars' 4 +27407 'vecs' 4 +27408 'vect' 4 +27409 'veis' 4 +27410 'vell' 4 +27411 'velt' 4 +27412 'vely' 4 +27413 'vens' 4 +27414 'vent' 4 +27415 'verb' 4 +27416 'vere' 4 +27417 'vern' 4 +27418 'vers' 4 +27419 'vert' 4 +27420 'very' 4 +27421 'vest' 4 +27422 'vice' 4 +27423 'vict' 4 +27424 'vide' 4 +27425 'vier' 4 +27426 'view' 4 +27427 'vill' 4 +27428 'vine' 4 +27429 'ving' 4 +27430 'viol' 4 +27431 'virt' 4 +27432 'vity' 4 +27433 'vić' 4 +27434 'vlan' 4 +27435 'void' 4 +27436 'voir' 4 +27437 'voke' 4 +27438 'volt' 4 +27439 'vote' 4 +27440 'vous' 4 +27441 'vron' 4 +27442 'ván' 4 +27443 'vés' 4 +27444 'wait' 4 +27445 'wake' 4 +27446 'wald' 4 +27447 'walk' 4 +27448 'wall' 4 +27449 'wand' 4 +27450 'wang' 4 +27451 'want' 4 +27452 'ward' 4 +27453 'ware' 4 +27454 'warf' 4 +27455 'warm' 4 +27456 'warn' 4 +27457 'wart' 4 +27458 'warz' 4 +27459 'wash' 4 +27460 'wave' 4 +27461 'ways' 4 +27462 'weak' 4 +27463 'wear' 4 +27464 'weed' 4 +27465 'week' 4 +27466 'ween' 4 +27467 'weep' 4 +27468 'weet' 4 +27469 'well' 4 +27470 'wend' 4 +27471 'went' 4 +27472 'were' 4 +27473 'wers' 4 +27474 'wert' 4 +27475 'west' 4 +27476 'what' 4 +27477 'whel' 4 +27478 'when' 4 +27479 'wich' 4 +27480 'wick' 4 +27481 'wide' 4 +27482 'wife' 4 +27483 'wifi' 4 +27484 'wiki' 4 +27485 'wild' 4 +27486 'will' 4 +27487 'wind' 4 +27488 'wine' 4 +27489 'wing' 4 +27490 'wire' 4 +27491 'wise' 4 +27492 'wish' 4 +27493 'with' 4 +27494 'witz' 4 +27495 'wią' 4 +27496 'wię' 4 +27497 'wner' 4 +27498 'wolf' 4 +27499 'wood' 4 +27500 'word' 4 +27501 'work' 4 +27502 'worm' 4 +27503 'wort' 4 +27504 'wrap' 4 +27505 'writ' 4 +27506 'wär' 4 +27507 'wür' 4 +27508 'xico' 4 +27509 'ximo' 4 +27510 'xlim' 4 +27511 'xlsx' 4 +27512 'xmax' 4 +27513 'xton' 4 +27514 'xxxx' 4 +27515 'yaml' 4 +27516 'yang' 4 +27517 'yard' 4 +27518 'ycle' 4 +27519 'ydia' 4 +27520 'ydro' 4 +27521 'year' 4 +27522 'yect' 4 +27523 'yers' 4 +27524 'ygon' 4 +27525 'ying' 4 +27526 'ylan' 4 +27527 'yles' 4 +27528 'ylim' 4 +27529 'ylon' 4 +27530 'ylum' 4 +27531 'ymax' 4 +27532 'ymph' 4 +27533 'ynam' 4 +27534 'ynch' 4 +27535 'ynes' 4 +27536 'yond' 4 +27537 'your' 4 +27538 'yout' 4 +27539 'ypes' 4 +27540 'yrus' 4 +27541 'yses' 4 +27542 'ysis' 4 +27543 'yson' 4 +27544 'ysql' 4 +27545 'ytic' 4 +27546 'yyyy' 4 +27547 'zahl' 4 +27548 'zech' 4 +27549 'zeit' 4 +27550 'zens' 4 +27551 'zent' 4 +27552 'zero' 4 +27553 'zeta' 4 +27554 'zeug' 4 +27555 'zeń' 4 +27556 'ześ' 4 +27557 'zhen' 4 +27558 'zhou' 4 +27559 'zial' 4 +27560 'ziel' 4 +27561 'zier' 4 +27562 'zing' 4 +27563 'ził' 4 +27564 'zone' 4 +27565 'zoom' 4 +27566 'zung' 4 +27567 'zyme' 4 +27568 'zyć' 4 +27569 'zyż' 4 +27570 'zzle' 4 +27571 'zés' 4 +27572 'zös' 4 +27573 'ząd' 4 +27574 'ząt' 4 
+27575 '}}' 5 +32856 '="../' 5 +32857 '=====' 5 +32858 'ABASE' 5 +32859 'ACION' 5 +32860 'ACTER' 5 +32861 'ADMIN' 5 +32862 'ALIGN' 5 +32863 'ALLOW' 5 +32864 'ALTER' 5 +32865 'AMPLE' 5 +32866 'ANNEL' 5 +32867 'ANTLR' 5 +32868 'APTER' 5 +32869 'ARGET' 5 +32870 'ARRAY' 5 +32871 'ASCII' 5 +32872 'ATING' 5 +32873 'ATION' 5 +32874 'ATIVE' 5 +32875 'ATURE' 5 +32876 'About' 5 +32877 'Above' 5 +32878 'Activ' 5 +32879 'Actor' 5 +32880 'Added' 5 +32881 'Addon' 5 +32882 'Admin' 5 +32883 'After' 5 +32884 'Again' 5 +32885 'Agent' 5 +32886 'Alarm' 5 +32887 'Album' 5 +32888 'Alert' 5 +32889 'Alias' 5 +32890 'Alice' 5 +32891 'Align' 5 +32892 'Alive' 5 +32893 'Allen' 5 +32894 'Alloc' 5 +32895 'Allow' 5 +32896 'Along' 5 +32897 'Alpha' 5 +32898 'Alter' 5 +32899 'Among' 5 +32900 'Analy' 5 +32901 'Andre' 5 +32902 'Angel' 5 +32903 'Angle' 5 +32904 'Apart' 5 +32905 'Apple' 5 +32906 'Apply' 5 +32907 'Appro' 5 +32908 'April' 5 +32909 'Arena' 5 +32910 'Arial' 5 +32911 'Armor' 5 +32912 'Array' 5 +32913 'Arrow' 5 +32914 'Asian' 5 +32915 'Asked' 5 +32916 'Asset' 5 +32917 'Async' 5 +32918 'Atlas' 5 +32919 'Attrs' 5 +32920 'Audio' 5 +32921 'Audit' 5 +32922 'Autom' 5 +32923 'Aware' 5 +32924 'Azure' 5 +32925 'BEGIN' 5 +32926 'BLACK' 5 +32927 'BLOCK' 5 +32928 'BOARD' 5 +32929 'BOOST' 5 +32930 'BUILD' 5 +32931 'Based' 5 +32932 'Basic' 5 +32933 'Batch' 5 +32934 'Beans' 5 +32935 'Begin' 5 +32936 'Being' 5 +32937 'Below' 5 +32938 'Berry' 5 +32939 'Billy' 5 +32940 'Birth' 5 +32941 'Black' 5 +32942 'Blank' 5 +32943 'Block' 5 +32944 'Blood' 5 +32945 'Board' 5 +32946 'Bonus' 5 +32947 'Books' 5 +32948 'Boost' 5 +32949 'Bound' 5 +32950 'Brain' 5 +32951 'Brand' 5 +32952 'Break' 5 +32953 'Brian' 5 +32954 'Brien' 5 +32955 'Bring' 5 +32956 'Broad' 5 +32957 'Brown' 5 +32958 'Brush' 5 +32959 'Build' 5 +32960 'Built' 5 +32961 'Bytes' 5 +32962 'Bạn' 5 +32963 'CACHE' 5 +32964 'CCESS' 5 +32965 'CDATA' 5 +32966 'CHANT' 5 +32967 'CHECK' 5 +32968 'CLAIM' 5 +32969 'CLASS' 5 +32970 'CLEAR' 5 +32971 'CLUDE' 5 +32972 'COLOR' 5 +32973 'CONST' 5 +32974 'COUNT' 5 +32975 'COVID' 5 +32976 'CRIPT' 5 +32977 'CRYPT' 5 +32978 'CTION' 5 +32979 'CTYPE' 5 +32980 'Cache' 5 +32981 'Calls' 5 +32982 'Carol' 5 +32983 'Catal' 5 +32984 'Catch' 5 +32985 'Cause' 5 +32986 'Cells' 5 +32987 'Chain' 5 +32988 'Chang' 5 +32989 'Chars' 5 +32990 'Chart' 5 +32991 'Check' 5 +32992 'Chief' 5 +32993 'Child' 5 +32994 'China' 5 +32995 'Chris' 5 +32996 'Chunk' 5 +32997 'Civil' 5 +32998 'Claim' 5 +32999 'Class' 5 +33000 'Clean' 5 +33001 'Clear' 5 +33002 'Click' 5 +33003 'Clock' 5 +33004 'Clone' 5 +33005 'Close' 5 +33006 'Cloud' 5 +33007 'Codec' 5 +33008 'Codes' 5 +33009 'Color' 5 +33010 'Combo' 5 +33011 'Compl' 5 +33012 'Const' 5 +33013 'Contr' 5 +33014 'Coord' 5 +33015 'Could' 5 +33016 'Count' 5 +33017 'Court' 5 +33018 'Cover' 5 +33019 'Craft' 5 +33020 'Creat' 5 +33021 'Cross' 5 +33022 'Crypt' 5 +33023 'Curve' 5 +33024 'Cycle' 5 +33025 'Cómo' 5 +33026 'DEBUG' 5 +33027 'DELAY' 5 +33028 'DEPTH' 5 +33029 'Daily' 5 +33030 'Dates' 5 +33031 'Datum' 5 +33032 'David' 5 +33033 'Davis' 5 +33034 'Death' 5 +33035 'Debug' 5 +33036 'Decor' 5 +33037 'Delay' 5 +33038 'Deleg' 5 +33039 'Delta' 5 +33040 'Dense' 5 +33041 'Depth' 5 +33042 'Digit' 5 +33043 'Dirty' 5 +33044 'Domin' 5 +33045 'Draft' 5 +33046 'Dream' 5 +33047 'Drive' 5 +33048 'Dummy' 5 +33049 'EMAIL' 5 +33050 'EMBER' 5 +33051 'EMENT' 5 +33052 'EMPTY' 5 +33053 'ENAME' 5 +33054 'ENCES' 5 +33055 'ENDER' 5 +33056 'ENGTH' 5 +33057 'ENTER' 5 +33058 'ENTRY' 5 +33059 'EQUAL' 5 +33060 'ERROR' 5 +33061 'ETHER' 5 +33062 'ETHOD' 5 +33063 'EVENT' 5 +33064 
'EXIST' 5 +33065 'Early' 5 +33066 'Earth' 5 +33067 'Edges' 5 +33068 'Eight' 5 +33069 'Elect' 5 +33070 'Email' 5 +33071 'Embed' 5 +33072 'Emily' 5 +33073 'Empty' 5 +33074 'Enjoy' 5 +33075 'Enter' 5 +33076 'Entry' 5 +33077 'Epoch' 5 +33078 'Equal' 5 +33079 'Error' 5 +33080 'Estim' 5 +33081 'Evalu' 5 +33082 'Event' 5 +33083 'Every' 5 +33084 'Exact' 5 +33085 'Excel' 5 +33086 'Exist' 5 +33087 'Extra' 5 +33088 'FALSE' 5 +33089 'FAULT' 5 +33090 'FIELD' 5 +33091 'FILES' 5 +33092 'FIRST' 5 +33093 'FIXME' 5 +33094 'FLAGS' 5 +33095 'FLOAT' 5 +33096 'FOUND' 5 +33097 'FRAME' 5 +33098 'Faces' 5 +33099 'False' 5 +33100 'Fatal' 5 +33101 'Fault' 5 +33102 'Fetch' 5 +33103 'Field' 5 +33104 'Files' 5 +33105 'Final' 5 +33106 'First' 5 +33107 'Fixed' 5 +33108 'Flags' 5 +33109 'Flash' 5 +33110 'Float' 5 +33111 'Floor' 5 +33112 'Flush' 5 +33113 'Focus' 5 +33114 'Force' 5 +33115 'Forms' 5 +33116 'Forum' 5 +33117 'Found' 5 +33118 'Frame' 5 +33119 'Franc' 5 +33120 'Frank' 5 +33121 'Fresh' 5 +33122 'Front' 5 +33123 'GENER' 5 +33124 'GRAPH' 5 +33125 'GREEN' 5 +33126 'GRESS' 5 +33127 'GROUP' 5 +33128 'Games' 5 +33129 'Gamma' 5 +33130 'Gener' 5 +33131 'Genre' 5 +33132 'Georg' 5 +33133 'Getty' 5 +33134 'Ghost' 5 +33135 'Given' 5 +33136 'Glyph' 5 +33137 'Going' 5 +33138 'Grade' 5 +33139 'Grand' 5 +33140 'Grant' 5 +33141 'Graph' 5 +33142 'Great' 5 +33143 'Greek' 5 +33144 'Green' 5 +33145 'Group' 5 +33146 'Guard' 5 +33147 'Guest' 5 +33148 'Guide' 5 +33149 'Guild' 5 +33150 'HTTPS' 5 +33151 'Happy' 5 +33152 'Harry' 5 +33153 'Heart' 5 +33154 'Heavy' 5 +33155 'Hello' 5 +33156 'Henry' 5 +33157 'Hotel' 5 +33158 'Hours' 5 +33159 'House' 5 +33160 'Hover' 5 +33161 'Human' 5 +33162 'Hydro' 5 +33163 'Hyper' 5 +33164 'IDDEN' 5 +33165 'IDDLE' 5 +33166 'IDENT' 5 +33167 'IFIED' 5 +33168 'ILITY' 5 +33169 'IMAGE' 5 +33170 'IMARY' 5 +33171 'INDEX' 5 +33172 'INESS' 5 +33173 'INPUT' 5 +33174 'INTER' 5 +33175 'ISHED' 5 +33176 'ISING' 5 +33177 'ISION' 5 +33178 'ISTER' 5 +33179 'ITIES' 5 +33180 'ITION' 5 +33181 'IVATE' 5 +33182 'IVERS' 5 +33183 'Icons' 5 +33184 'Ident' 5 +33185 'Image' 5 +33186 'Impro' 5 +33187 'Incre' 5 +33188 'Index' 5 +33189 'India' 5 +33190 'Infos' 5 +33191 'Inner' 5 +33192 'Input' 5 +33193 'Instr' 5 +33194 'Intel' 5 +33195 'Inter' 5 +33196 'Intro' 5 +33197 'Islam' 5 +33198 'Issue' 5 +33199 'Items' 5 +33200 'Jacob' 5 +33201 'James' 5 +33202 'Japan' 5 +33203 'Jason' 5 +33204 'Jesus' 5 +33205 'Jimmy' 5 +33206 'Joint' 5 +33207 'Jones' 5 +33208 'Judge' 5 +33209 'KNOWN' 5 +33210 'Kelly' 5 +33211 'Kevin' 5 +33212 'Known' 5 +33213 'Krist' 5 +33214 'LABEL' 5 +33215 'LEASE' 5 +33216 'LEVEL' 5 +33217 'LIGHT' 5 +33218 'LIMIT' 5 +33219 'LOBAL' 5 +33220 'LOCAL' 5 +33221 'LOGIN' 5 +33222 'Label' 5 +33223 'Labor' 5 +33224 'Large' 5 +33225 'Later' 5 +33226 'Latin' 5 +33227 'Laura' 5 +33228 'Layer' 5 +33229 'Leaks' 5 +33230 'Learn' 5 +33231 'Leave' 5 +33232 'Legal' 5 +33233 'Lemma' 5 +33234 'Level' 5 +33235 'Lewis' 5 +33236 'Lexer' 5 +33237 'Light' 5 +33238 'Limit' 5 +33239 'Lines' 5 +33240 'Links' 5 +33241 'Linux' 5 +33242 'Lists' 5 +33243 'Liter' 5 +33244 'Local' 5 +33245 'Logic' 5 +33246 'Login' 5 +33247 'Looks' 5 +33248 'Louis' 5 +33249 'Lower' 5 +33250 'MATCH' 5 +33251 'MENTS' 5 +33252 'MODEL' 5 +33253 'MONTH' 5 +33254 'Macro' 5 +33255 'Magic' 5 +33256 'Major' 5 +33257 'Maker' 5 +33258 'March' 5 +33259 'Marco' 5 +33260 'Maria' 5 +33261 'Marie' 5 +33262 'Mario' 5 +33263 'Match' 5 +33264 'Maybe' 5 +33265 'Mayor' 5 +33266 'Means' 5 +33267 'Media' 5 +33268 'Merge' 5 +33269 'Metal' 5 +33270 'Meter' 5 +33271 'Miami' 5 +33272 'Micro' 5 +33273 
'Minor' 5 +33274 'Mixed' 5 +33275 'Mixin' 5 +33276 'Modal' 5 +33277 'Model' 5 +33278 'Modes' 5 +33279 'Money' 5 +33280 'Mongo' 5 +33281 'Month' 5 +33282 'Motor' 5 +33283 'Mount' 5 +33284 'Mouse' 5 +33285 'Movie' 5 +33286 'Multi' 5 +33287 'Music' 5 +33288 'MySQL' 5 +33289 'Named' 5 +33290 'Names' 5 +33291 'Neill' 5 +33292 'Never' 5 +33293 'Night' 5 +33294 'Nodes' 5 +33295 'Noise' 5 +33296 'North' 5 +33297 'Notes' 5 +33298 'Numer' 5 +33299 'OAuth' 5 +33300 'ODULE' 5 +33301 'ORDER' 5 +33302 'ORMAL' 5 +33303 'OTHER' 5 +33304 'OURCE' 5 +33305 'Obama' 5 +33306 'Occup' 5 +33307 'Offer' 5 +33308 'Olymp' 5 +33309 'Omega' 5 +33310 'Optim' 5 +33311 'Order' 5 +33312 'Organ' 5 +33313 'Other' 5 +33314 'Outer' 5 +33315 'Owner' 5 +33316 'PARAM' 5 +33317 'PATCH' 5 +33318 'PLIED' 5 +33319 'POINT' 5 +33320 'PRESS' 5 +33321 'PRINT' 5 +33322 'PROTO' 5 +33323 'Pager' 5 +33324 'Pages' 5 +33325 'Paint' 5 +33326 'Panel' 5 +33327 'Paper' 5 +33328 'Param' 5 +33329 'Paris' 5 +33330 'Parse' 5 +33331 'Parts' 5 +33332 'Party' 5 +33333 'Paste' 5 +33334 'Patch' 5 +33335 'Paths' 5 +33336 'Pause' 5 +33337 'Peter' 5 +33338 'Phase' 5 +33339 'Phone' 5 +33340 'Photo' 5 +33341 'Piece' 5 +33342 'Pitch' 5 +33343 'Pixel' 5 +33344 'Place' 5 +33345 'Plain' 5 +33346 'Plane' 5 +33347 'Plant' 5 +33348 'Plate' 5 +33349 'Point' 5 +33350 'Polit' 5 +33351 'Popup' 5 +33352 'Posts' 5 +33353 'Power' 5 +33354 'Press' 5 +33355 'Price' 5 +33356 'Prime' 5 +33357 'Print' 5 +33358 'Prior' 5 +33359 'Probe' 5 +33360 'Produ' 5 +33361 'Proof' 5 +33362 'Props' 5 +33363 'Proto' 5 +33364 'Proxy' 5 +33365 'Psych' 5 +33366 'QUERY' 5 +33367 'QUEST' 5 +33368 'Quant' 5 +33369 'Queen' 5 +33370 'Query' 5 +33371 'Quest' 5 +33372 'Queue' 5 +33373 'Quick' 5 +33374 'Quote' 5 +33375 'READY' 5 +33376 'REATE' 5 +33377 'RESET' 5 +33378 'RIGHT' 5 +33379 'ROUND' 5 +33380 'Radio' 5 +33381 'Raise' 5 +33382 'Range' 5 +33383 'Ratio' 5 +33384 'React' 5 +33385 'Ready' 5 +33386 'Refer' 5 +33387 'Regex' 5 +33388 'Reply' 5 +33389 'Reset' 5 +33390 'Retry' 5 +33391 'Right' 5 +33392 'River' 5 +33393 'Robin' 5 +33394 'Robot' 5 +33395 'Roger' 5 +33396 'Roles' 5 +33397 'Roman' 5 +33398 'Round' 5 +33399 'Route' 5 +33400 'Royal' 5 +33401 'Rules' 5 +33402 'SHIFT' 5 +33403 'SHORT' 5 +33404 'SPACE' 5 +33405 'SSION' 5 +33406 'STAND' 5 +33407 'START' 5 +33408 'STATE' 5 +33409 'STORE' 5 +33410 'STYLE' 5 +33411 'Saint' 5 +33412 'Sales' 5 +33413 'Santa' 5 +33414 'Sarah' 5 +33415 'Saved' 5 +33416 'Scale' 5 +33417 'Scene' 5 +33418 'Sched' 5 +33419 'Scope' 5 +33420 'Score' 5 +33421 'Scott' 5 +33422 'Sense' 5 +33423 'Separ' 5 +33424 'Setup' 5 +33425 'Seven' 5 +33426 'Shape' 5 +33427 'Share' 5 +33428 'Sharp' 5 +33429 'Sheet' 5 +33430 'Shell' 5 +33431 'Shift' 5 +33432 'Short' 5 +33433 'Sigma' 5 +33434 'Simon' 5 +33435 'Since' 5 +33436 'Sizer' 5 +33437 'Skill' 5 +33438 'Sleep' 5 +33439 'Slice' 5 +33440 'Slide' 5 +33441 'Small' 5 +33442 'Smart' 5 +33443 'Smith' 5 +33444 'Solar' 5 +33445 'Solid' 5 +33446 'Songs' 5 +33447 'Sorry' 5 +33448 'Sound' 5 +33449 'South' 5 +33450 'Space' 5 +33451 'Spain' 5 +33452 'Spark' 5 +33453 'Spawn' 5 +33454 'Spect' 5 +33455 'Speed' 5 +33456 'Spell' 5 +33457 'Split' 5 +33458 'Sport' 5 +33459 'Stack' 5 +33460 'Staff' 5 +33461 'Stage' 5 +33462 'Stamp' 5 +33463 'Stand' 5 +33464 'Stars' 5 +33465 'Start' 5 +33466 'State' 5 +33467 'Stats' 5 +33468 'Steps' 5 +33469 'Steve' 5 +33470 'Still' 5 +33471 'Stock' 5 +33472 'Stone' 5 +33473 'Store' 5 +33474 'Storm' 5 +33475 'Story' 5 +33476 'Strip' 5 +33477 'Study' 5 +33478 'Style' 5 +33479 'Suite' 5 +33480 'Super' 5 +33481 'Susan' 5 +33482 
'Sweet' 5 +33483 'Swift' 5 +33484 'TABLE' 5 +33485 'TEGER' 5 +33486 'TITLE' 5 +33487 'TOKEN' 5 +33488 'TRACE' 5 +33489 'TRACK' 5 +33490 'TRACT' 5 +33491 'TRAIN' 5 +33492 'TRANS' 5 +33493 'TYPES' 5 +33494 'Table' 5 +33495 'Taken' 5 +33496 'Tasks' 5 +33497 'Techn' 5 +33498 'Terms' 5 +33499 'Tests' 5 +33500 'Texas' 5 +33501 'Thank' 5 +33502 'Their' 5 +33503 'Theme' 5 +33504 'There' 5 +33505 'These' 5 +33506 'Theta' 5 +33507 'Thing' 5 +33508 'Think' 5 +33509 'Third' 5 +33510 'Those' 5 +33511 'Three' 5 +33512 'Throw' 5 +33513 'Thumb' 5 +33514 'Thêm' 5 +33515 'Tiles' 5 +33516 'Timer' 5 +33517 'Times' 5 +33518 'Title' 5 +33519 'ToOne' 5 +33520 'Today' 5 +33521 'Token' 5 +33522 'Tools' 5 +33523 'Topic' 5 +33524 'Total' 5 +33525 'Touch' 5 +33526 'Trace' 5 +33527 'Track' 5 +33528 'Trade' 5 +33529 'Train' 5 +33530 'Trait' 5 +33531 'Trans' 5 +33532 'Trial' 5 +33533 'Trump' 5 +33534 'Trust' 5 +33535 'Truth' 5 +33536 'Tuple' 5 +33537 'Tweet' 5 +33538 'Typed' 5 +33539 'Types' 5 +33540 'UMENT' 5 +33541 'USTOM' 5 +33542 'UTERS' 5 +33543 'UTION' 5 +33544 'Unary' 5 +33545 'Under' 5 +33546 'Union' 5 +33547 'Units' 5 +33548 'Unity' 5 +33549 'Until' 5 +33550 'Upper' 5 +33551 'Urban' 5 +33552 'Usage' 5 +33553 'Users' 5 +33554 'Using' 5 +33555 'Utils' 5 +33556 'VALID' 5 +33557 'VALUE' 5 +33558 'VIDEO' 5 +33559 'VIDIA' 5 +33560 'Valid' 5 +33561 'Valor' 5 +33562 'Value' 5 +33563 'Video' 5 +33564 'Views' 5 +33565 'Visit' 5 +33566 'Você' 5 +33567 'Voice' 5 +33568 'WHERE' 5 +33569 'WHITE' 5 +33570 'WIDTH' 5 +33571 'WRITE' 5 +33572 'Watch' 5 +33573 'Water' 5 +33574 'Wheel' 5 +33575 'Where' 5 +33576 'Which' 5 +33577 'While' 5 +33578 'White' 5 +33579 'Whole' 5 +33580 'Width' 5 +33581 'Women' 5 +33582 'Words' 5 +33583 'Works' 5 +33584 'World' 5 +33585 'Would' 5 +33586 'Write' 5 +33587 'Years' 5 +33588 'Young' 5 +33589 '[:,:,' 5 +33590 '[…]' 5 +33591 '\\":\\"' 5 +33592 '^−^' 5 +33593 'abama' 5 +33594 'abase' 5 +33595 'abbit' 5 +33596 'abeth' 5 +33597 'abled' 5 +33598 'ables' 5 +33599 'abort' 5 +33600 'about' 5 +33601 'above' 5 +33602 'abric' 5 +33603 'accum' 5 +33604 'accur' 5 +33605 'aceae' 5 +33606 'acent' 5 +33607 'acerb' 5 +33608 'aceut' 5 +33609 'ached' 5 +33610 'achel' 5 +33611 'achen' 5 +33612 'acher' 5 +33613 'aches' 5 +33614 'acial' 5 +33615 'acies' 5 +33616 'acing' 5 +33617 'acion' 5 +33618 'acity' 5 +33619 'ació' 5 +33620 'ację' 5 +33621 'acked' 5 +33622 'acker' 5 +33623 'acket' 5 +33624 'acles' 5 +33625 'acons' 5 +33626 'acted' 5 +33627 'acter' 5 +33628 'actic' 5 +33629 'activ' 5 +33630 'actly' 5 +33631 'actor' 5 +33632 'actus' 5 +33633 'acute' 5 +33634 'adapt' 5 +33635 'adata' 5 +33636 'adays' 5 +33637 'addTo' 5 +33638 'added' 5 +33639 'adder' 5 +33640 'addle' 5 +33641 'addon' 5 +33642 'adena' 5 +33643 'adeon' 5 +33644 'adequ' 5 +33645 'aders' 5 +33646 'adesh' 5 +33647 'adian' 5 +33648 'adier' 5 +33649 'adies' 5 +33650 'ading' 5 +33651 'adium' 5 +33652 'admin' 5 +33653 'adoop' 5 +33654 'adora' 5 +33655 'adors' 5 +33656 'adows' 5 +33657 'adult' 5 +33658 'adém' 5 +33659 'afety' 5 +33660 'affer' 5 +33661 'after' 5 +33662 'again' 5 +33663 'agara' 5 +33664 'agens' 5 +33665 'agent' 5 +33666 'agers' 5 +33667 'agged' 5 +33668 'agger' 5 +33669 'aggio' 5 +33670 'agher' 5 +33671 'agine' 5 +33672 'aging' 5 +33673 'agles' 5 +33674 'agner' 5 +33675 'agnet' 5 +33676 'agram' 5 +33677 'agree' 5 +33678 'agrid' 5 +33679 'agues' 5 +33680 'ahead' 5 +33681 'ahoma' 5 +33682 'ahren' 5 +33683 'aient' 5 +33684 'ailed' 5 +33685 'aille' 5 +33686 'ained' 5 +33687 'ainen' 5 +33688 'ainer' 5 +33689 'aines' 5 +33690 'aired' 5 +33691 'aires' 
5 +33692 'aiser' 5 +33693 'aises' 5 +33694 'aison' 5 +33695 'ając' 5 +33696 'akers' 5 +33697 'aking' 5 +33698 'akter' 5 +33699 'aland' 5 +33700 'alarm' 5 +33701 'album' 5 +33702 'alert' 5 +33703 'ależ' 5 +33704 'algia' 5 +33705 'alian' 5 +33706 'alias' 5 +33707 'alice' 5 +33708 'alien' 5 +33709 'align' 5 +33710 'aline' 5 +33711 'aling' 5 +33712 'alion' 5 +33713 'alist' 5 +33714 'ality' 5 +33715 'alive' 5 +33716 'alkyl' 5 +33717 'allah' 5 +33718 'allas' 5 +33719 'alled' 5 +33720 'allel' 5 +33721 'allen' 5 +33722 'aller' 5 +33723 'alles' 5 +33724 'allet' 5 +33725 'allic' 5 +33726 'alloc' 5 +33727 'allow' 5 +33728 'alone' 5 +33729 'along' 5 +33730 'alore' 5 +33731 'alous' 5 +33732 'alpha' 5 +33733 'alter' 5 +33734 'amate' 5 +33735 'ambda' 5 +33736 'amber' 5 +33737 'ambia' 5 +33738 'ambig' 5 +33739 'amble' 5 +33740 'amboo' 5 +33741 'ament' 5 +33742 'amera' 5 +33743 'amide' 5 +33744 'amily' 5 +33745 'amina' 5 +33746 'amine' 5 +33747 'aming' 5 +33748 'amino' 5 +33749 'amins' 5 +33750 'ammad' 5 +33751 'ammed' 5 +33752 'ammer' 5 +33753 'among' 5 +33754 'amoto' 5 +33755 'amour' 5 +33756 'amous' 5 +33757 'amped' 5 +33758 'ample' 5 +33759 'amura' 5 +33760 'analy' 5 +33761 'anced' 5 +33762 'ancel' 5 +33763 'ancer' 5 +33764 'ances' 5 +33765 'anche' 5 +33766 'ancia' 5 +33767 'andal' 5 +33768 'andan' 5 +33769 'andas' 5 +33770 'anded' 5 +33771 'andel' 5 +33772 'anden' 5 +33773 'ander' 5 +33774 'andez' 5 +33775 'andid' 5 +33776 'andin' 5 +33777 'andle' 5 +33778 'andom' 5 +33779 'andon' 5 +33780 'andra' 5 +33781 'andre' 5 +33782 'andro' 5 +33783 'andum' 5 +33784 'anean' 5 +33785 'anese' 5 +33786 'angan' 5 +33787 'anged' 5 +33788 'angel' 5 +33789 'angen' 5 +33790 'anger' 5 +33791 'anges' 5 +33792 'angle' 5 +33793 'anian' 5 +33794 'anine' 5 +33795 'aning' 5 +33796 'anish' 5 +33797 'anity' 5 +33798 'anium' 5 +33799 'anked' 5 +33800 'anmar' 5 +33801 'annah' 5 +33802 'anned' 5 +33803 'annel' 5 +33804 'anner' 5 +33805 'annes' 5 +33806 'annie' 5 +33807 'annon' 5 +33808 'annot' 5 +33809 'anova' 5 +33810 'ansas' 5 +33811 'ansen' 5 +33812 'ansom' 5 +33813 'anson' 5 +33814 'antal' 5 +33815 'antan' 5 +33816 'anted' 5 +33817 'anten' 5 +33818 'anter' 5 +33819 'antes' 5 +33820 'antha' 5 +33821 'antic' 5 +33822 'antis' 5 +33823 'antly' 5 +33824 'antom' 5 +33825 'anton' 5 +33826 'antry' 5 +33827 'anuts' 5 +33828 'anyon' 5 +33829 'ança' 5 +33830 'apers' 5 +33831 'apest' 5 +33832 'apeut' 5 +33833 'aping' 5 +33834 'apons' 5 +33835 'apore' 5 +33836 'apped' 5 +33837 'appen' 5 +33838 'apper' 5 +33839 'apple' 5 +33840 'apply' 5 +33841 'appro' 5 +33842 'apsed' 5 +33843 'apses' 5 +33844 'apter' 5 +33845 'aptic' 5 +33846 'aptop' 5 +33847 'arant' 5 +33848 'archy' 5 +33849 'arded' 5 +33850 'arden' 5 +33851 'ardin' 5 +33852 'ardon' 5 +33853 'areas' 5 +33854 'arena' 5 +33855 'arent' 5 +33856 'arest' 5 +33857 'areth' 5 +33858 'argar' 5 +33859 'arger' 5 +33860 'arget' 5 +33861 'argin' 5 +33862 'argon' 5 +33863 'arial' 5 +33864 'arian' 5 +33865 'arias' 5 +33866 'ariat' 5 +33867 'aries' 5 +33868 'arily' 5 +33869 'arine' 5 +33870 'aring' 5 +33871 'arios' 5 +33872 'arith' 5 +33873 'arity' 5 +33874 'arium' 5 +33875 'arius' 5 +33876 'arked' 5 +33877 'arker' 5 +33878 'armac' 5 +33879 'armed' 5 +33880 'armor' 5 +33881 'array' 5 +33882 'arrow' 5 +33883 'arser' 5 +33884 'arten' 5 +33885 'arter' 5 +33886 'arthy' 5 +33887 'artic' 5 +33888 'arton' 5 +33889 'arxiv' 5 +33890 'aría' 5 +33891 'asaki' 5 +33892 'asant' 5 +33893 'ascal' 5 +33894 'ascii' 5 +33895 'ascus' 5 +33896 'asers' 5 +33897 'ashed' 5 +33898 'ashes' 5 +33899 'asian' 5 +33900 'aside' 5 
+33901 'asing' 5 +33902 'asion' 5 +33903 'asive' 5 +33904 'asket' 5 +33905 'asons' 5 +33906 'asper' 5 +33907 'assed' 5 +33908 'assen' 5 +33909 'asser' 5 +33910 'asses' 5 +33911 'asset' 5 +33912 'assic' 5 +33913 'assin' 5 +33914 'assis' 5 +33915 'assoc' 5 +33916 'asted' 5 +33917 'aster' 5 +33918 'astes' 5 +33919 'astic' 5 +33920 'aston' 5 +33921 'astro' 5 +33922 'asure' 5 +33923 'asury' 5 +33924 'async' 5 +33925 'ataka' 5 +33926 'atche' 5 +33927 'ategy' 5 +33928 'ately' 5 +33929 'atern' 5 +33930 'aters' 5 +33931 'atest' 5 +33932 'ateur' 5 +33933 'atham' 5 +33934 'athan' 5 +33935 'athed' 5 +33936 'ather' 5 +33937 'athom' 5 +33938 'athon' 5 +33939 'atial' 5 +33940 'atica' 5 +33941 'atics' 5 +33942 'atile' 5 +33943 'ating' 5 +33944 'ation' 5 +33945 'atisf' 5 +33946 'atism' 5 +33947 'ativa' 5 +33948 'ative' 5 +33949 'ativo' 5 +33950 'atoes' 5 +33951 'atoms' 5 +33952 'atomy' 5 +33953 'atore' 5 +33954 'atori' 5 +33955 'ators' 5 +33956 'atory' 5 +33957 'atrix' 5 +33958 'atted' 5 +33959 'atten' 5 +33960 'atter' 5 +33961 'attle' 5 +33962 'attrs' 5 +33963 'atura' 5 +33964 'ature' 5 +33965 'atype' 5 +33966 'atég' 5 +33967 'audio' 5 +33968 'audit' 5 +33969 'aught' 5 +33970 'aukee' 5 +33971 'aurus' 5 +33972 'ausal' 5 +33973 'aused' 5 +33974 'auses' 5 +33975 'autom' 5 +33976 'autor' 5 +33977 'autos' 5 +33978 'autre' 5 +33979 'auté' 5 +33980 'avage' 5 +33981 'avail' 5 +33982 'avery' 5 +33983 'avian' 5 +33984 'avier' 5 +33985 'aving' 5 +33986 'avoid' 5 +33987 'avoir' 5 +33988 'avors' 5 +33989 'avour' 5 +33990 'await' 5 +33991 'award' 5 +33992 'aware' 5 +33993 'aways' 5 +33994 'axter' 5 +33995 'ayers' 5 +33996 'aying' 5 +33997 'aylor' 5 +33998 'ayout' 5 +33999 'azard' 5 +34000 'azine' 5 +34001 'azing' 5 +34002 'azole' 5 +34003 'azure' 5 +34004 'babel' 5 +34005 'bably' 5 +34006 'backs' 5 +34007 'badge' 5 +34008 'balls' 5 +34009 'bands' 5 +34010 'banks' 5 +34011 'based' 5 +34012 'basic' 5 +34013 'basis' 5 +34014 'batch' 5 +34015 'beans' 5 +34016 'becca' 5 +34017 'becue' 5 +34018 'begin' 5 +34019 'being' 5 +34020 'below' 5 +34021 'bench' 5 +34022 'benef' 5 +34023 'beros' 5 +34024 'berra' 5 +34025 'berry' 5 +34026 'berta' 5 +34027 'berto' 5 +34028 'binom' 5 +34029 'birds' 5 +34030 'birth' 5 +34031 'bject' 5 +34032 'black' 5 +34033 'blade' 5 +34034 'blank' 5 +34035 'blast' 5 +34036 'blems' 5 +34037 'blind' 5 +34038 'bling' 5 +34039 'block' 5 +34040 'blogs' 5 +34041 'blood' 5 +34042 'boBox' 5 +34043 'board' 5 +34044 'bones' 5 +34045 'books' 5 +34046 'boost' 5 +34047 'borne' 5 +34048 'bound' 5 +34049 'bourg' 5 +34050 'boxes' 5 +34051 'brace' 5 +34052 'brain' 5 +34053 'brand' 5 +34054 'brane' 5 +34055 'bread' 5 +34056 'break' 5 +34057 'brevi' 5 +34058 'brief' 5 +34059 'bring' 5 +34060 'broad' 5 +34061 'brook' 5 +34062 'brown' 5 +34063 'brush' 5 +34064 'bráz' 5 +34065 'bsite' 5 +34066 'bucks' 5 +34067 'build' 5 +34068 'built' 5 +34069 'buntu' 5 +34070 'burgh' 5 +34071 'burst' 5 +34072 'byter' 5 +34073 'bytes' 5 +34074 'cache' 5 +34075 'caffe' 5 +34076 'calls' 5 +34077 'camel' 5 +34078 'cards' 5 +34079 'caret' 5 +34080 'carry' 5 +34081 'cases' 5 +34082 'casts' 5 +34083 'catal' 5 +34084 'catch' 5 +34085 'cause' 5 +34086 'ccess' 5 +34087 'ccion' 5 +34088 'cció' 5 +34089 'ccoli' 5 +34090 'cdnjs' 5 +34091 'cdots' 5 +34092 'ceans' 5 +34093 'cedes' 5 +34094 'ceive' 5 +34095 'cells' 5 +34096 'cence' 5 +34097 'cents' 5 +34098 'cerpt' 5 +34099 'cesso' 5 +34100 'chaft' 5 +34101 'chain' 5 +34102 'chair' 5 +34103 'chang' 5 +34104 'chant' 5 +34105 'charg' 5 +34106 'chars' 5 +34107 'chart' 5 +34108 'check' 5 +34109 'chell' 5 
+34110 'chemy' 5 +34111 'cheon' 5 +34112 'chers' 5 +34113 'chest' 5 +34114 'chief' 5 +34115 'child' 5 +34116 'ching' 5 +34117 'chini' 5 +34118 'chlor' 5 +34119 'chool' 5 +34120 'chrom' 5 +34121 'chron' 5 +34122 'chten' 5 +34123 'chter' 5 +34124 'chunk' 5 +34125 'cible' 5 +34126 'cient' 5 +34127 'civil' 5 +34128 'ción' 5 +34129 'cknow' 5 +34130 'ckså' 5 +34131 'claim' 5 +34132 'clair' 5 +34133 'clamp' 5 +34134 'clang' 5 +34135 'class' 5 +34136 'clave' 5 +34137 'clean' 5 +34138 'clear' 5 +34139 'click' 5 +34140 'cline' 5 +34141 'cling' 5 +34142 'clock' 5 +34143 'clone' 5 +34144 'close' 5 +34145 'cloth' 5 +34146 'cloud' 5 +34147 'clude' 5 +34148 'clust' 5 +34149 'coach' 5 +34150 'codec' 5 +34151 'coded' 5 +34152 'coder' 5 +34153 'codes' 5 +34154 'coeff' 5 +34155 'cohol' 5 +34156 'coins' 5 +34157 'colon' 5 +34158 'color' 5 +34159 'combe' 5 +34160 'combo' 5 +34161 'comed' 5 +34162 'comes' 5 +34163 'comic' 5 +34164 'comma' 5 +34165 'compl' 5 +34166 'conda' 5 +34167 'conde' 5 +34168 'conom' 5 +34169 'const' 5 +34170 'contr' 5 +34171 'coord' 5 +34172 'cores' 5 +34173 'could' 5 +34174 'count' 5 +34175 'court' 5 +34176 'cover' 5 +34177 'craft' 5 +34178 'crawl' 5 +34179 'creat' 5 +34180 'creen' 5 +34181 'crete' 5 +34182 'crets' 5 +34183 'cribe' 5 +34184 'crime' 5 +34185 'cript' 5 +34186 'crire' 5 +34187 'croft' 5 +34188 'cross' 5 +34189 'crypt' 5 +34190 'ctica' 5 +34191 'ction' 5 +34192 'ctors' 5 +34193 'ctype' 5 +34194 'cubic' 5 +34195 'cular' 5 +34196 'cules' 5 +34197 'culos' 5 +34198 'culus' 5 +34199 'curve' 5 +34200 'cycle' 5 +34201 'daily' 5 +34202 'datab' 5 +34203 'datas' 5 +34204 'datat' 5 +34205 'dated' 5 +34206 'dater' 5 +34207 'dates' 5 +34208 'datum' 5 +34209 'death' 5 +34210 'debug' 5 +34211 'decay' 5 +34212 'decor' 5 +34213 'defer' 5 +34214 'defin' 5 +34215 'delay' 5 +34216 'deleg' 5 +34217 'delta' 5 +34218 'denly' 5 +34219 'dense' 5 +34220 'depth' 5 +34221 'deque' 5 +34222 'deriv' 5 +34223 'descr' 5 +34224 'devel' 5 +34225 'dfrac' 5 +34226 'digit' 5 +34227 'dimen' 5 +34228 'dings' 5 +34229 'dirty' 5 +34230 'doesn' 5 +34231 'doing' 5 +34232 'domin' 5 +34233 'doors' 5 +34234 'draft' 5 +34235 'dream' 5 +34236 'drive' 5 +34237 'dtype' 5 +34238 'duced' 5 +34239 'ducer' 5 +34240 'duino' 5 +34241 'dummy' 5 +34242 'earch' 5 +34243 'early' 5 +34244 'earth' 5 +34245 'ebook' 5 +34246 'ecess' 5 +34247 'ectar' 5 +34248 'ected' 5 +34249 'ector' 5 +34250 'edges' 5 +34251 'eding' 5 +34252 'eenth' 5 +34253 'eeper' 5 +34254 'efore' 5 +34255 'eigen' 5 +34256 'eight' 5 +34257 'eking' 5 +34258 'eland' 5 +34259 'elect' 5 +34260 'eless' 5 +34261 'elfth' 5 +34262 'elian' 5 +34263 'elijk' 5 +34264 'eline' 5 +34265 'eling' 5 +34266 'elist' 5 +34267 'elius' 5 +34268 'ellan' 5 +34269 'ellar' 5 +34270 'elled' 5 +34271 'ellen' 5 +34272 'eller' 5 +34273 'elles' 5 +34274 'ellig' 5 +34275 'ellij' 5 +34276 'ellow' 5 +34277 'elman' 5 +34278 'elong' 5 +34279 'elope' 5 +34280 'elsen' 5 +34281 'elson' 5 +34282 'elter' 5 +34283 'elves' 5 +34284 'email' 5 +34285 'emale' 5 +34286 'emann' 5 +34287 'emark' 5 +34288 'embed' 5 +34289 'ember' 5 +34290 'emble' 5 +34291 'embre' 5 +34292 'embro' 5 +34293 'ement' 5 +34294 'emies' 5 +34295 'emoji' 5 +34296 'emory' 5 +34297 'emplo' 5 +34298 'empor' 5 +34299 'empre' 5 +34300 'empty' 5 +34301 'emás' 5 +34302 'ename' 5 +34303 'enant' 5 +34304 'enary' 5 +34305 'enced' 5 +34306 'encer' 5 +34307 'ences' 5 +34308 'encia' 5 +34309 'encil' 5 +34310 'endar' 5 +34311 'endas' 5 +34312 'ended' 5 +34313 'enden' 5 +34314 'ender' 5 +34315 'endez' 5 +34316 'endif' 5 +34317 'endix' 5 +34318 'endor' 5 
+34319 'endra' 5 +34320 'endum' 5 +34321 'eners' 5 +34322 'enery' 5 +34323 'eness' 5 +34324 'enger' 5 +34325 'ength' 5 +34326 'ening' 5 +34327 'enium' 5 +34328 'ennen' 5 +34329 'ennes' 5 +34330 'ennis' 5 +34331 'ensch' 5 +34332 'ensed' 5 +34333 'ensen' 5 +34334 'enser' 5 +34335 'enses' 5 +34336 'ensis' 5 +34337 'enson' 5 +34338 'ensor' 5 +34339 'ensus' 5 +34340 'ental' 5 +34341 'ented' 5 +34342 'enter' 5 +34343 'entes' 5 +34344 'entic' 5 +34345 'entin' 5 +34346 'ently' 5 +34347 'enton' 5 +34348 'entre' 5 +34349 'entry' 5 +34350 'enzie' 5 +34351 'ença' 5 +34352 'epend' 5 +34353 'eping' 5 +34354 'epoch' 5 +34355 'equal' 5 +34356 'equip' 5 +34357 'equiv' 5 +34358 'erala' 5 +34359 'erald' 5 +34360 'erals' 5 +34361 'erase' 5 +34362 'erate' 5 +34363 'ereum' 5 +34364 'ergic' 5 +34365 'ergus' 5 +34366 'erial' 5 +34367 'eries' 5 +34368 'ering' 5 +34369 'erior' 5 +34370 'ermal' 5 +34371 'erman' 5 +34372 'ernal' 5 +34373 'ernel' 5 +34374 'erner' 5 +34375 'errno' 5 +34376 'error' 5 +34377 'ersen' 5 +34378 'erset' 5 +34379 'erson' 5 +34380 'erten' 5 +34381 'erton' 5 +34382 'erved' 5 +34383 'erver' 5 +34384 'erves' 5 +34385 'esian' 5 +34386 'esity' 5 +34387 'esium' 5 +34388 'esome' 5 +34389 'espan' 5 +34390 'esper' 5 +34391 'essed' 5 +34392 'essel' 5 +34393 'essen' 5 +34394 'esser' 5 +34395 'esses' 5 +34396 'essim' 5 +34397 'essor' 5 +34398 'ested' 5 +34399 'ester' 5 +34400 'estic' 5 +34401 'estim' 5 +34402 'eston' 5 +34403 'estre' 5 +34404 'estro' 5 +34405 'etary' 5 +34406 'eteor' 5 +34407 'eters' 5 +34408 'ether' 5 +34409 'ethod' 5 +34410 'ethyl' 5 +34411 'etics' 5 +34412 'eties' 5 +34413 'etime' 5 +34414 'etine' 5 +34415 'eting' 5 +34416 'etric' 5 +34417 'ettel' 5 +34418 'etter' 5 +34419 'ettes' 5 +34420 'ettle' 5 +34421 'etype' 5 +34422 'evalu' 5 +34423 'event' 5 +34424 'every' 5 +34425 'ewnę' 5 +34426 'exact' 5 +34427 'excel' 5 +34428 'exist' 5 +34429 'exper' 5 +34430 'explo' 5 +34431 'extra' 5 +34432 'faces' 5 +34433 'facts' 5 +34434 'faith' 5 +34435 'falls' 5 +34436 'false' 5 +34437 'fasta' 5 +34438 'fatal' 5 +34439 'fault' 5 +34440 'favor' 5 +34441 'fetch' 5 +34442 'ffect' 5 +34443 'ffiti' 5 +34444 'ffset' 5 +34445 'fiber' 5 +34446 'field' 5 +34447 'fight' 5 +34448 'filer' 5 +34449 'files' 5 +34450 'filtr' 5 +34451 'final' 5 +34452 'fires' 5 +34453 'first' 5 +34454 'fixed' 5 +34455 'flags' 5 +34456 'flake' 5 +34457 'flare' 5 +34458 'flash' 5 +34459 'flies' 5 +34460 'float' 5 +34461 'floor' 5 +34462 'flows' 5 +34463 'fluid' 5 +34464 'fluor' 5 +34465 'flush' 5 +34466 'fname' 5 +34467 'focus' 5 +34468 'folio' 5 +34469 'fonts' 5 +34470 'force' 5 +34471 'forge' 5 +34472 'forma' 5 +34473 'forme' 5 +34474 'forms' 5 +34475 'forth' 5 +34476 'forum' 5 +34477 'found' 5 +34478 'frame' 5 +34479 'fresh' 5 +34480 'frica' 5 +34481 'fried' 5 +34482 'front' 5 +34483 'fruit' 5 +34484 'ftime' 5 +34485 'ftype' 5 +34486 'fully' 5 +34487 'führ' 5 +34488 'gaard' 5 +34489 'gable' 5 +34490 'games' 5 +34491 'gamma' 5 +34492 'gauge' 5 +34493 'geant' 5 +34494 'geben' 5 +34495 'gebra' 5 +34496 'gence' 5 +34497 'gency' 5 +34498 'gende' 5 +34499 'gener' 5 +34500 'genes' 5 +34501 'genic' 5 +34502 'genre' 5 +34503 'geois' 5 +34504 'geons' 5 +34505 'gesch' 5 +34506 'getId' 5 +34507 'giene' 5 +34508 'given' 5 +34509 'glass' 5 +34510 'glyph' 5 +34511 'gmail' 5 +34512 'gment' 5 +34513 'goals' 5 +34514 'going' 5 +34515 'grade' 5 +34516 'grams' 5 +34517 'grand' 5 +34518 'grant' 5 +34519 'graph' 5 +34520 'grass' 5 +34521 'grave' 5 +34522 'great' 5 +34523 'green' 5 +34524 'gress' 5 +34525 'group' 5 +34526 'grown' 5 +34527 'grund' 5 
+34528 'guard' 5 +34529 'guess' 5 +34530 'guest' 5 +34531 'guide' 5 +34532 'guild' 5 +34533 'gunta' 5 +34534 'habit' 5 +34535 'hagen' 5 +34536 'hands' 5 +34537 'happy' 5 +34538 'hardt' 5 +34539 'harma' 5 +34540 'ható' 5 +34541 'haust' 5 +34542 'haven' 5 +34543 'heads' 5 +34544 'heard' 5 +34545 'heart' 5 +34546 'heast' 5 +34547 'heavy' 5 +34548 'heets' 5 +34549 'heits' 5 +34550 'hello' 5 +34551 'hemat' 5 +34552 'hemer' 5 +34553 'henyl' 5 +34554 'heres' 5 +34555 'herty' 5 +34556 'heses' 5 +34557 'hesia' 5 +34558 'hesis' 5 +34559 'heter' 5 +34560 'hetic' 5 +34561 'hetti' 5 +34562 'hetto' 5 +34563 'heure' 5 +34564 'hibit' 5 +34565 'hicle' 5 +34566 'hline' 5 +34567 'holds' 5 +34568 'holes' 5 +34569 'homme' 5 +34570 'hooks' 5 +34571 'hores' 5 +34572 'horse' 5 +34573 'hosts' 5 +34574 'hotel' 5 +34575 'hours' 5 +34576 'house' 5 +34577 'hover' 5 +34578 'hower' 5 +34579 'https' 5 +34580 'human' 5 +34581 'hurst' 5 +34582 'hydro' 5 +34583 'hyper' 5 +34584 'hält' 5 +34585 'häng' 5 +34586 'hões' 5 +34587 'iable' 5 +34588 'ially' 5 +34589 'ialog' 5 +34590 'iance' 5 +34591 'iasis' 5 +34592 'iated' 5 +34593 'iates' 5 +34594 'iator' 5 +34595 'iała' 5 +34596 'ibaba' 5 +34597 'ibile' 5 +34598 'ibles' 5 +34599 'iből' 5 +34600 'icago' 5 +34601 'icals' 5 +34602 'icana' 5 +34603 'icans' 5 +34604 'icate' 5 +34605 'ichen' 5 +34606 'icher' 5 +34607 'ichte' 5 +34608 'icial' 5 +34609 'ician' 5 +34610 'icide' 5 +34611 'icine' 5 +34612 'icing' 5 +34613 'icion' 5 +34614 'icios' 5 +34615 'icism' 5 +34616 'icity' 5 +34617 'ició' 5 +34618 'icked' 5 +34619 'icken' 5 +34620 'icker' 5 +34621 'icket' 5 +34622 'ická' 5 +34623 'ické' 5 +34624 'ický' 5 +34625 'icles' 5 +34626 'icode' 5 +34627 'icons' 5 +34628 'icted' 5 +34629 'ictor' 5 +34630 'icult' 5 +34631 'idade' 5 +34632 'idase' 5 +34633 'idata' 5 +34634 'idden' 5 +34635 'iddle' 5 +34636 'ideal' 5 +34637 'ident' 5 +34638 'ideos' 5 +34639 'iders' 5 +34640 'idget' 5 +34641 'idian' 5 +34642 'idine' 5 +34643 'iding' 5 +34644 'idity' 5 +34645 'idium' 5 +34646 'idual' 5 +34647 'idée' 5 +34648 'iedad' 5 +34649 'ieder' 5 +34650 'iegel' 5 +34651 'ielle' 5 +34652 'ience' 5 +34653 'iency' 5 +34654 'iendo' 5 +34655 'ienen' 5 +34656 'ienna' 5 +34657 'ienne' 5 +34658 'iente' 5 +34659 'iento' 5 +34660 'ients' 5 +34661 'ienza' 5 +34662 'ieren' 5 +34663 'ierno' 5 +34664 'ieron' 5 +34665 'ierra' 5 +34666 'ierre' 5 +34667 'ierte' 5 +34668 'ierto' 5 +34669 'iesel' 5 +34670 'iesen' 5 +34671 'ieurs' 5 +34672 'ieval' 5 +34673 'ieved' 5 +34674 'ieves' 5 +34675 'iface' 5 +34676 'ifact' 5 +34677 'ifdef' 5 +34678 'ifest' 5 +34679 'iffer' 5 +34680 'ifica' 5 +34681 'ifice' 5 +34682 'ified' 5 +34683 'ifier' 5 +34684 'ifies' 5 +34685 'ifié' 5 +34686 'ifold' 5 +34687 'iform' 5 +34688 'iforn' 5 +34689 'ifter' 5 +34690 'igate' 5 +34691 'igent' 5 +34692 'igest' 5 +34693 'igger' 5 +34694 'ighed' 5 +34695 'ighth' 5 +34696 'ights' 5 +34697 'igion' 5 +34698 'igmat' 5 +34699 'igned' 5 +34700 'igner' 5 +34701 'ignon' 5 +34702 'igram' 5 +34703 'igung' 5 +34704 'ijing' 5 +34705 'ikawa' 5 +34706 'ikers' 5 +34707 'iking' 5 +34708 'ilage' 5 +34709 'iland' 5 +34710 'ilder' 5 +34711 'ilent' 5 +34712 'ilers' 5 +34713 'ilian' 5 +34714 'iliar' 5 +34715 'ilies' 5 +34716 'iline' 5 +34717 'iling' 5 +34718 'ilion' 5 +34719 'ility' 5 +34720 'illac' 5 +34721 'illar' 5 +34722 'illas' 5 +34723 'illed' 5 +34724 'iller' 5 +34725 'illes' 5 +34726 'illet' 5 +34727 'illin' 5 +34728 'illon' 5 +34729 'illus' 5 +34730 'illé' 5 +34731 'ilogy' 5 +34732 'ilton' 5 +34733 'image' 5 +34734 'imals' 5 +34735 'imate' 5 +34736 'imens' 5 +34737 'iment' 
5 +34738 'imgur' 5 +34739 'imits' 5 +34740 'imize' 5 +34741 'immer' 5 +34742 'imony' 5 +34743 'imore' 5 +34744 'imoto' 5 +34745 'imper' 5 +34746 'imple' 5 +34747 'impro' 5 +34748 'imuth' 5 +34749 'inals' 5 +34750 'iname' 5 +34751 'inand' 5 +34752 'inant' 5 +34753 'inary' 5 +34754 'inate' 5 +34755 'inces' 5 +34756 'incip' 5 +34757 'incre' 5 +34758 'inden' 5 +34759 'inder' 5 +34760 'index' 5 +34761 'indic' 5 +34762 'indle' 5 +34763 'indow' 5 +34764 'indre' 5 +34765 'inear' 5 +34766 'inees' 5 +34767 'inely' 5 +34768 'inent' 5 +34769 'iners' 5 +34770 'inery' 5 +34771 'inese' 5 +34772 'iness' 5 +34773 'infer' 5 +34774 'infra' 5 +34775 'infty' 5 +34776 'ingen' 5 +34777 'inger' 5 +34778 'inges' 5 +34779 'ingle' 5 +34780 'ingly' 5 +34781 'inian' 5 +34782 'ining' 5 +34783 'inion' 5 +34784 'inite' 5 +34785 'inity' 5 +34786 'inkel' 5 +34787 'inker' 5 +34788 'inkle' 5 +34789 'inned' 5 +34790 'innen' 5 +34791 'inner' 5 +34792 'inode' 5 +34793 'inois' 5 +34794 'inous' 5 +34795 'input' 5 +34796 'inset' 5 +34797 'insic' 5 +34798 'inski' 5 +34799 'insky' 5 +34800 'inson' 5 +34801 'instr' 5 +34802 'intel' 5 +34803 'inter' 5 +34804 'inton' 5 +34805 'intro' 5 +34806 'inté' 5 +34807 'iolet' 5 +34808 'ional' 5 +34809 'ioned' 5 +34810 'iones' 5 +34811 'ionic' 5 +34812 'iosis' 5 +34813 'iotic' 5 +34814 'ioxid' 5 +34815 'ipart' 5 +34816 'ipers' 5 +34817 'ipher' 5 +34818 'iples' 5 +34819 'ipped' 5 +34820 'ipper' 5 +34821 'ippet' 5 +34822 'ipple' 5 +34823 'ipzig' 5 +34824 'iques' 5 +34825 'iquid' 5 +34826 'iqué' 5 +34827 'ircle' 5 +34828 'irect' 5 +34829 'iring' 5 +34830 'irmed' 5 +34831 'irror' 5 +34832 'isans' 5 +34833 'iscal' 5 +34834 'ische' 5 +34835 'isers' 5 +34836 'ished' 5 +34837 'isher' 5 +34838 'ishes' 5 +34839 'ishly' 5 +34840 'ishop' 5 +34841 'ising' 5 +34842 'ision' 5 +34843 'isman' 5 +34844 'ismic' 5 +34845 'ismus' 5 +34846 'isnan' 5 +34847 'isode' 5 +34848 'isons' 5 +34849 'issan' 5 +34850 'issen' 5 +34851 'isser' 5 +34852 'isses' 5 +34853 'isset' 5 +34854 'isson' 5 +34855 'issue' 5 +34856 'istan' 5 +34857 'istar' 5 +34858 'istas' 5 +34859 'isted' 5 +34860 'istem' 5 +34861 'isten' 5 +34862 'ister' 5 +34863 'istes' 5 +34864 'istic' 5 +34865 'istik' 5 +34866 'istle' 5 +34867 'istol' 5 +34868 'iston' 5 +34869 'istor' 5 +34870 'istra' 5 +34871 'istro' 5 +34872 'istry' 5 +34873 'istä' 5 +34874 'isure' 5 +34875 'isée' 5 +34876 'isés' 5 +34877 'itage' 5 +34878 'itals' 5 +34879 'itant' 5 +34880 'itary' 5 +34881 'itate' 5 +34882 'itect' 5 +34883 'itely' 5 +34884 'items' 5 +34885 'iterr' 5 +34886 'ither' 5 +34887 'ithub' 5 +34888 'itial' 5 +34889 'ities' 5 +34890 'itime' 5 +34891 'iting' 5 +34892 'ition' 5 +34893 'itive' 5 +34894 'itié' 5 +34895 'itled' 5 +34896 'itles' 5 +34897 'itone' 5 +34898 'itore' 5 +34899 'itori' 5 +34900 'itors' 5 +34901 'itory' 5 +34902 'itsch' 5 +34903 'itted' 5 +34904 'ittee' 5 +34905 'itten' 5 +34906 'itter' 5 +34907 'ittle' 5 +34908 'itude' 5 +34909 'itung' 5 +34910 'iture' 5 +34911 'itzer' 5 +34912 'ität' 5 +34913 'ités' 5 +34914 'ivals' 5 +34915 'ivari' 5 +34916 'ivate' 5 +34917 'iveau' 5 +34918 'ively' 5 +34919 'ivent' 5 +34920 'ivers' 5 +34921 'ivery' 5 +34922 'iving' 5 +34923 'ivism' 5 +34924 'ivist' 5 +34925 'ivity' 5 +34926 'ixels' 5 +34927 'izada' 5 +34928 'izado' 5 +34929 'izard' 5 +34930 'izens' 5 +34931 'izers' 5 +34932 'izing' 5 +34933 'izons' 5 +34934 'izont' 5 +34935 'izoph' 5 +34936 'ième' 5 +34937 'ière' 5 +34938 'jamin' 5 +34939 'jango' 5 +34940 'javax' 5 +34941 'jiang' 5 +34942 'joint' 5 +34943 'jours' 5 +34944 'juana' 5 +34945 'judge' 5 +34946 'junit' 5 +34947 
'juven' 5 +34948 'jähr' 5 +34949 'jší' 5 +34950 'kappa' 5 +34951 'keley' 5 +34952 'keras' 5 +34953 'klass' 5 +34954 'klär' 5 +34955 'known' 5 +34956 'ktion' 5 +34957 'ként' 5 +34958 'label' 5 +34959 'labor' 5 +34960 'laden' 5 +34961 'lando' 5 +34962 'lands' 5 +34963 'lapse' 5 +34964 'large' 5 +34965 'ları' 5 +34966 'lated' 5 +34967 'later' 5 +34968 'latex' 5 +34969 'latin' 5 +34970 'layer' 5 +34971 'ldots' 5 +34972 'leans' 5 +34973 'learn' 5 +34974 'lease' 5 +34975 'least' 5 +34976 'leave' 5 +34977 'ledge' 5 +34978 'legal' 5 +34979 'legen' 5 +34980 'leich' 5 +34981 'leigh' 5 +34982 'leman' 5 +34983 'lemen' 5 +34984 'lemma' 5 +34985 'letal' 5 +34986 'leted' 5 +34987 'letes' 5 +34988 'letic' 5 +34989 'leton' 5 +34990 'lette' 5 +34991 'level' 5 +34992 'lexer' 5 +34993 'lical' 5 +34994 'lices' 5 +34995 'liche' 5 +34996 'licht' 5 +34997 'licit' 5 +34998 'lickr' 5 +34999 'lient' 5 +35000 'liers' 5 +35001 'liest' 5 +35002 'ließ' 5 +35003 'light' 5 +35004 'ligne' 5 +35005 'liked' 5 +35006 'limit' 5 +35007 'lined' 5 +35008 'liner' 5 +35009 'lines' 5 +35010 'lings' 5 +35011 'linha' 5 +35012 'links' 5 +35013 'linux' 5 +35014 'lique' 5 +35015 'lista' 5 +35016 'lists' 5 +35017 'liter' 5 +35018 'lived' 5 +35019 'liver' 5 +35020 'loads' 5 +35021 'lobal' 5 +35022 'local' 5 +35023 'locks' 5 +35024 'logic' 5 +35025 'login' 5 +35026 'loops' 5 +35027 'lopen' 5 +35028 'lords' 5 +35029 'lotte' 5 +35030 'lover' 5 +35031 'lower' 5 +35032 'luent' 5 +35033 'lycer' 5 +35034 'lying' 5 +35035 'länd' 5 +35036 'macro' 5 +35037 'magic' 5 +35038 'mails' 5 +35039 'maint' 5 +35040 'major' 5 +35041 'maker' 5 +35042 'makes' 5 +35043 'mania' 5 +35044 'mares' 5 +35045 'marks' 5 +35046 'ması' 5 +35047 'match' 5 +35048 'mates' 5 +35049 'matic' 5 +35050 'maven' 5 +35051 'maxim' 5 +35052 'maybe' 5 +35053 'means' 5 +35054 'media' 5 +35055 'mente' 5 +35056 'ments' 5 +35057 'merce' 5 +35058 'merge' 5 +35059 'meric' 5 +35060 'metal' 5 +35061 'meter' 5 +35062 'metic' 5 +35063 'metro' 5 +35064 'metry' 5 +35065 'micro' 5 +35066 'might' 5 +35067 'miner' 5 +35068 'minim' 5 +35069 'minor' 5 +35070 'minus' 5 +35071 'mixed' 5 +35072 'mkdir' 5 +35073 'modal' 5 +35074 'model' 5 +35075 'modes' 5 +35076 'money' 5 +35077 'mongo' 5 +35078 'monic' 5 +35079 'month' 5 +35080 'morph' 5 +35081 'motor' 5 +35082 'mount' 5 +35083 'mouse' 5 +35084 'mouth' 5 +35085 'movie' 5 +35086 'multi' 5 +35087 'music' 5 +35088 'mutex' 5 +35089 'mysql' 5 +35090 'même' 5 +35091 'nabla' 5 +35092 'nable' 5 +35093 'naire' 5 +35094 'named' 5 +35095 'names' 5 +35096 'nants' 5 +35097 'natal' 5 +35098 'neath' 5 +35099 'needs' 5 +35100 'negie' 5 +35101 'nelle' 5 +35102 'nergy' 5 +35103 'nesty' 5 +35104 'nette' 5 +35105 'never' 5 +35106 'nginx' 5 +35107 'night' 5 +35108 'nikov' 5 +35109 'nings' 5 +35110 'nodes' 5 +35111 'noise' 5 +35112 'nonce' 5 +35113 'north' 5 +35114 'notes' 5 +35115 'notin' 5 +35116 'nucle' 5 +35117 'numer' 5 +35118 'numpy' 5 +35119 'nyder' 5 +35120 'nées' 5 +35121 'ného' 5 +35122 'ních' 5 +35123 'ního' 5 +35124 'ných' 5 +35125 'oauth' 5 +35126 'obile' 5 +35127 'obody' 5 +35128 'ocado' 5 +35129 'ocamp' 5 +35130 'ocard' 5 +35131 'ocate' 5 +35132 'occup' 5 +35133 'occur' 5 +35134 'occus' 5 +35135 'ocene' 5 +35136 'ocent' 5 +35137 'ocese' 5 +35138 'ochem' 5 +35139 'ocial' 5 +35140 'ocide' 5 +35141 'ocity' 5 +35142 'ocker' 5 +35143 'ocket' 5 +35144 'ockey' 5 +35145 'ocode' 5 +35146 'ocrat' 5 +35147 'ocyan' 5 +35148 'ocyte' 5 +35149 'odies' 5 +35150 'oding' 5 +35151 'odium' 5 +35152 'odont' 5 +35153 'odore' 5 +35154 'odule' 5 +35155 'offee' 5 +35156 'offer' 5 +35157 
'offic' 5 +35158 'often' 5 +35159 'ogene' 5 +35160 'ogens' 5 +35161 'oggle' 5 +35162 'oglob' 5 +35163 'ograf' 5 +35164 'ogram' 5 +35165 'ograp' 5 +35166 'ográ' 5 +35167 'oidal' 5 +35168 'okers' 5 +35169 'oking' 5 +35170 'okrat' 5 +35171 'oland' 5 +35172 'olars' 5 +35173 'olate' 5 +35174 'older' 5 +35175 'olean' 5 +35176 'olics' 5 +35177 'olina' 5 +35178 'oline' 5 +35179 'oling' 5 +35180 'olini' 5 +35181 'olith' 5 +35182 'ollah' 5 +35183 'ollar' 5 +35184 'ollen' 5 +35185 'oller' 5 +35186 'ollow' 5 +35187 'ology' 5 +35188 'olson' 5 +35189 'olulu' 5 +35190 'olute' 5 +35191 'olved' 5 +35192 'olver' 5 +35193 'olves' 5 +35194 'ológ' 5 +35195 'omain' 5 +35196 'omaly' 5 +35197 'ombie' 5 +35198 'omega' 5 +35199 'oment' 5 +35200 'omers' 5 +35201 'omial' 5 +35202 'omics' 5 +35203 'oming' 5 +35204 'ommen' 5 +35205 'omnia' 5 +35206 'omore' 5 +35207 'områ' 5 +35208 'onald' 5 +35209 'onaut' 5 +35210 'onces' 5 +35211 'oncé' 5 +35212 'onder' 5 +35213 'ondon' 5 +35214 'onent' 5 +35215 'onial' 5 +35216 'onian' 5 +35217 'onica' 5 +35218 'onies' 5 +35219 'oning' 5 +35220 'onium' 5 +35221 'onomy' 5 +35222 'onset' 5 +35223 'onyms' 5 +35224 'ookie' 5 +35225 'ooter' 5 +35226 'opard' 5 +35227 'opath' 5 +35228 'openh' 5 +35229 'opens' 5 +35230 'opher' 5 +35231 'ophil' 5 +35232 'ophys' 5 +35233 'opian' 5 +35234 'oping' 5 +35235 'oplan' 5 +35236 'oples' 5 +35237 'oplus' 5 +35238 'opoly' 5 +35239 'oprop' 5 +35240 'opsis' 5 +35241 'opter' 5 +35242 'optic' 5 +35243 'optim' 5 +35244 'orage' 5 +35245 'orama' 5 +35246 'orate' 5 +35247 'orbit' 5 +35248 'ordan' 5 +35249 'orden' 5 +35250 'order' 5 +35251 'ordin' 5 +35252 'ordon' 5 +35253 'oreal' 5 +35254 'orean' 5 +35255 'orest' 5 +35256 'organ' 5 +35257 'orgen' 5 +35258 'orget' 5 +35259 'orial' 5 +35260 'orian' 5 +35261 'ories' 5 +35262 'oring' 5 +35263 'ority' 5 +35264 'ormal' 5 +35265 'orman' 5 +35266 'orney' 5 +35267 'orous' 5 +35268 'orpor' 5 +35269 'orrow' 5 +35270 'ortal' 5 +35271 'orted' 5 +35272 'orter' 5 +35273 'ortex' 5 +35274 'ortho' 5 +35275 'orthy' 5 +35276 'ortic' 5 +35277 'orton' 5 +35278 'ortun' 5 +35279 'osaic' 5 +35280 'osaur' 5 +35281 'osing' 5 +35282 'osion' 5 +35283 'osite' 5 +35284 'osity' 5 +35285 'oslav' 5 +35286 'osome' 5 +35287 'ospel' 5 +35288 'ossip' 5 +35289 'ostat' 5 +35290 'osten' 5 +35291 'oster' 5 +35292 'ostic' 5 +35293 'oston' 5 +35294 'oteca' 5 +35295 'otech' 5 +35296 'oters' 5 +35297 'other' 5 +35298 'otics' 5 +35299 'otide' 5 +35300 'otine' 5 +35301 'oting' 5 +35302 'otion' 5 +35303 'otive' 5 +35304 'otomy' 5 +35305 'otrop' 5 +35306 'otted' 5 +35307 'otten' 5 +35308 'ottom' 5 +35309 'otype' 5 +35310 'ouble' 5 +35311 'ought' 5 +35312 'oulos' 5 +35313 'ounce' 5 +35314 'ounds' 5 +35315 'ounge' 5 +35316 'ounty' 5 +35317 'ource' 5 +35318 'oured' 5 +35319 'ourse' 5 +35320 'oused' 5 +35321 'ousel' 5 +35322 'ouses' 5 +35323 'ously' 5 +35324 'ousse' 5 +35325 'outer' 5 +35326 'ouver' 5 +35327 'overn' 5 +35328 'overs' 5 +35329 'overy' 5 +35330 'ovich' 5 +35331 'oving' 5 +35332 'ović' 5 +35333 'ovsky' 5 +35334 'ować' 5 +35335 'ował' 5 +35336 'owell' 5 +35337 'owing' 5 +35338 'owitz' 5 +35339 'owler' 5 +35340 'owned' 5 +35341 'owner' 5 +35342 'ownik' 5 +35343 'owski' 5 +35344 'oxide' 5 +35345 'ozzá' 5 +35346 'ości' 5 +35347 'paced' 5 +35348 'paces' 5 +35349 'pages' 5 +35350 'paint' 5 +35351 'pairs' 5 +35352 'panel' 5 +35353 'panic' 5 +35354 'paper' 5 +35355 'param' 5 +35356 'paras' 5 +35357 'paren' 5 +35358 'parse' 5 +35359 'parts' 5 +35360 'party' 5 +35361 'paste' 5 +35362 'patch' 5 +35363 'paths' 5 +35364 'pathy' 5 +35365 'pause' 5 +35366 'peace' 5 
+35367 'pedia' 5 +35368 'peech' 5 +35369 'pered' 5 +35370 'peria' 5 +35371 'peror' 5 +35372 'perse' 5 +35373 'perty' 5 +35374 'phalt' 5 +35375 'phant' 5 +35376 'phase' 5 +35377 'pherd' 5 +35378 'phere' 5 +35379 'phins' 5 +35380 'phinx' 5 +35381 'phone' 5 +35382 'phony' 5 +35383 'photo' 5 +35384 'piece' 5 +35385 'pires' 5 +35386 'pitch' 5 +35387 'pivot' 5 +35388 'pixel' 5 +35389 'place' 5 +35390 'plain' 5 +35391 'plane' 5 +35392 'plant' 5 +35393 'plate' 5 +35394 'platz' 5 +35395 'plays' 5 +35396 'pless' 5 +35397 'plete' 5 +35398 'plets' 5 +35399 'plica' 5 +35400 'plied' 5 +35401 'plier' 5 +35402 'plies' 5 +35403 'pline' 5 +35404 'pling' 5 +35405 'plist' 5 +35406 'pload' 5 +35407 'plots' 5 +35408 'point' 5 +35409 'polar' 5 +35410 'polit' 5 +35411 'ponse' 5 +35412 'poons' 5 +35413 'popup' 5 +35414 'porte' 5 +35415 'ports' 5 +35416 'posal' 5 +35417 'posed' 5 +35418 'poser' 5 +35419 'poses' 5 +35420 'posit' 5 +35421 'posix' 5 +35422 'posta' 5 +35423 'posts' 5 +35424 'pound' 5 +35425 'power' 5 +35426 'ppers' 5 +35427 'pping' 5 +35428 'pread' 5 +35429 'press' 5 +35430 'price' 5 +35431 'prime' 5 +35432 'pring' 5 +35433 'print' 5 +35434 'prior' 5 +35435 'prise' 5 +35436 'probe' 5 +35437 'produ' 5 +35438 'promo' 5 +35439 'proof' 5 +35440 'props' 5 +35441 'prote' 5 +35442 'proto' 5 +35443 'prove' 5 +35444 'proxy' 5 +35445 'près' 5 +35446 'prés' 5 +35447 'psych' 5 +35448 'ptide' 5 +35449 'ption' 5 +35450 'ptive' 5 +35451 'ptune' 5 +35452 'pulse' 5 +35453 'punkt' 5 +35454 'puted' 5 +35455 'puter' 5 +35456 'pués' 5 +35457 'qquad' 5 +35458 'quake' 5 +35459 'quant' 5 +35460 'quare' 5 +35461 'quart' 5 +35462 'queda' 5 +35463 'quent' 5 +35464 'query' 5 +35465 'quest' 5 +35466 'queue' 5 +35467 'quick' 5 +35468 'quier' 5 +35469 'quiet' 5 +35470 'quipe' 5 +35471 'quire' 5 +35472 'quiry' 5 +35473 'quist' 5 +35474 'quite' 5 +35475 'quito' 5 +35476 'quivo' 5 +35477 'quota' 5 +35478 'quote' 5 +35479 'rades' 5 +35480 'radio' 5 +35481 'rador' 5 +35482 'ragon' 5 +35483 'raham' 5 +35484 'rails' 5 +35485 'raine' 5 +35486 'rains' 5 +35487 'raint' 5 +35488 'raise' 5 +35489 'raits' 5 +35490 'ramer' 5 +35491 'ramid' 5 +35492 'rance' 5 +35493 'ranch' 5 +35494 'range' 5 +35495 'rapid' 5 +35496 'rases' 5 +35497 'rated' 5 +35498 'rates' 5 +35499 'ratio' 5 +35500 'ravel' 5 +35501 'razil' 5 +35502 'reach' 5 +35503 'react' 5 +35504 'reads' 5 +35505 'ready' 5 +35506 'realm' 5 +35507 'reate' 5 +35508 'recht' 5 +35509 'redit' 5 +35510 'reens' 5 +35511 'refer' 5 +35512 'refix' 5 +35513 'regex' 5 +35514 'regon' 5 +35515 'regor' 5 +35516 'reich' 5 +35517 'reira' 5 +35518 'relax' 5 +35519 'rella' 5 +35520 'rence' 5 +35521 'rench' 5 +35522 'rende' 5 +35523 'renew' 5 +35524 'rente' 5 +35525 'reply' 5 +35526 'repos' 5 +35527 'reset' 5 +35528 'resid' 5 +35529 'resol' 5 +35530 'resse' 5 +35531 'retch' 5 +35532 'reten' 5 +35533 'retry' 5 +35534 'rette' 5 +35535 'reuse' 5 +35536 'riage' 5 +35537 'rians' 5 +35538 'rible' 5 +35539 'ribly' 5 +35540 'rical' 5 +35541 'rices' 5 +35542 'richt' 5 +35543 'ricia' 5 +35544 'ricks' 5 +35545 'rides' 5 +35546 'ridge' 5 +35547 'riend' 5 +35548 'rient' 5 +35549 'riers' 5 +35550 'rieve' 5 +35551 'right' 5 +35552 'rimin' 5 +35553 'ringe' 5 +35554 'rings' 5 +35555 'riors' 5 +35556 'rique' 5 +35557 'rison' 5 +35558 'rists' 5 +35559 'riter' 5 +35560 'rites' 5 +35561 'ritic' 5 +35562 'ritis' 5 +35563 'rival' 5 +35564 'rived' 5 +35565 'river' 5 +35566 'roads' 5 +35567 'robat' 5 +35568 'robot' 5 +35569 'rocal' 5 +35570 'rogen' 5 +35571 'roles' 5 +35572 'rolls' 5 +35573 'rolog' 5 +35574 'romes' 5 +35575 'rones' 5 
+35576 'ronic' 5 +35577 'ronym' 5 +35578 'rooms' 5 +35579 'roots' 5 +35580 'rophe' 5 +35581 'rophy' 5 +35582 'ropic' 5 +35583 'ropol' 5 +35584 'ropri' 5 +35585 'rored' 5 +35586 'rosis' 5 +35587 'rosse' 5 +35588 'rough' 5 +35589 'round' 5 +35590 'route' 5 +35591 'rowse' 5 +35592 'rowth' 5 +35593 'rozen' 5 +35594 'ruary' 5 +35595 'ruits' 5 +35596 'rules' 5 +35597 'rying' 5 +35598 'rypto' 5 +35599 'sales' 5 +35600 'saved' 5 +35601 'sburg' 5 +35602 'scala' 5 +35603 'scale' 5 +35604 'scape' 5 +35605 'scene' 5 +35606 'sched' 5 +35607 'schen' 5 +35608 'scope' 5 +35609 'score' 5 +35610 'scrib' 5 +35611 'sembl' 5 +35612 'senal' 5 +35613 'sense' 5 +35614 'separ' 5 +35615 'serie' 5 +35616 'serve' 5 +35617 'setUp' 5 +35618 'setup' 5 +35619 'seudo' 5 +35620 'seven' 5 +35621 'sever' 5 +35622 'shake' 5 +35623 'shall' 5 +35624 'shape' 5 +35625 'share' 5 +35626 'sharp' 5 +35627 'sheet' 5 +35628 'shelf' 5 +35629 'shell' 5 +35630 'shift' 5 +35631 'shine' 5 +35632 'ships' 5 +35633 'shire' 5 +35634 'shirt' 5 +35635 'shoot' 5 +35636 'shops' 5 +35637 'shore' 5 +35638 'short' 5 +35639 'shots' 5 +35640 'shown' 5 +35641 'shows' 5 +35642 'sible' 5 +35643 'sided' 5 +35644 'sight' 5 +35645 'sigma' 5 +35646 'simeq' 5 +35647 'simpl' 5 +35648 'since' 5 +35649 'sites' 5 +35650 'sized' 5 +35651 'sizes' 5 +35652 'skill' 5 +35653 'skins' 5 +35654 'slack' 5 +35655 'slant' 5 +35656 'slash' 5 +35657 'slave' 5 +35658 'sleep' 5 +35659 'slice' 5 +35660 'slide' 5 +35661 'slope' 5 +35662 'slots' 5 +35663 'small' 5 +35664 'smart' 5 +35665 'smith' 5 +35666 'snake' 5 +35667 'sofar' 5 +35668 'solar' 5 +35669 'solid' 5 +35670 'solve' 5 +35671 'sound' 5 +35672 'south' 5 +35673 'space' 5 +35674 'spark' 5 +35675 'spawn' 5 +35676 'spect' 5 +35677 'speed' 5 +35678 'spell' 5 +35679 'split' 5 +35680 'sport' 5 +35681 'spots' 5 +35682 'stack' 5 +35683 'stadt' 5 +35684 'staff' 5 +35685 'stage' 5 +35686 'stalk' 5 +35687 'stamp' 5 +35688 'stand' 5 +35689 'stant' 5 +35690 'stars' 5 +35691 'start' 5 +35692 'stash' 5 +35693 'state' 5 +35694 'stats' 5 +35695 'stdin' 5 +35696 'stdio' 5 +35697 'stead' 5 +35698 'steel' 5 +35699 'stein' 5 +35700 'stell' 5 +35701 'steps' 5 +35702 'stere' 5 +35703 'sters' 5 +35704 'stery' 5 +35705 'stick' 5 +35706 'still' 5 +35707 'stime' 5 +35708 'stock' 5 +35709 'stone' 5 +35710 'stood' 5 +35711 'store' 5 +35712 'storm' 5 +35713 'story' 5 +35714 'stown' 5 +35715 'strap' 5 +35716 'strip' 5 +35717 'strom' 5 +35718 'study' 5 +35719 'stuff' 5 +35720 'ství' 5 +35721 'style' 5 +35722 'stype' 5 +35723 'stüt' 5 +35724 'subst' 5 +35725 'suite' 5 +35726 'super' 5 +35727 'sweet' 5 +35728 'swers' 5 +35729 'swick' 5 +35730 'swift' 5 +35731 'swing' 5 +35732 'szág' 5 +35733 'table' 5 +35734 'tails' 5 +35735 'taire' 5 +35736 'taken' 5 +35737 'takes' 5 +35738 'tasks' 5 +35739 'tbody' 5 +35740 'techn' 5 +35741 'teger' 5 +35742 'templ' 5 +35743 'temps' 5 +35744 'tered' 5 +35745 'terms' 5 +35746 'terra' 5 +35747 'tests' 5 +35748 'texto' 5 +35749 'texts' 5 +35750 'tfrac' 5 +35751 'thank' 5 +35752 'thead' 5 +35753 'their' 5 +35754 'theme' 5 +35755 'there' 5 +35756 'thern' 5 +35757 'thers' 5 +35758 'these' 5 +35759 'theta' 5 +35760 'thick' 5 +35761 'thing' 5 +35762 'think' 5 +35763 'third' 5 +35764 'thood' 5 +35765 'those' 5 +35766 'three' 5 +35767 'thren' 5 +35768 'throw' 5 +35769 'thumb' 5 +35770 'tical' 5 +35771 'ticks' 5 +35772 'tight' 5 +35773 'tilde' 5 +35774 'tiles' 5 +35775 'timer' 5 +35776 'times' 5 +35777 'tings' 5 +35778 'title' 5 +35779 'tober' 5 +35780 'today' 5 +35781 'todos' 5 +35782 'token' 5 +35783 'tools' 5 +35784 'topic' 5 
+35785 'torch' 5 +35786 'total' 5 +35787 'touch' 5 +35788 'trace' 5 +35789 'track' 5 +35790 'tract' 5 +35791 'trade' 5 +35792 'trail' 5 +35793 'train' 5 +35794 'trait' 5 +35795 'trans' 5 +35796 'trash' 5 +35797 'treat' 5 +35798 'trees' 5 +35799 'trend' 5 +35800 'trial' 5 +35801 'tries' 5 +35802 'tring' 5 +35803 'trunc' 5 +35804 'trust' 5 +35805 'truth' 5 +35806 'tuple' 5 +35807 'tures' 5 +35808 'tweet' 5 +35809 'twist' 5 +35810 'typed' 5 +35811 'types' 5 +35812 'uable' 5 +35813 'ually' 5 +35814 'uario' 5 +35815 'uated' 5 +35816 'uates' 5 +35817 'ubble' 5 +35818 'ubern' 5 +35819 'ubert' 5 +35820 'ublic' 5 +35821 'ublin' 5 +35822 'ubyte' 5 +35823 'uchar' 5 +35824 'uchen' 5 +35825 'ucing' 5 +35826 'ucion' 5 +35827 'ucked' 5 +35828 'ucker' 5 +35829 'ucket' 5 +35830 'uckle' 5 +35831 'uctor' 5 +35832 'uddle' 5 +35833 'udeau' 5 +35834 'udent' 5 +35835 'uding' 5 +35836 'udson' 5 +35837 'uelle' 5 +35838 'uerdo' 5 +35839 'uerto' 5 +35840 'uesta' 5 +35841 'uesto' 5 +35842 'ufact' 5 +35843 'uffed' 5 +35844 'uffer' 5 +35845 'uffix' 5 +35846 'uffle' 5 +35847 'uggle' 5 +35848 'ugins' 5 +35849 'uitar' 5 +35850 'ulant' 5 +35851 'ulate' 5 +35852 'ulent' 5 +35853 'uliar' 5 +35854 'uling' 5 +35855 'ulkan' 5 +35856 'ullah' 5 +35857 'ullen' 5 +35858 'ulner' 5 +35859 'ulong' 5 +35860 'ulose' 5 +35861 'ulous' 5 +35862 'ultan' 5 +35863 'ultur' 5 +35864 'ulté' 5 +35865 'umann' 5 +35866 'umbai' 5 +35867 'umber' 5 +35868 'umble' 5 +35869 'ument' 5 +35870 'umina' 5 +35871 'uming' 5 +35872 'ummer' 5 +35873 'umped' 5 +35874 'umper' 5 +35875 'uncan' 5 +35876 'uncia' 5 +35877 'undai' 5 +35878 'unday' 5 +35879 'undef' 5 +35880 'unden' 5 +35881 'under' 5 +35882 'undle' 5 +35883 'ungal' 5 +35884 'ungen' 5 +35885 'unger' 5 +35886 'ungle' 5 +35887 'uning' 5 +35888 'union' 5 +35889 'units' 5 +35890 'unity' 5 +35891 'unker' 5 +35892 'unned' 5 +35893 'unnel' 5 +35894 'unque' 5 +35895 'unset' 5 +35896 'unted' 5 +35897 'unter' 5 +35898 'until' 5 +35899 'untos' 5 +35900 'uplic' 5 +35901 'upper' 5 +35902 'uracy' 5 +35903 'urate' 5 +35904 'urban' 5 +35905 'urbed' 5 +35906 'ureau' 5 +35907 'urent' 5 +35908 'urers' 5 +35909 'urger' 5 +35910 'uries' 5 +35911 'uring' 5 +35912 'urity' 5 +35913 'urnal' 5 +35914 'urope' 5 +35915 'urous' 5 +35916 'urred' 5 +35917 'ursed' 5 +35918 'urses' 5 +35919 'ursor' 5 +35920 'urtle' 5 +35921 'usage' 5 +35922 'users' 5 +35923 'useum' 5 +35924 'ushed' 5 +35925 'ushes' 5 +35926 'using' 5 +35927 'usion' 5 +35928 'usive' 5 +35929 'ussed' 5 +35930 'ussen' 5 +35931 'ussia' 5 +35932 'usted' 5 +35933 'uster' 5 +35934 'ustin' 5 +35935 'ustom' 5 +35936 'usual' 5 +35937 'utely' 5 +35938 'uters' 5 +35939 'uteur' 5 +35940 'uther' 5 +35941 'utils' 5 +35942 'uting' 5 +35943 'ution' 5 +35944 'utive' 5 +35945 'utors' 5 +35946 'utory' 5 +35947 'utral' 5 +35948 'utsch' 5 +35949 'utter' 5 +35950 'utton' 5 +35951 'uture' 5 +35952 'uyên' 5 +35953 'uzzle' 5 +35954 'vable' 5 +35955 'valid' 5 +35956 'valor' 5 +35957 'value' 5 +35958 'varez' 5 +35959 'vault' 5 +35960 'vdots' 5 +35961 'velle' 5 +35962 'velop' 5 +35963 'venir' 5 +35964 'venth' 5 +35965 'vents' 5 +35966 'venue' 5 +35967 'verbs' 5 +35968 'verse' 5 +35969 'verte' 5 +35970 'verts' 5 +35971 'verty' 5 +35972 'vette' 5 +35973 'video' 5 +35974 'vider' 5 +35975 'vidia' 5 +35976 'views' 5 +35977 'villa' 5 +35978 'ville' 5 +35979 'vious' 5 +35980 'viron' 5 +35981 'virus' 5 +35982 'vised' 5 +35983 'visit' 5 +35984 'visor' 5 +35985 'vival' 5 +35986 'vocab' 5 +35987 'voice' 5 +35988 'votes' 5 +35989 'väst' 5 +35990 'wagen' 5 +35991 'walls' 5 +35992 'wards' 5 +35993 'wares' 5 
+35994 'watch' 5 +35995 'water' 5 +35996 'waves' 5 +35997 'wedge' 5 +35998 'weeks' 5 +35999 'weets' 5 +36000 'weise' 5 +36001 'wheel' 5 +36002 'where' 5 +36003 'which' 5 +36004 'while' 5 +36005 'white' 5 +36006 'whole' 5 +36007 'whose' 5 +36008 'width' 5 +36009 'witch' 5 +36010 'wives' 5 +36011 'wiąz' 5 +36012 'woman' 5 +36013 'women' 5 +36014 'woods' 5 +36015 'words' 5 +36016 'works' 5 +36017 'world' 5 +36018 'worth' 5 +36019 'would' 5 +36020 'write' 5 +36021 'wrong' 5 +36022 'xhtml' 5 +36023 'xiety' 5 +36024 'xmlns' 5 +36025 'xpath' 5 +36026 'xture' 5 +36027 'xygen' 5 +36028 'yahoo' 5 +36029 'yards' 5 +36030 'ycler' 5 +36031 'years' 5 +36032 'yield' 5 +36033 'ylene' 5 +36034 'ylvan' 5 +36035 'ymbol' 5 +36036 'yntax' 5 +36037 'young' 5 +36038 'ystem' 5 +36039 'yster' 5 +36040 'ython' 5 +36041 'ytics' 5 +36042 'zeich' 5 +36043 'zeros' 5 +36044 'ział' 5 +36045 'zilla' 5 +36046 'zione' 5 +36047 'zsche' 5 +36048 '}}_{\\' 5 +36049 'ÇÃO' 5 +36050 'État' 5 +36051 'ában' 5 +36052 'ácil' 5 +36053 'ález' 5 +36054 'ális' 5 +36055 'álva' 5 +36056 'ámos' 5 +36057 'ának' 5 +36058 'ános' 5 +36059 'ání' 5 +36060 'ária' 5 +36061 'ário' 5 +36062 'ások' 5 +36063 'átum' 5 +36064 'ával' 5 +36065 'ável' 5 +36066 'ází' 5 +36067 'ână' 5 +36068 'âtre' 5 +36069 'äche' 5 +36070 'ächs' 5 +36071 'ächt' 5 +36072 'äger' 5 +36073 'ählt' 5 +36074 'äler' 5 +36075 'älle' 5 +36076 'ällt' 5 +36077 'ämä' 5 +36078 'ände' 5 +36079 'änge' 5 +36080 'ären' 5 +36081 'ässt' 5 +36082 'äter' 5 +36083 'ätte' 5 +36084 'ätze' 5 +36085 'äude' 5 +36086 'ään' 5 +36087 'ædia' 5 +36088 'çais' 5 +36089 'çois' 5 +36090 'çoit' 5 +36091 'ção' 5 +36092 'èces' 5 +36093 'èles' 5 +36094 'èmes' 5 +36095 'ènes' 5 +36096 'èque' 5 +36097 'ères' 5 +36098 'ètes' 5 +36099 'ètre' 5 +36100 'èves' 5 +36101 'ébec' 5 +36102 'ében' 5 +36103 'écur' 5 +36104 'éder' 5 +36105 'édia' 5 +36106 'édie' 5 +36107 'édé' 5 +36108 'élé' 5 +36109 'émet' 5 +36110 'émie' 5 +36111 'émon' 5 +36112 'ének' 5 +36113 'énez' 5 +36114 'énom' 5 +36115 'éné' 5 +36116 'éral' 5 +36117 'érer' 5 +36118 'érez' 5 +36119 'éric' 5 +36120 'érie' 5 +36121 'ério' 5 +36122 'éré' 5 +36123 'ésie' 5 +36124 'éső' 5 +36125 'état' 5 +36126 'éter' 5 +36127 'été' 5 +36128 'ével' 5 +36129 'êmes' 5 +36130 'êque' 5 +36131 'êtes' 5 +36132 'être' 5 +36133 'ícia' 5 +36134 'ício' 5 +36135 'ícul' 5 +36136 'ící' 5 +36137 'ígen' 5 +36138 'ília' 5 +36139 'ínez' 5 +36140 'íses' 5 +36141 'ível' 5 +36142 'ître' 5 +36143 'ñana' 5 +36144 'òria' 5 +36145 'ództ' 5 +36146 'ópez' 5 +36147 'ória' 5 +36148 'ório' 5 +36149 'ôtel' 5 +36150 'öder' 5 +36151 'önig' 5 +36152 'öße' 5 +36153 'úmer' 5 +36154 'über' 5 +36155 'ücke' 5 +36156 'ügel' 5 +36157 'ügen' 5 +36158 'ühle' 5 +36159 'ührt' 5 +36160 'üler' 5 +36161 'ület' 5 +36162 'ünst' 5 +36163 'ční' 5 +36164 'ędzy' 5 +36165 'ění' 5 +36166 'ılı' 5 +36167 'ında' 5 +36168 'ını' 5 +36169 'łoż' 5 +36170 'łuż' 5 +36171 'łów' 5 +36172 'ńczy' 5 +36173 'ńska' 5 +36174 'ński' 5 +36175 'ństw' 5 +36176 'ście' 5 +36177 'śnie' 5 +36178 'ště' 5 +36179 'ším' 5 +36180 'ướ' 5 +36181 'ườ' 5 +36182 'ưở' 5 +36183 'ượ' 5 +36184 'ảng' 5 +36185 'ằng' 5 +36186 'ịch' 5 +36187 'ống' 5 +36188 'ồng' 5 +36189 'ụng' 5 +36190 'ứng' 5 +36191 'ững' 5 +36192 '’il' 5 +36193 '’ll' 5 +36194 '’re' 5 +36195 '’ve' 5 +36196 '“No' 5 +36197 '”),' 5 +36198 '”).' 5 +36199 '…..' 
5 +36200 '!",' 5 +36201 ':", + "eos_token": "\n\n", + "unk_token": "<|rwkv_tokenizer_end_of_text|>", + "pad_token": "<|rwkv_tokenizer_end_of_text|>" +} diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/tokenizer_config.json b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..7857c40972ab904d10c87b53920213583149e6a4 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/CosyVoice-BlankEN/tokenizer_config.json @@ -0,0 +1,28 @@ +{ + "add_prefix_space": false, + "added_tokens_decoder": { + "0": { + "content": "<|rwkv_tokenizer_end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "auto_map": { + "AutoTokenizer": [ + "hf_rwkv_tokenizer.RwkvTokenizer", + null + ] + }, + "bos_token": "<|rwkv_tokenizer_end_of_text|>", + "pad_token": "<|rwkv_tokenizer_end_of_text|>", + "clean_up_tokenization_spaces": false, + "eos_token": "\n\n", + "model_max_length": 1000000000000000019884624838656, + "tokenizer_class": "RwkvTokenizer", + "unk_token": "<|rwkv_tokenizer_end_of_text|>", + "use_fast": false, + "chat_template": "{{ '<|rwkv_tokenizer_end_of_text|>' }}{% for message in messages %}{% if message['role'] == 'user' %}{{'User: ' + message['content'] + '\n\n'}}{% elif message['role'] == 'system' %}{{'System: ' + message['content'] + '\n\n'}}{% elif message['role'] == 'assistant' %}{{'Assistant: ' + message['content'] + '\n\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}" +} diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/README.md b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/README.md new file mode 100644 index 0000000000000000000000000000000000000000..393a48312a64b7aef635c6fb94cd01e9eb28a196 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/README.md @@ -0,0 +1,14 @@ +--- +language: +- zh +- en +- ko +- ja +base_model: +- fla-hub/rwkv7-1.5B-world +pipeline_tag: text-to-speech +--- +This is TTS model combined with Cosy's FSQ and RWKV Language model. +Please refer : +https://github.com/yynil/RWKVTTS/blob/main/Inference.md +to use this checkpoint. 
\ No newline at end of file diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/asset/dingding.png b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/asset/dingding.png new file mode 100644 index 0000000000000000000000000000000000000000..9a644005c7b38fd64597c1eadfc6c708973e9a94 Binary files /dev/null and b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/asset/dingding.png differ diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/campplus.onnx b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/campplus.onnx new file mode 100644 index 0000000000000000000000000000000000000000..7b08523b2e28e437cfb1a0312723a5ab0bac287e --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/campplus.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6ac6a63997761ae2997373e2ee1c47040854b4b759ea41ec48e4e42df0f4d73 +size 28303423 diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/configuration.json b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/configuration.json new file mode 100644 index 0000000000000000000000000000000000000000..5e812fae901c12933ac69ebf3eb79d0eb49bbab4 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/configuration.json @@ -0,0 +1 @@ +{"framework":"Pytorch","task":"text-to-speech"} \ No newline at end of file diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/cosyvoice.yaml b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/cosyvoice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fff0fb30113dcd820a4c68fee4f971103ac9bf99 --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/cosyvoice.yaml @@ -0,0 +1,116 @@ +# set random seed, so that you may reproduce your result. +__set_seed1: !apply:random.seed [1986] +__set_seed2: !apply:numpy.random.seed [1986] +__set_seed3: !apply:torch.manual_seed [1986] +__set_seed4: !apply:torch.cuda.manual_seed_all [1986] + +# fixed params +sample_rate: 24000 +llm_input_size: 2048 +llm_output_size: 2048 +spk_embed_dim: 192 +qwen_pretrain_path: '' + +# model params +# for all class/function included in this repo, we use ! or ! for intialization, so that user may find all corresponding class/function according to one single yaml. +# for system/third_party class/function, we do not require this. 
+llm: !new:model.llm.llm.RWKV7LM + llm_input_size: !ref + llm_output_size: !ref + speech_token_size: 6561 + length_normalized_loss: True + lsm_weight: 0 + vocab_size: 65548 + llm: !ref + sampling: !name:cosyvoice.utils.common.ras_sampling + top_p: 0.8 + top_k: 25 + win_size: 10 + tau_r: 0.1 + +flow: !new:cosyvoice.flow.flow.CausalMaskedDiffWithXvec + input_size: 512 + output_size: 80 + spk_embed_dim: !ref + output_type: 'mel' + vocab_size: 6561 + input_frame_rate: 25 + only_mask_loss: True + token_mel_ratio: 2 + pre_lookahead_len: 3 + encoder: !new:cosyvoice.transformer.upsample_encoder.UpsampleConformerEncoder + output_size: 512 + attention_heads: 8 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.1 + normalize_before: True + input_layer: 'linear' + pos_enc_layer_type: 'rel_pos_espnet' + selfattention_layer_type: 'rel_selfattn' + input_size: 512 + use_cnn_module: False + macaron_style: False + decoder: !new:cosyvoice.flow.flow_matching.CausalConditionalCFM + in_channels: 240 + n_spks: 1 + spk_emb_dim: 80 + cfm_params: !new:omegaconf.DictConfig + content: + sigma_min: 1e-06 + solver: 'euler' + t_scheduler: 'cosine' + training_cfg_rate: 0.2 + inference_cfg_rate: 0.7 + reg_loss_type: 'l1' + estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder + in_channels: 320 + out_channels: 80 + causal: True + channels: [256] + dropout: 0.0 + attention_head_dim: 64 + n_blocks: 4 + num_mid_blocks: 12 + num_heads: 8 + act_fn: 'gelu' + +hift: !new:cosyvoice.hifigan.generator.HiFTGenerator + in_channels: 80 + base_channels: 512 + nb_harmonics: 8 + sampling_rate: !ref + nsf_alpha: 0.1 + nsf_sigma: 0.003 + nsf_voiced_threshold: 10 + upsample_rates: [8, 5, 3] + upsample_kernel_sizes: [16, 11, 7] + istft_params: + n_fft: 16 + hop_len: 4 + resblock_kernel_sizes: [3, 7, 11] + resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] + source_resblock_kernel_sizes: [7, 7, 11] + source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] + lrelu_slope: 0.1 + audio_limit: 0.99 + f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor + num_class: 1 + in_channels: 80 + cond_channels: 512 + +# processor functions +get_tokenizer: !name:utils.utilities.get_tokenizer + model_dir: !ref +allowed_special: 'all' +feat_extractor: !name:matcha.utils.audio.mel_spectrogram + n_fft: 1920 + num_mels: 80 + sampling_rate: !ref + hop_size: 480 + win_size: 1920 + fmin: 0 + fmax: 8000 + center: False diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.encoder.fp16.zip b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.encoder.fp16.zip new file mode 100644 index 0000000000000000000000000000000000000000..1fa00a566e360c9fe08b5714cc6a46d18df5f1db --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.encoder.fp16.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46d2539ad8bdb90026cd50cb42e45bd389f10108111d742b912feddca105aeb6 +size 116703414 diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/hift.pt b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/hift.pt new file mode 100644 index 0000000000000000000000000000000000000000..783dc8c987ad378f69fea694e9bfdb8c17f3062e --- /dev/null +++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/hift.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d4af0d661a416c69544eec83ff9c070dc80c37ee53ef44af3a37d910c95bc21 +size 83364158 diff --git a/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/spk2info.pt 
b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/spk2info.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a657663bf78b17d759803d4e4329c4a9c041f8be
--- /dev/null
+++ b/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/spk2info.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbc8f9064db35ee8163b538c0f6ed9fe0c3e2fe0f560cca910e578138d961285
+size 3281245
diff --git a/Inference.md b/Inference.md
new file mode 100644
index 0000000000000000000000000000000000000000..b34d82c58a681fff414fc8a34f356701e3fdd62e
--- /dev/null
+++ b/Inference.md
@@ -0,0 +1,98 @@
+# Install the code base and the dependencies
+```bash
+git clone https://github.com/yynil/RWKVTTS
+```
+Add these two directories to the PYTHONPATH:
+```bash
+export PYTHONPATH=$PYTHONPATH:/home/user/RWKVTTS:/home/user/RWKVTTS/third_party
+```
+# Install the dependencies
+```bash
+conda create -n rwkvtts-311 -y python=3.11
+conda activate rwkvtts-311
+conda install -y -c conda-forge pynini==2.1.6
+cd RWKVTTS
+pip install -r rwkvtts_requirements.txt
+```
+
+Download the pretrained model from:
+https://huggingface.co/yueyulin/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO
+
+Place CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO in a local directory, say /home/user/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO, and make sure the two RWKVTTS directories above are on your PYTHONPATH.
+
+The example code for inference is as follows:
+```python
+def do_tts(tts_text, prompt_texts, cosyvoice):
+    # Synthesises tts_text once per reference voice; uses the global prompt_audios defined in __main__.
+    import logging
+    for i, (prompt_audio_file, prompt_text) in enumerate(zip(prompt_audios, prompt_texts)):
+        logging.info(f'Processing {prompt_text}')
+        prompt_speech_16k = load_wav(prompt_audio_file, 16000)
+        with torch.no_grad():
+            if prompt_text is not None:
+                # Zero-shot cloning: the prompt transcript guides the continuation.
+                for j, k in enumerate(cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=False, speed=1)):
+                    torchaudio.save('zero_{}_{}.wav'.format(i, j), k['tts_speech'], cosyvoice.sample_rate)
+            else:
+                # Cross-lingual cloning: only the voice of the reference audio is used.
+                for j, k in enumerate(cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=False, speed=1)):
+                    torchaudio.save('zero_{}_{}.wav'.format(i, j), k['tts_speech'], cosyvoice.sample_rate)
+        logging.info(f'Finished processing {prompt_text}')
+if __name__ == '__main__':
+    from cosyvoice.cli.cosyvoice import CosyVoice2
+    import torch
+    import sys
+    # model_path = '/home/yueyulin/models/CosyVoice2-0.5B_RWKV_0.19B/'
+    # device = 'cuda:0'
+    print(sys.argv)
+    model_path = sys.argv[1]
+    device = sys.argv[2] if len(sys.argv) > 2 else 'cuda:0'
+    is_flow_only = sys.argv[3] == 'True' if len(sys.argv) > 3 else False
+    print(f'is_flow_only: {is_flow_only}')
+    cosyvoice = CosyVoice2(model_path, device=device, fp16=False, load_jit=False)
+
+    from cosyvoice.utils.file_utils import load_wav
+    import torchaudio
+    prompt_audios = [
+        '/home/yueyulin/github/RWKVTTS/zero_shot_prompt.wav',
+        '/home/yueyulin/github/RWKVTTS/mine.wav',
+        '/home/yueyulin/github/RWKVTTS/new.wav',
+        '/home/yueyulin/github/RWKVTTS/Trump.wav',
+    ]
+
+    if not is_flow_only:
+        prompt_texts = [
+            '希望你以后做的比我还好呦。',
+            '少年强则中国强。',
+            '我随便说一句话,我喊开始录就开始录。',
+            'numbers of Latino, African American, Asian American and native American voters.'
+        ]
+    else:
+        prompt_texts = [
+            None,
+            None,
+            None,
+            None
+        ]
+    do_tts('Make America great again!', prompt_texts, cosyvoice)
+```
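+If you only want to synthesize one sentence with one reference voice, the script above reduces to roughly the following minimal sketch. The paths are placeholders for your own checkpoint and reference clip; the calls (CosyVoice2, load_wav, inference_zero_shot) are the same ones used in the example above:
+```python
+import torch
+import torchaudio
+from cosyvoice.cli.cosyvoice import CosyVoice2
+from cosyvoice.utils.file_utils import load_wav
+
+# Placeholder paths: point them at your local checkpoint and a clean 16 kHz reference clip.
+cosyvoice = CosyVoice2('/home/user/CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO',
+                       device='cuda:0', fp16=False, load_jit=False)
+prompt_speech_16k = load_wav('zero_shot_prompt.wav', 16000)
+
+with torch.no_grad():
+    # Zero-shot cloning: the prompt text is the transcript of the reference audio.
+    for j, out in enumerate(cosyvoice.inference_zero_shot(
+            '中国在东亚,是世界上最大的国家,也是世界上人口最多的国家。',
+            '希望你以后做的比我还好呦。',
+            prompt_speech_16k, stream=False, speed=1)):
+        torchaudio.save(f'zero_shot_{j}.wav', out['tts_speech'], cosyvoice.sample_rate)
+```
+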
+More examples can be found in the model/test directory:
+
+[Instruct example](model/test/test_instructed.py) shows how to use the instructed voice flow to generate audio.
+
+[Embedded ref voice example](model/test/test_speaker_adapter.py) shows how to use the speaker adapter to generate audio.
+
+Please refer to the [Service Call URL](service/README.md) for the instructions and the reference voices.
+
+If you pass prompt_texts as None, the engine only clones the voice timbre and texture, which works well for cross-lingual voice cloning. If you pass the correct prompt texts, the engine tries to continue the audio tokens following the prompt audio you provided; this is good for continuing the provided audio, but can sound odd when you mix languages.
+
+The test source code is in [test code](model/test/test_initialize.py). Please change the paths to match your system.
+
+You can also use your own prompt audio and text. Since the llm module continues your audio tokens, make sure the audio is clean and complete and the transcript is correct; otherwise the result may be poor.
+
+The following table shows example results from the code above:
+
+| Prompt Audio | Prompt Text | TTS Text | Result |
+| --- | --- | --- | --- |
+| https://github.com/yynil/RWKVTTS/raw/main/zero_shot_prompt.wav | 希望你以后做的比我还好呦。 | 中国在东亚,是世界上最大的国家,也是世界上人口最多的国家。 | https://github.com/yynil/RWKVTTS/raw/main/zero_0_0.wav |
+| https://github.com/yynil/RWKVTTS/raw/main/mine.wav | 少年强则中国强。 | 中国在东亚,是世界上最大的国家,也是世界上人口最多的国家。 | https://github.com/yynil/RWKVTTS/raw/main/zero_1_0.wav |
+| https://github.com/yynil/RWKVTTS/raw/main/new.wav | 我随便说一句话,我喊开始录就开始录。 | 中国在东亚,是世界上最大的国家,也是世界上人口最多的国家。 | https://github.com/yynil/RWKVTTS/raw/main/zero_2_0.wav |
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
diff --git a/README.md b/README.md
index 7be5fc7f47d5db027d120b8024982df93db95b74..a395fd562842e8b0ec7a5badab5e178d0141fb22 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,181 @@
----
-license: mit
----
+# RWKVTTS
+This project trains an RWKV LLM for TTS generation that is compatible with other TTS engines (like Fish/Cosy/ChatTTS).
+
+Most modern LLM-based TTS engines have two parts:
+1. VQ-VAE: encodes audio into audio tokens and decodes audio tokens back into audio.
+2. LLM: generates audio tokens from text tokens and prompt audio tokens. The prompt audio tokens also come from the VQ-VAE.
+
+Typically, training an LLM-based TTS system involves both VQ-VAE training and LLM training, as in CosyTTS, ChatTTS and FishTTS. Here, however, we focus on training an RWKV LLM to replace the LLM part of these TTS engines.
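+
+To make this concrete, here is a toy, self-contained sketch of the decode loop that the flowchart below describes. It is illustrative only: the embedding tables, the GRU stand-in for the RWKV language model, the greedy decoding and the end-of-audio id are assumptions for the sketch, not the project's actual modules (the real model uses an RWKV backbone and ras_sampling).
+```python
+import torch
+import torch.nn as nn
+
+# Toy sizes; the real vocabularies are much larger (e.g. 6561 speech tokens in this repo).
+TEXT_VOCAB, AUDIO_VOCAB, DIM, EOA = 100, 50, 32, 0    # EOA = end-of-audio token id (assumed)
+
+text_embed  = nn.Embedding(TEXT_VOCAB, DIM)            # "Text Embedder"
+audio_embed = nn.Embedding(AUDIO_VOCAB, DIM)           # "Audio Embedder"
+backbone    = nn.GRU(DIM, DIM, batch_first=True)       # stand-in for the RWKV language model
+audio_head  = nn.Linear(DIM, AUDIO_VOCAB)              # "Audio Head"
+
+text_tokens  = torch.randint(0, TEXT_VOCAB, (1, 8))    # from the text tokenizer
+prompt_audio = torch.randint(1, AUDIO_VOCAB, (1, 12))  # from the VQ audio tokenizer (reference audio)
+
+# Concatenate text and prompt-audio embeddings as the LM input, then decode autoregressively.
+inputs = torch.cat([text_embed(text_tokens), audio_embed(prompt_audio)], dim=1)
+hidden, generated = None, []
+for _ in range(20):
+    out, hidden = backbone(inputs, hidden)
+    next_token = audio_head(out[:, -1]).argmax(-1)      # greedy here; the repo uses ras_sampling
+    if next_token.item() == EOA:                        # "Continue to decode?" -> No
+        break
+    generated.append(next_token.item())
+    inputs = audio_embed(next_token).unsqueeze(1)       # "Next Step Input"
+# 'generated' would then be decoded back to a waveform by the VQ-VAE decoder.
+```
+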
+ +```mermaid +flowchart TB + node_1[["Input Prompt Text"]] + node_2(["Text Tokenizer"]) + node_3(["Audio Tokenizer(VQ)"]) + node_4[["Input Reference Audio"]] + node_5[["Text Tokens"]] + node_6[["Audio Tokens"]] + node_7(["Text Embedder"]) + node_8(["Audio Embedder"]) + node_9[["Text Embeddings"]] + node_10[["Audio Embeddings"]] + node_11(["Concatenate Embeddings"]) + node_12[["Input Embeddings"]] + node_13{{"Language Model"}} + node_14[["Hidden States"]] + node_15(["Audio Head"]) + node_16{"Continue to decode?"} + node_17(["Next Step Input"]) + node_18(["Finish Decode"]) + node_1 --> node_2 + node_4 --> node_3 + node_2 --> node_5 + node_3 --> node_6 + node_5 --> node_7 + node_6 --> node_8 + node_7 --> node_9 + node_8 --> node_10 + node_9 --> node_11 + node_10 --> node_11 + node_11 --> node_12 + node_12 --> node_13 + node_13 --> node_14 + node_14 --> node_15 + node_15 --> node_16 + node_16 --"Yes"--> node_17 + node_17 --> node_13 + node_16 --"No"--> node_18 +``` + +Different TTS engines might have different data layout and different special control token, so we need to prepare different data and train a RWKV LLM model for each TTS engine. + +# Process to train LLM for different TTS engine + +## Cosy 2.0 + +### Cosy 2.0 Data Layout + +The layout of Cosy 2.0 for LLM: + +```mermaid + +flowchart LR + node_1[["SOS Embeddings"]] + node_2[["Text Embeddings"]] + node_3[["Task ID Embedings"]] + node_4[["Audio Embeddings"]] + node_5[["Last Audio Embeddings"]] + node_1 --- node_2 + node_2 --- node_3 + node_3 --> node_4 + node_4 --> node_5 + +``` + +The forward of LLM for cosy 2.0: +```mermaid +graph TD + A[Input: batch] --> B[Extract tokens and lengths] + B --> C1[Prepare LLM Target] + B --> C2[Encode Text Tokens] + B --> C3[Generate SOS/EOS and Task ID Embeddings] + B --> C4[Encode Speech Tokens] + + C1[Prepare LLM Target] --> D1["Create target sequence for each sample
[IGNORE_ID, ..., speech_tokens, EOS]"]
    D1 --> D2[Pad and move target to device]

    C2[Encode Text Tokens] --> E1[Apply text_embedding layer]

    C3[Generate SOS/EOS and Task ID Embeddings] --> F1[Get SOS/EOS embeddings from llm_embedding]
    C3 --> F2[Get task_id embeddings from llm_embedding]

    C4[Encode Speech Tokens] --> G1[Apply speech_embedding layer]

    E1 --> H[Unpad and pad sequence]
    F1 --> H
    F2 --> H
    G1 --> H

    H --> I1[Generate LM input]
    H --> I2[Create attention mask]

    I1 --> J[Run LLM forward pass]
    I2 --> J

    J --> K[Extract hidden states]
    K --> L[Generate logits through llm_decoder]

    D2 --> M[Compute loss and accuracy]
    L --> M

    M --> N[Return loss and accuracy]
```

There are some points to note for Cosy 2.0:
1. The prompt audio tokens act as the reference audio; the LLM generates audio tokens that mimic this reference.
2. '<|endofprompt|>' terminates the prompt text; it is a special token indicating that the prompt is an instruction.

### Cosy 2.0 Data Preparation

1. Download the reference audio files from https://huggingface.co/datasets/yueyulin/TTS_Reference and put them in the folder $REF_AUDIO_DIR. These audios are used to generate audio tokens.
2. Download the Cosy 2.0-0.5B model from https://huggingface.co/FunAudioLLM/CosyVoice2-0.5B and put it in the folder $MODEL_DIR.
3. Clone the Cosy 2.0 repo from https://github.com/yynil/CosyVoice and follow its instructions to set up the environment. In this repository I changed the code so that the user can specify the CUDA device for multi-process generation. If you have installed torch 2.6, remember to force triton to downgrade to 3.1.0.
4. Prepare the text data for the audio-token training dataset. Currently we support parquet files and jsonl files; the text field is the only required field in the data file. I downloaded the Chinese and English parquet files from [wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia).
5. Generate the audio tokens using the following command:
```bash
bash run_multiple_process.sh --parquet_files /home/yueyulin/data/wiki/zh/train-00000-of-00006.parquet /home/yueyulin/data/wiki/zh/train-00001-of-00006.parquet /home/yueyulin/data/wiki/zh/train-00002-of-00006.parquet /home/yueyulin/data/wiki/zh/train-00003-of-00006.parquet /home/yueyulin/data/wiki/zh/train-00004-of-00006.parquet /home/yueyulin/data/wiki/zh/train-00005-of-00006.parquet --language zh --prompts_dir extract_data/prompts/zh --device cuda:0 --output_dir /home/yueyulin/data/speech_corpus
```
The prompts_dir is the $REF_AUDIO_DIR, and the parquet_files are the list of files downloaded from wikimedia; each file is handled by one process. In my experience, one 4090 can process 6 files at the same time. The output_dir is the directory where the generated audio tokens are saved.
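To make the layout and forward-pass diagrams concrete before moving on to training, here is a minimal sketch of how one sample could be assembled. `IGNORE_ID`, the embedding arguments, and the exact target alignment are assumptions for illustration rather than code from this repository:

```python
import torch

# All names and values below are illustrative assumptions, not code from this repo.
IGNORE_ID = -100  # assumed label for positions excluded from the cross-entropy loss


def build_lm_sample(sos_emb, text_emb, task_emb, speech_emb, speech_tokens, eos_speech_id):
    """Assemble one [SOS][text][TASK_ID][speech] sample and its training targets.

    sos_emb, task_emb: (1, D) rows looked up from llm_embedding
    text_emb:          (T_text, D) output of the text_embedding layer
    speech_emb:        (T_speech, D) output of the speech_embedding layer
    speech_tokens:     (T_speech,) target audio-token ids
    """
    # Input embeddings seen by the language model.
    lm_input = torch.cat([sos_emb, text_emb, task_emb, speech_emb], dim=0)

    # Targets: the SOS/text/task region is masked out with IGNORE_ID, the speech
    # region is supervised with the audio tokens, and the sequence ends with an
    # EOS audio token. The exact shift against the logits is schematic here.
    n_masked = 1 + text_emb.size(0) + 1
    lm_target = torch.cat([
        torch.full((n_masked,), IGNORE_ID, dtype=torch.long),
        speech_tokens.long(),
        torch.tensor([eos_speech_id], dtype=torch.long),
    ])
    return lm_input, lm_target
```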
+ + +### Cosy 2.0 LLM Training +After data is generated and saved, we will get the JSONL files like : +```json +{"text": "甄别重点监测企业是确保监测数据全面性和代表性的基础。首先,需要根据预警机制的覆盖范围和目标,明确监测企业的选择标准。选择标准可以包括企业规模、市场份额、行业影响力等。其次,通过企业调查、行业协会推荐等方式,初步筛选出符合条件的潜在监测企业。", "tts_speech_tokens": [2031, 4137, 6405, 6405, 6405, 6405, 6405, 6324, 6324, 6324, 6324, 6324, 6324, 4218, 1761, 4509, 2333, 4483, 5934, 6258, 1929, 3482, 314, 2300, 957, 5163, 6309, 5064, 6425, 3992, 1932, 80, 305, 734, 1479, 5650, 2472, 4778, 4487, 6175, 5667, 5373, 2187, 4851, 137, 141, 4919, 4407, 2436, 1295, 2024, 1294, 4940, 4778, 2330, 764, 1762, 2031, 1788, 5943, 5319, 5238, 5338, 3872, 1614, 4920, 6055, 6027, 3084, 5343, 4605, 2330, 218, 2172, 572, 1949, 1331, 865, 4921, 2472, 4688, 4379, 5850, 6342, 6373, 2997, 2529, 5087, 623, 3700, 6292, 6291, 5823, 5830, 2102, 1041, 6225, 6316, 3887, 889, 5487, 3813, 1626, 953, 734, 909, 4314, 4804, 4821, 4463, 23, 4683, 4678, 2724, 4832, 992, 1238, 2673, 324, 2099, 2486, 135, 2001, 4537, 5271, 2519, 957, 1699, 953, 1304, 1028, 4752, 2553, 5560, 4154, 1287, 59, 879, 4921, 2499, 5748, 5019, 240, 5889, 6264, 4293, 2186, 2105, 2005, 6405, 6405, 6324, 6324, 6324, 4137, 4218, 3651, 6048, 3132, 1433, 1457, 3962, 4515, 2482, 4490, 4561, 4669, 6054, 6270, 6316, 4615, 4781, 575, 632, 2031, 183, 4598, 4479, 6181, 5496, 4128, 3887, 1943, 1861, 6288, 5343, 6072, 3319, 2733, 322, 1187, 1727, 1807, 4921, 4677, 5668, 5019, 2427, 2976, 6066, 5332, 63, 73, 380, 4239, 6534, 6543, 5101, 1452, 213, 5921, 2273, 6453, 4347, 4537, 4459, 11, 2124, 866, 386, 485, 2511, 333, 632, 4317, 5772, 5803, 1457, 2163, 889, 5021, 2381, 5675, 5056, 5092, 1951, 3888, 3645, 4218, 6405, 6324, 4137, 1884, 1646, 2726, 377, 3992, 5529, 2481, 6054, 3822, 5340, 2330, 71, 2733, 2499, 5012, 4463, 5850, 6342, 6373, 2268, 4851, 137, 151, 4921, 4435, 4650, 528, 1295, 1295, 2023, 2753, 4850, 4570, 2243, 1047, 56, 113, 4512, 5568, 1662, 971, 5, 1480, 6387, 1045, 65, 460, 2160, 5102, 4568, 5056, 5098, 1602, 6048, 4367, 956, 59, 1524, 6405, 6405, 6324, 6324, 6324, 6324, 6324, 4137, 2031, 2706, 5325, 1653, 3887, 2219, 3667, 5664, 803, 4592, 2163, 5587, 4598, 5026, 5089, 1692, 5976, 1937, 146, 41, 1507, 1950, 2031, 0, 2349, 343, 4607, 5019, 566, 1683, 2166, 5051, 5678, 5057, 5830, 573, 2835, 2856, 5099, 707, 947, 1113, 4675, 4408, 4623, 1294, 2024, 2023, 3481, 4778, 2411, 1208, 1302, 660, 5827, 5345, 5074, 4560, 6501, 1403, 635, 716, 680, 5057, 4970, 1947, 3645, 1458, 1707, 6024, 6049, 5238, 5340, 1696, 5244, 1468, 1946, 509, 1318, 6534, 2800, 4510, 2234, 1991, 2017, 2018, 1370, 470, 2891, 4997, 1972, 1701, 5832, 1458, 1950, 4860, 5589, 1946, 1949, 509, 5369, 4966, 5019, 4849, 2411, 314, 1293, 1267, 377, 6421, 4800, 4416, 4893, 8, 1946, 1967, 1584, 4615, 5019, 2510, 867, 63, 245, 533, 1991, 4218, 6405, 6405, 6324, 6324, 6324, 6324, 6324, 4137, 1950, 4920, 4516, 276, 2024, 4777, 4194, 6373, 5643, 4851, 4448, 65, 1517, 1978, 4218, 6405, 4218, 2112, 1350, 4860, 5074, 5772, 6262, 672, 5097, 5090, 221, 1032, 4675, 4408, 285, 1295, 1294, 557, 4490, 228, 276, 4858, 4807, 2870, 1675, 6051, 1539, 4141, 1946, 4133, 6320, 4699, 982, 1950, 5832, 5835, 3645, 1947, 5589, 5589, 4136, 1946, 1235, 4642, 4993, 4857, 4598, 62, 4431, 4675, 285, 1043, 314, 2414, 2760, 2850, 5094, 3158, 1214, 1032, 2997, 2763, 5345, 5100, 402, 4677, 4857, 4543, 5, 1482, 2004, 56, 515, 1970, 2077, 6534, 3488, 5591, 5690, 5869, 5319, 2331, 5342, 1688, 1679, 1735, 4218, 6324, 6324, 6405, 4218, 2031, 5886, 6291, 6480, 2883, 5829, 5826, 2175, 5799, 5826, 2186, 2183, 5940, 5322, 120, 5918, 4571, 4687, 
3813, 962, 737, 1561, 5886, 4077, 1429, 5831, 6560, 3644, 6429, 6507, 6534, 2101, 2186, 5097, 2682, 2673, 2017, 2576, 4594, 1005, 4785, 2760, 854, 1946, 683, 4844, 2733, 4695, 4840, 2192, 1482, 72, 29, 788, 1761, 4921, 4408, 2517, 566, 35, 2192, 5934, 4209, 5652, 4537, 5920, 278, 160, 3462, 4686, 5021, 4490, 5853, 3912, 6374, 2997, 4716, 2567, 140, 3462, 4435, 2436, 1295, 1295, 2023, 3482, 4769, 4598, 89, 1736, 4218, 6405, 6405, 6324, 6324, 4137], "prompt_text": "那么就在两侧的象限同时忙碌。", "llm_prompt_speech_token": [3686, 6324, 4137, 1959, 3666, 4376, 2836, 2127, 578, 2441, 1041, 2337, 6073, 3560, 1369, 5650, 4691, 5192, 2924, 89, 1687, 1539, 4218, 1848, 160, 4760, 2825, 1463, 1946, 1223, 1313, 2067, 5648, 2997, 2268, 2277, 4842, 4763, 308, 1038, 140, 842, 2983, 4672, 4650, 4696, 5995, 5603, 1238, 1238, 4672, 4650, 4777, 2474, 8, 767, 1731, 4299, 2079, 4941, 4947, 665, 719, 4319, 6424, 5067, 5967, 6048, 5967, 5238, 1523, 3875, 3872, 4314, 661, 1946, 1217, 500, 6422, 1506, 4852, 5831, 1457, 1448]} +{"text": "Once all the Cabinet and Cabinet-level officers have been invested, the act of their investiture usually ends with a \"family photo\" of the new Administration around the new president and vice-president. For this photo, the new ministers' alignment and proximity to the president is dictated by the order of precedence, with the ministers who head older departments standing in the first row, and the heads of the newer departments standing in the back rows. Some departments, such as the Department of Defence, take precedence from prior departments now abolished.", "tts_speech_tokens": [764, 35, 1896, 4299, 6486, 4299, 4299, 4299, 4218, 651, 2112, 2131, 1403, 2792, 2207, 1725, 5401, 281, 575, 683, 4997, 3474, 4492, 195, 87, 5109, 5846, 6077, 2270, 2172, 3828, 4424, 4543, 1520, 1753, 6258, 4075, 141, 5109, 5845, 3647, 1188, 3987, 3750, 4414, 1516, 4180, 5014, 5348, 1441, 6534, 5075, 5100, 1274, 1301, 3569, 3488, 3996, 6183, 4752, 4919, 2328, 3158, 6071, 5264, 5482, 5403, 5844, 5837, 191, 2139, 1839, 2255, 831, 4508, 4576, 6255, 1857, 29, 2, 2228, 5482, 6459, 2004, 2253, 2267, 2255, 885, 2112, 1788, 5916, 5835, 5919, 5919, 5919, 4056, 4299, 2058, 2982, 1295, 305, 1463, 3647, 2383, 2112, 3054, 4603, 3043, 4272, 2260, 4841, 6029, 6062, 5329, 6256, 6465, 2386, 2921, 2204, 4429, 5647, 2085, 2490, 809, 159, 546, 5325, 5298, 917, 1688, 3863, 3872, 3884, 3481, 3480, 4130, 5993, 5979, 5322, 5257, 5634, 4691, 4533, 5100, 1277, 764, 5111, 5, 47, 3748, 4929, 2376, 3583, 2990, 6456, 2232, 2306, 6507, 6210, 4463, 5840, 2270, 4071, 5693, 4663, 5100, 5226, 6510, 6534, 2900, 2567, 137, 882, 1199, 2831, 632, 389, 4251, 4191, 73, 49, 3831, 404, 971, 4853, 4613, 4074, 4314, 2417, 3750, 4507, 4416, 4594, 3624, 5325, 962, 224, 404, 5295, 4596, 2238, 3670, 3848, 4339, 1676, 812, 2441, 6097, 3934, 2261, 3750, 1564, 3401, 6074, 5823, 1383, 4293, 3816, 3734, 2219, 4450, 5482, 2996, 150, 3063, 143, 3019, 3667, 149, 3748, 4278, 4347, 3485, 5270, 4858, 5239, 2568, 2028, 4050, 3011, 32, 2264, 4672, 2991, 888, 804, 149, 2234, 5934, 1744, 2112, 3975, 5916, 5943, 5919, 5943, 5919, 5946, 5916, 3972, 4299, 6402, 6534, 1927, 140, 1038, 2263, 4567, 4413, 5563, 4672, 3999, 6264, 4826, 2810, 2567, 228, 227, 2324, 2504, 1773, 6375, 77, 3831, 754, 3401, 4612, 6498, 4311, 2411, 831, 2255, 4414, 5320, 4920, 2328, 5345, 5169, 4752, 4763, 5014, 6449, 2687, 3413, 3647, 2276, 3670, 4069, 1883, 2330, 4499, 1525, 1762, 1490, 2921, 1639, 2166, 4050, 4304, 2837, 732, 6049, 5405, 2266, 910, 4315, 2399, 798, 4859, 4857, 1923, 4434, 4485, 5152, 4206, 
4447, 1917, 2136, 3807, 3740, 5, 2264, 5166, 5409, 806, 2982, 878, 2258, 860, 1525, 1762, 3320, 5169, 2166, 546, 2994, 4526, 4056, 2112, 60, 2274, 2528, 5084, 231, 4450, 4597, 1938, 2163, 650, 5108, 2335, 4188, 4859, 1760, 2096, 2903, 4349, 1684, 873, 3872, 6059, 6058, 5976, 4299, 2136, 4050, 3740, 2, 4432, 6455, 2226, 886, 3063, 881, 71, 2234, 5937, 5650, 5238, 4296, 1422, 2342, 2139, 3462, 2261, 1641, 4314, 230, 186, 2965, 4523, 4509, 4999, 4839, 5345, 6070, 5263, 4839, 3813, 3018, 5825, 2926, 5106, 2924, 194, 147, 1433, 728, 2915, 477, 2325, 5330, 6070, 1527, 2421, 2166, 3564, 6166, 1865, 1676, 2092, 4068, 2255, 1483, 5658, 5726, 2085, 3219, 71, 35, 2219, 3828, 2210, 5047, 6100, 4526, 2934, 3909, 4511, 6453, 6534, 3367, 3863, 3146, 5241, 5323, 6054, 1872, 3881, 947, 380, 632, 2909, 2884, 4296, 5913, 5835, 5919, 5919, 5919, 5838, 3975, 2112, 3648, 2192, 831, 3906, 2222, 5118, 5111, 4487, 879, 5650, 4422, 5256, 6465, 4446, 4522, 3831, 2294, 5588, 5825, 3377, 6050, 1698, 147, 1920, 1404, 6328, 1622, 1676, 2083, 2124, 2336, 3669, 5402, 4269, 2490, 71, 8, 113, 1563, 395, 4238, 2510, 3016, 3936, 4430, 2163, 461, 5192, 5998, 5272, 1869, 651, 4302, 1685, 221, 380, 389, 803, 5412, 4753, 2244, 2028, 3648, 3729, 5916, 5919, 5916, 3732, 3975, 2112, 3894, 5239, 5648, 2250, 2918, 4807, 6258, 879, 4600, 2166, 3483, 6327, 6239, 1652, 1757, 1881, 128, 2264, 5935, 5631, 5729, 5482, 2198, 2309, 1329, 4756, 2263, 4448, 4437, 6454, 4272, 3465, 157, 66, 954, 2166, 5598, 3980, 3836, 1838, 2064, 4069, 2371, 2938, 4565, 4356, 789, 4612, 5940, 6510, 3270, 5, 737, 8, 2234, 3747, 5650, 5482, 4269, 303, 2193, 2447, 4849, 2112, 2085, 4050, 3739, 2192, 4428, 5486, 2253, 885, 2992, 2249, 5205, 3453, 4672, 6186, 6534, 6059, 4068, 2184, 4320, 3978, 4052, 1622, 926, 3140, 231, 157, 2160, 1404, 6084, 3809, 1598, 2092, 6255, 2234, 3750, 5405, 3459, 3669, 23, 1463, 974, 2675, 2891, 2166, 712, 5030, 5023, 5080, 2741, 308, 32, 2203, 5217, 4593, 1437, 303, 2112, 3975], "prompt_text": " So I am gonna do this right now. So let's do it.", "llm_prompt_speech_token": [1822, 5727, 5000, 930, 5015, 2912, 3616, 692, 1250, 1978, 4214, 3485, 2036, 1298, 2918, 5192, 5056, 5074, 5065, 4813, 3005, 3002, 3313, 4238, 795, 4523, 4520, 3038, 4496, 859, 1887, 2490, 3309, 6235, 5264, 6074, 6047, 5339, 5474, 4291, 2915, 2666, 3759, 4056, 4299, 3975, 6159, 6186, 6186, 6186, 5838, 5109, 3732, 2112, 2139, 3945, 4534, 4569, 4575, 6453, 5405, 4461, 4338, 5572, 3809, 2411, 1214, 1205, 3805, 4526, 4379, 2189, 3890, 3242, 1418, 2876, 5828, 2799, 5133, 5563, 5481, 2325, 155, 533, 2801, 3617, 725, 56, 4385, 834, 3444, 5482, 3273, 2166, 2328, 1908, 1372, 868]} +``` + +We use Deepspeed to train the model: +```bash +deepspeed --num_nodes 1 --num_gpus 4 train_scripts/train_llm.py --data_file /external_data/yueyudata/speech_corpus/ --model_name /external_data/models/rwkv7-1.5B-world/ --output_dir /external_data/yueyudata/cosy_voice_llm --max_length 2048 --wandb_project toy_cosy_llm --wandb_run_name server2_rwkv_7_1.5B --ds_param_offload True --ds_optimizer_offload True --ds_stage 2 --gradient_checkpointing True --logging_steps 10 --per_device_train_batch_size 8 +``` +The base model can be downloaded from https://huggingface.co/collections/fla-hub/rwkv7-6790fd37b4b6137b088a0d8a , just choose a proper model for your training. 
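Since --max_length caps the training sequence at 2048 tokens in the command above, it can help to check the generated corpus before launching DeepSpeed. The helper below is a hypothetical script (not part of this repo) that scans the JSONL files and reports speech-token lengths, using the fields shown in the samples above:

```python
import json
import os
import sys
from glob import glob


def corpus_stats(jsonl_dir):
    """Report how long the prompt + target speech-token sequences are."""
    lengths = []
    for path in glob(os.path.join(jsonl_dir, '**', '*.jsonl'), recursive=True):
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                sample = json.loads(line)
                lengths.append(len(sample.get('llm_prompt_speech_token', []))
                               + len(sample.get('tts_speech_tokens', [])))
    if not lengths:
        print('no samples found')
        return
    lengths.sort()
    print(f'samples           : {len(lengths)}')
    print(f'max speech tokens : {lengths[-1]}')
    print(f'p95 speech tokens : {lengths[int(0.95 * (len(lengths) - 1))]}')


if __name__ == '__main__':
    corpus_stats(sys.argv[1])
```

Over-long samples are dropped by the dataset loader shown later in data/utils/llm_dataset.py, so very long sequences simply shrink the usable corpus rather than break training.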
+ + +### Cosy 2.0 LLM Inference + +### Some samples + +#### Zero shot inference +prompt audio : +[prompt audio](mine.wav) + +prompt text: "今天天气挺不错的。" + +tts text: "收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。" + +tts audio: +[tts audio](zero_shot_0.wav) + + + +### TODO: +0. Drop prompt audio tokens randomly to simulate unconditional guided generation. +1. Add special control tokens in Cosy 2.0 in RWKV tokenizer and add them to generate audio tokens again: +```python + special_tokens = { + 'eos_token': '<|endoftext|>', + 'pad_token': '<|endoftext|>', + 'additional_special_tokens': [ + '<|im_start|>', '<|im_end|>', '<|endofprompt|>', + '[breath]', '', '', '[noise]', + '[laughter]', '[cough]', '[clucking]', '[accent]', + '[quick_breath]', + "", "", + "[hissing]", "[sigh]", "[vocalized-noise]", + "[lipsmack]", "[mn]" + ] + } +``` +2. Add special control tokens like dialects in RWKV7LM and generate audio tokens for training. +3. Implement streaming generation for Cosy 2.0 in RWKV7LM. diff --git a/Trump.wav b/Trump.wav new file mode 100644 index 0000000000000000000000000000000000000000..9940287ab6f0474d15f9d45ebb8ad92e7b69450a --- /dev/null +++ b/Trump.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:296432bb06954080b77c04a88841d61928d936077f5162947359520fa17836be +size 342108 diff --git a/_config.yml b/_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..5189eb447b1df77f2fb79ef6e0f842c5010388ba --- /dev/null +++ b/_config.yml @@ -0,0 +1,3 @@ +markdown: kramdown +kramdown: + parse_block_html: true \ No newline at end of file diff --git a/another.wav b/another.wav new file mode 100644 index 0000000000000000000000000000000000000000..582b6b850ebf5c9c102d444c85534b3cfce2dbf7 --- /dev/null +++ b/another.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4d103efaf538db967559861dbcf9995b60eca582360a6add5cf27c3faf3a49e +size 199724 diff --git a/badXT_71.wav b/badXT_71.wav new file mode 100644 index 0000000000000000000000000000000000000000..c43b58911ead99e2451cb696af4d22b4c16508da --- /dev/null +++ b/badXT_71.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c5e28420eb8c4506a1988d484fe9270b8422161d733c567abfccd74c106ceb9 +size 794726 diff --git a/data/cosy/data/data_processor.py b/data/cosy/data/data_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..1dcd1db3bf92e2f07bc97cbc1c07ee3446b225e9 --- /dev/null +++ b/data/cosy/data/data_processor.py @@ -0,0 +1,128 @@ +from pyexpat import model +import torchaudio +from hyperpyyaml import load_hyperpyyaml +import os +from cosyvoice.cli.frontend import CosyVoiceFrontEnd +from cosyvoice.cli.cosyvoice import CosyVoice2 +import json +import torch + +def load_from_configuration(model_dir): + with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f: + configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': os.path.join(model_dir, 'CosyVoice-BlankEN')}) + return configs +def init_process(model_dir,device): + cosyvoice = CosyVoice2(model_dir, load_jit=False, load_trt=False, fp16=True,device=device) + # configs = load_from_configuration(model_dir) + # frontend = CosyVoiceFrontEnd(configs['get_tokenizer'], + # configs['feat_extractor'], + # '{}/campplus.onnx'.format(model_dir), + # '{}/speech_tokenizer_v2.onnx'.format(model_dir), + # '{}/spk2info.pt'.format(model_dir), + # configs['allowed_special'], + # device) + frontend = cosyvoice.frontend + llm = cosyvoice.model.llm + return frontend,llm,cosyvoice + + +def 
preprocess_prompts(frontend,prompts_dir): + language_results = {} + final_rate = 24000 + for root, dirs, files in os.walk(prompts_dir): + for file in files: + if file.endswith('.json'): + json_file = os.path.join(root, file) + print(f"处理文件 {json_file}") + language = json_file.split('/')[-2] + if language not in language_results: + language_results[language] = [] + + # 尝试不同的编码格式读取文件 + try: + with open(json_file, 'r', encoding='utf-8') as f: + json_data = json.load(f) + except UnicodeDecodeError: + try: + # 尝试 GB2312/GBK 编码 (常用于中文) + with open(json_file, 'r', encoding='gbk') as f: + json_data = json.load(f) + except UnicodeDecodeError: + try: + # 尝试 GB18030 编码 (扩展的中文编码) + with open(json_file, 'r', encoding='gb18030') as f: + json_data = json.load(f) + except Exception as e: + print(f"无法读取文件 {json_file}: {e}") + continue + + wav_file = json_file.replace('.json', '.wav') + prompt_text = json_data['text'] + prompt_speech = torchaudio.load(wav_file, backend='soundfile')[0] + fake_tts_text = "a" + with torch.no_grad(): + model_input = frontend.frontend_zero_shot(fake_tts_text, prompt_text, prompt_speech,final_rate) + language_results[language].append((model_input,prompt_text)) + return language_results + +def generate_speech_tokens(llm,frontend,tts_text,model_input,device): + tts_text = frontend.text_normalize(tts_text,split=False, text_frontend=True) + tts_text_token, tts_text_token_len = frontend._extract_text_token(tts_text) + tts_text_token_len = torch.tensor([tts_text_token.shape[1]], dtype=torch.int32).to(device) + prompt_text = model_input['prompt_text'].to(device) + prompt_text_len = torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(device) + llm_prompt_speech_token = model_input['llm_prompt_speech_token'].to(device) + prompt_speech_token_len = torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(device) + flow_prompt_speech_token = model_input['flow_prompt_speech_token'].to(device) + prompt_speech_feat = model_input['prompt_speech_feat'].to(device) + llm_embedding = model_input['llm_embedding'].to(device) + flow_embedding = model_input['flow_embedding'].to(device) + speech_tokens = [] + for i in llm.inference(text = tts_text_token, + text_len = tts_text_token_len, + prompt_text = prompt_text, + prompt_text_len = prompt_text_len, + prompt_speech_token = llm_prompt_speech_token, + prompt_speech_token_len = prompt_speech_token_len, + embedding=llm_embedding + ): + speech_tokens.append(i) + tts_speech_tokens = torch.tensor(speech_tokens).unsqueeze(dim=0).to(device) + return tts_speech_tokens + +if __name__ == '__main__': + model_dir = '/data/yueyu/models/CosyVoice2-0.5B' + prompts_dir = 'extract_data/prompts' + + device = 'cuda:0' + frontend,llm,cosyvoice = init_process(model_dir + ,device) + prompts = preprocess_prompts(frontend,prompts_dir) + print(prompts) + model_input = prompts['zh'][0][0] + prompt_text = prompts['zh'][0][1] + tts_text = '扫一扫,立即体验中国银行信用卡好礼、绑卡立减等热门活动,实时掌握更多优惠信息。' + tts_text = '在中国的一个偏远山区,有一位名叫李远的年轻人,他对集群通信系统有着浓厚的兴趣。每天晚上,他都会在自己的小屋里研究各种关于集群通信系统的资料,试图弄懂其中的原理和运作机制。他对这个领域的研究不仅仅停留在理论层面,还亲手制作了一些模型,试图通过实践来加深理解。' + tts_text = "歷史(现代汉语词汇,古典文言文称之为史),指人类社会过去的事件和行动,以及对这些事件行为有系统的记录、诠释和研究。歷史可提供今人理解過去,作為未來行事的參考依據,与伦理、哲学和艺术同属人类精神文明的重要成果。历史的第二个含义,即对过去事件的记录和研究,又称历史学”,或简称“史学”。隶属于历史学或与其密切相关的学科有年代学、编纂学、家谱学、古文字学、计量历史学、考古学、社会学和新闻学等,参见历史学。记录和研究历史的人称为历史学家,简称“史学家”,中国古代称为史官。记录历史的书籍称为史书,如《史記》、《汉书》等,粗分為「官修」與「民載」兩類。" + tts_text = "### 如何提高花样游泳水平" + tts_speech_tokens = generate_speech_tokens(llm,frontend,tts_text,model_input,device) + print(tts_speech_tokens) + + + 
flow_prompt_speech_token = model_input['flow_prompt_speech_token'].to(device) + prompt_speech_feat = model_input['prompt_speech_feat'].to(device) + llm_embedding = model_input['llm_embedding'].to(device) + flow_embedding = model_input['flow_embedding'].to(device) + cosyvoice.model.hift_cache_dict['xxxx'] = None + tts_speech = cosyvoice.model.token2wav(token=tts_speech_tokens, + prompt_token=flow_prompt_speech_token, + prompt_feat=prompt_speech_feat, + embedding=flow_embedding, + uuid='xxxx', + token_offset=0, + finalize=True, + speed=1.0) + print(f'tts_speech shape:{tts_speech.shape}') + tts_speech = tts_speech.cpu() + torchaudio.save('zh_tts_S.wav', tts_speech, 24000) + print(model_input) \ No newline at end of file diff --git a/data/cosy/test/test_vq.py b/data/cosy/test/test_vq.py new file mode 100644 index 0000000000000000000000000000000000000000..d348f988df433f311e1b151bda5fefc25a784296 --- /dev/null +++ b/data/cosy/test/test_vq.py @@ -0,0 +1,171 @@ +from turtle import back +from click import prompt +import torch +from cosyvoice.cli.cosyvoice import CosyVoice2 +print(torch.cuda.is_available()) +print(torch.cuda.current_device()) +print(torch.cuda.device(0)) +print(torch.cuda.device_count()) +model_path = '/data/yueyu/models/CosyVoice2-0.5B' +# cosyvoice = CosyVoice2(model_path, load_jit=False, load_trt=False, fp16=False) +# print(cosyvoice) +# from cosyvoice.utils.file_utils import load_wav +# import torchaudio +# prompt_speech_16k = load_wav('/home/yueyulin/github/CosyVoice/asset/zero_shot_prompt.wav', 16000) +# # prompt_speech_16k = torch.rand((1, 16000)) +# for i, j in enumerate(cosyvoice.inference_zero_shot('收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。', '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)): +# torchaudio.save('zero_shot_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate) + +# for i, j in enumerate(cosyvoice.inference_cross_lingual('在他讲述那个荒诞故事的过程中,他突然[laughter]停下来,因为他自己也被逗笑了[laughter]。', prompt_speech_16k, stream=False)): +# torchaudio.save('fine_grained_control_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate) +# # instruct usage +# for i, j in enumerate(cosyvoice.inference_instruct2('吾今朝早上去外婆家吃饭。', '用上海话说这句话', prompt_speech_16k, stream=False)): +# torchaudio.save('instruct_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate) + +from hyperpyyaml import load_hyperpyyaml +import os +def load_from_configuration(model_dir): + with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f: + configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': os.path.join(model_dir, 'CosyVoice-BlankEN')}) + return configs + +configs = load_from_configuration(model_path) +print(configs) + +import torchaudio +def load_wav(wav, target_sr): + speech, sample_rate = torchaudio.load(wav, backend='soundfile') + speech = speech.mean(dim=0, keepdim=True) + if sample_rate != target_sr: + assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr) + speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech) + return speech + +zh_prompt_tar_file="/data/yueyu/data/Emilia-Dataset/Emilia/ZH/ZH-B000000.tar" +en_prompt_tar_file="/data/yueyu/data/Emilia-Dataset/Emilia/EN/EN-B000000.tar" + + +def load_file_list(tar_file): + #the files are FILE_NAME.mp3/FILE_NAME.json + #return all FILE_NAME as a list which has a mp3 and json + import tarfile + with tarfile.open(tar_file, 'r') as f: + file_names = f.getnames() + mp3_files = [i for i in file_names if i.endswith('.mp3')] + json_files = [i for i 
in file_names if i.endswith('.json')] + + #filter mp3_files without corresponded json + mp3_files = [i for i in mp3_files if i.replace('.mp3', '.json') in json_files] + return mp3_files + +zh_files = load_file_list(zh_prompt_tar_file) +print(zh_files[:10]) +en_files = load_file_list(en_prompt_tar_file) +print(en_files[:10]) +import io + +def load_random_samples_from_tar(tar_file, files, num_samples,target_sr,max_duration=10): + import random + import tarfile + import json + samples = [] + with tarfile.open(tar_file, 'r') as f: + for i in random.sample(files, len(files)): + mp3 = f.extractfile(i) + mp3_bytes = io.BytesIO(mp3.read()) + speech, sample_rate = torchaudio.load(mp3_bytes,backend='soundfile') + json_file = f.extractfile(i.replace('.mp3', '.json')) + json_data = json.load(json_file) + duration = json_data['duration'] + if duration > max_duration: + continue + speech = speech.mean(dim=0, keepdim=True) + if sample_rate != target_sr: + assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr) + speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech) + samples.append((speech, json_data,sample_rate)) + if len(samples) == num_samples: + break + return samples +target_sr = 16000 +zh_samples = load_random_samples_from_tar(zh_prompt_tar_file, zh_files, 10, target_sr) + +one_sample,one_json,sample_rate = zh_samples[0] +print(one_json) +print(sample_rate) +torchaudio.save('zh_sample.wav', one_sample, target_sr) +print(len(zh_samples)) + +en_samples = load_random_samples_from_tar(en_prompt_tar_file, en_files, 10, target_sr) +one_sample,one_json,sample_rate = en_samples[0] +print(one_json) +print(sample_rate) +torchaudio.save('en_sample.wav', one_sample, target_sr) +print(len(en_samples)) + +def resample_audio(samples, target_sr): + resampled_samples = [] + for i in samples: + speech, sample_rate = i + if sample_rate != target_sr: + assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr) + speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech) + resampled_samples.append((speech, sample_rate)) + return resampled_samples + +prompt_text = zh_samples[0][1]['text'] +prompt_speech = zh_samples[0][0] +print(prompt_text) +print(prompt_speech) +from cosyvoice.cli.cosyvoice import CosyVoice2 +cosyvoice = CosyVoice2(model_path, load_jit=False, load_trt=False, fp16=True) +from cosyvoice.cli.frontend import CosyVoiceFrontEnd +frontend = cosyvoice.frontend +prompt_text = frontend.text_normalize(prompt_text,split=False, text_frontend=True) +print(f'normalized prompt_text:{prompt_text}') +tts_text = '扫一扫,立即体验中国银行信用卡好礼、绑卡立减等热门活动,实时掌握更多优惠信息。' +tts_text = "在中国的一个偏远山区,有一位名叫李远的年轻人,他对集群通信系统有着浓厚的兴趣。每天晚上,他都会在自己的小屋里研究各种关于集群通信系统的资料,试图弄懂其中的原理和运作机制。他对这个领域的研究不仅仅停留在理论层面,还亲手制作了一些模型,试图通过实践来加深理解。" +tts_text = "歷史(现代汉语词汇,古典文言文称之为史),指人类社会过去的事件和行动,以及对这些事件行为有系统的记录、诠释和研究。歷史可提供今人理解過去,作為未來行事的參考依據,与伦理、哲学和艺术同属人类精神文明的重要成果。历史的第二个含义,即对过去事件的记录和研究,又称历史学”,或简称“史学”。隶属于历史学或与其密切相关的学科有年代学、编纂学、家谱学、古文字学、计量历史学、考古学、社会学和新闻学等,参见历史学。记录和研究历史的人称为历史学家,简称“史学家”,中国古代称为史官。记录历史的书籍称为史书,如《史記》、《汉书》等,粗分為「官修」與「民載」兩類。" +tts_text = frontend.text_normalize(tts_text,split=False, text_frontend=True) +print(f'normalized tts_text:{tts_text}') +final_rate = 24000 +model_input = frontend.frontend_zero_shot(tts_text, prompt_text, prompt_speech,final_rate) +print(model_input) +llm = cosyvoice.model.llm +device = cosyvoice.model.device +text = model_input['text'].to(device) +text_len = 
torch.tensor([text.shape[1]], dtype=torch.int32).to(device) +prompt_text = model_input['prompt_text'].to(device) +prompt_text_len = torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(device) +llm_prompt_speech_token = model_input['llm_prompt_speech_token'].to(device) +prompt_speech_token_len = torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(device) +flow_prompt_speech_token = model_input['flow_prompt_speech_token'].to(device) +prompt_speech_feat = model_input['prompt_speech_feat'].to(device) +llm_embedding = model_input['llm_embedding'].to(device) +flow_embedding = model_input['flow_embedding'].to(device) +speech_tokens = [] +for i in llm.inference(text = text, + text_len = text_len, + prompt_text = prompt_text, + prompt_text_len = prompt_text_len, + prompt_speech_token = llm_prompt_speech_token, + prompt_speech_token_len = prompt_speech_token_len, + embedding=llm_embedding + ): + speech_tokens.append(i) +print(speech_tokens) + +tts_speech_tokens = torch.tensor(speech_tokens).unsqueeze(dim=0).to(device) +print(f'tts_speech_tokens shape:{tts_speech_tokens.shape}') +cosyvoice.model.hift_cache_dict['xxxx'] = None +tts_speech = cosyvoice.model.token2wav(token=tts_speech_tokens, + prompt_token=flow_prompt_speech_token, + prompt_feat=prompt_speech_feat, + embedding=flow_embedding, + uuid='xxxx', + token_offset=0, + finalize=True, + speed=1.0) +print(f'tts_speech shape:{tts_speech.shape}') +tts_speech = tts_speech.cpu() +torchaudio.save('zh_tts.wav', tts_speech, final_rate) \ No newline at end of file diff --git a/data/utils/convert_embeddings_2_pt.py b/data/utils/convert_embeddings_2_pt.py new file mode 100644 index 0000000000000000000000000000000000000000..cba4d65420058035eda1a631da33883e5ecc46e6 --- /dev/null +++ b/data/utils/convert_embeddings_2_pt.py @@ -0,0 +1,34 @@ +import torch +import numpy as np +import sys +import os +import json +from sklearn.cluster import KMeans +jsonl_dir = sys.argv[1] +output_file_name = sys.argv[2] + +# Load the embeddings from jsonl files the key is the name of the file +embeddings = {} +for file in os.listdir(jsonl_dir): + print("Processing", file) + if file.endswith("_embeddings.json"): + with open(os.path.join(jsonl_dir, file), "r") as f: + print("Loading", file) + data = json.load(f) + key_name = os.path.basename(file).replace("_embeddings.json", "") + np_array = np.array(data) + if np_array.shape[0] == 1: + np_array = np_array[0] + else: + #find the cluster center of the embeddings using kmeans + kmeans = KMeans(n_clusters=1, random_state=0, n_init = 'auto').fit(np_array) + np_array = kmeans.cluster_centers_[0] + + embeddings[key_name]= {'embedding' : torch.tensor(np_array, dtype=torch.float32).unsqueeze(0)} +torch.save(embeddings, output_file_name) +print("Embeddings saved to", output_file_name) + +state_dict = torch.load(output_file_name) +print("Loaded embeddings from", output_file_name) +for key in state_dict: + print(key, state_dict[key]['embedding'].shape) \ No newline at end of file diff --git a/data/utils/create_embeddings_from_raw.py b/data/utils/create_embeddings_from_raw.py new file mode 100644 index 0000000000000000000000000000000000000000..45d0144f66921b9c1a6a96235d95183fe8715c0d --- /dev/null +++ b/data/utils/create_embeddings_from_raw.py @@ -0,0 +1,263 @@ +import os +from re import A +import whisper +from librosa import resample +import multiprocessing +from tqdm import tqdm +import onnxruntime +from onnxruntime import InferenceSession +import torch +import pyarrow.parquet as pq +import numpy as np +import json 
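# Overview: each worker process opens the campplus speaker-embedding ONNX model
# on CPU, walks one parquet file of audio rows (audio / transcription / language /
# speaker), resamples each clip to 16 kHz, extracts 80-bin Kaldi fbank features,
# and keeps up to 10 embeddings per speaker and language; the main process merges
# all worker results and writes one {speaker}_{language}_embeddings.json per pair.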
+import io +import soundfile as sf +import torchaudio +import torchaudio.compliance.kaldi as kaldi +import mmap +import os +import pyarrow.parquet as pq +import io +import soundfile as sf +import torchaudio.compliance.kaldi as kaldi +import torch +import numpy as np +import onnxruntime + +def process_file(file_info): + """处理单个parquet文件的函数,每个进程调用一次""" + parquet_file, output_path, speaker_extractor, device = file_info + + # 为每个进程创建独立的speech_tokenizer_session + option = onnxruntime.SessionOptions() + option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL + option.intra_op_num_threads = 1 + ort_session = onnxruntime.InferenceSession(speaker_extractor, sess_options=option, + providers=["CPUExecutionProvider"]) + results = {} + try: + # 创建目标文件名 + base_filename = os.path.splitext(os.path.basename(parquet_file))[0] + output_file = os.path.join(output_path, f"{base_filename}_tokens.jsonl") + + # 使用PyArrow读取parquet文件的元数据,获取总行数 + parquet_metadata = pq.read_metadata(parquet_file) + total_rows = parquet_metadata.num_rows + batch_size = 100 + + # 使用 mmap 读取 parquet 文件 + with open(parquet_file, 'rb') as f: + mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + + # 使用 io.BytesIO 将 mmap 对象包装成文件对象 + buffer = io.BytesIO(mm) + + pf = pq.ParquetFile(buffer) # 使用 mmap 包装的 buffer + + progress = tqdm(total=total_rows, + desc=f"Processing {os.path.basename(parquet_file)}", + position=multiprocessing.current_process()._identity[0] % 10) + + current_row = 0 + idx = 0 + for batch in pf.iter_batches(batch_size=batch_size): + df_batch = batch.to_pandas() + + # 处理当前批次中的每一行 + for _, row in df_batch.iterrows(): + current_row += 1 + audio_obj = row['audio'] + audio_data = audio_obj['bytes'] + transcription = row['transcription'] + language = row['language'] + speaker = row['speaker'] + if speaker not in results: + results[speaker] = {} + if language not in results[speaker]: + results[speaker][language] = [] + if len(results[speaker][language]) >= 10: + progress.update(1) + continue + + with io.BytesIO(audio_data) as audio_buffer: + prompt_data, sample_rate = sf.read(audio_buffer) + # 确保是单声道,并转换为float32 + if len(prompt_data.shape) > 1: + prompt_data = prompt_data[:, 0] + prompt_data = prompt_data.astype(np.float32) + + # 重采样到16kHz (如果需要) + if sample_rate != 16000: + prompt_data = resample(prompt_data, orig_sr=sample_rate, target_sr=16000) + + prompt_speech_16k = torch.tensor(prompt_data).unsqueeze(0) + + feat = kaldi.fbank(prompt_speech_16k, + num_mel_bins=80, + dither=0, + sample_frequency=16000) + feat = feat - feat.mean(dim=0,keepdim=True) + embedding = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist() + + results[speaker][language].append(embedding) + + progress.update(1) + + # 关闭 mmap 对象 + mm.close() + + + + print(f'All speakers {results.keys()}') + for speaker in results: + print(f'{speaker} : All languages {results[speaker].keys()} in {os.getpid()}') + return results + except Exception as e: + import traceback + traceback.print_exc() + return f"Error processing {parquet_file}: {str(e)}" +def process_file_x(file_info): + """处理单个parquet文件的函数,每个进程调用一次""" + parquet_file, output_path, speaker_extractor, device = file_info + + # 为每个进程创建独立的speech_tokenizer_session + option = onnxruntime.SessionOptions() + option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL + option.intra_op_num_threads = 1 + ort_session = InferenceSession(speaker_extractor, sess_options=option, + 
providers=["CPUExecutionProvider"]) + results = {} + try: + # 创建目标文件名 + base_filename = os.path.splitext(os.path.basename(parquet_file))[0] + output_file = os.path.join(output_path, f"{base_filename}_tokens.jsonl") + + # 使用PyArrow读取parquet文件的元数据,获取总行数 + parquet_metadata = pq.read_metadata(parquet_file) + total_rows = parquet_metadata.num_rows + batch_size = 100 + + pf = pq.ParquetFile(parquet_file) + + progress = tqdm(total=total_rows, + desc=f"Processing {os.path.basename(parquet_file)}", + position=multiprocessing.current_process()._identity[0] % 10) + + current_row = 0 + idx = 0 + for batch in pf.iter_batches(batch_size=batch_size): + df_batch = batch.to_pandas() + + # 处理当前批次中的每一行 + for _, row in df_batch.iterrows(): + current_row += 1 + audio_obj = row['audio'] + audio_data = audio_obj['bytes'] + transcription = row['transcription'] + language = row['language'] + speaker = row['speaker'] + if speaker not in results: + results[speaker] = {} + if language not in results[speaker]: + results[speaker][language] = [] + if len(results[speaker][language]) >= 10: + progress.update(1) + continue + + with io.BytesIO(audio_data) as buffer: + prompt_data, sample_rate = sf.read(buffer) + # 确保是单声道,并转换为float32 + if len(prompt_data.shape) > 1: + prompt_data = prompt_data[:, 0] + prompt_data = prompt_data.astype(np.float32) + + # 重采样到16kHz (如果需要) + if sample_rate != 16000: + prompt_data = resample(prompt_data, orig_sr=sample_rate, target_sr=16000) + + prompt_speech_16k = torch.tensor(prompt_data).unsqueeze(0) + + feat = kaldi.fbank(prompt_speech_16k, + num_mel_bins=80, + dither=0, + sample_frequency=16000) + feat = feat - feat.mean(dim=0,keepdim=True) + embedding = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist() + + results[speaker][language].append(embedding) + + progress.update(1) + + + + + + print(f'All speakers {results.keys()}') + for speaker in results: + print(f'{speaker} : All languages {results[speaker].keys()} in {os.getpid()}') + return results + except Exception as e: + import traceback + traceback.print_exc() + return f"Error processing {parquet_file}: {str(e)}" +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--data_path', type=str, default='/external_data/yueyudata/starrail-voice') + parser.add_argument('--output_path',type=str,default='/external_data/yueyudata/starrail-voice-speaker-embeddings') + parser.add_argument('--speaker_extractor',type=str,default='/external_data/models/CosyVoice2-0.5B_RWKV_1.5B/campplus.onnx') + parser.add_argument('--device',type=str,default='cuda:0') + parser.add_argument('--num_processes',type=int,default=4) + args = parser.parse_args() + + print(args) + data_path = args.data_path + output_path = args.output_path + device = args.device + speaker_extractor = args.speaker_extractor + num_processes = args.num_processes + + # 确保输出目录存在 + os.makedirs(output_path, exist_ok=True) + + # 找到所有parquet文件 + parquet_files = [] + for root, dirs, files in os.walk(data_path): + for file in files: + if file.endswith('.parquet'): + parquet_files.append(os.path.join(root, file)) + print(f'Found {len(parquet_files)} parquet files in {data_path}') + + # 准备多进程参数 + file_info_list = [(file, output_path, speaker_extractor, device) for file in parquet_files] + + # 使用进程池处理文件 + print(f"Starting processing with {num_processes} processes") + + # 使用进程池处理文件 + print(f"Starting processing with {num_processes} processes") + with 
multiprocessing.Pool(processes=num_processes) as pool: + results = pool.map(process_file, file_info_list) + + # 输出处理结果 + print('Processing complete,merge results') + final_results = {} + for result in results: + if isinstance(result, dict): + for speaker in result: + if speaker not in final_results: + final_results[speaker] = {} + for language in result[speaker]: + if language not in final_results[speaker]: + final_results[speaker][language] = [] + final_results[speaker][language].extend(result[speaker][language]) + else: + print(result) + + # 输出结果 + for speaker in final_results: + for language in final_results[speaker]: + output_file = os.path.join(output_path, f"{speaker}_{language}_embeddings.json") + print(f"Writing embeddings for {speaker} ({language}) to {output_file}") + with open(output_file, 'w', encoding='utf-8') as f_out: + json.dump(final_results[speaker][language], f_out) \ No newline at end of file diff --git a/data/utils/create_lm_corpus_from_raw.py b/data/utils/create_lm_corpus_from_raw.py new file mode 100644 index 0000000000000000000000000000000000000000..21800a85b4efefca823661f4a768edb3a3906b47 --- /dev/null +++ b/data/utils/create_lm_corpus_from_raw.py @@ -0,0 +1,156 @@ +import os +import numpy as np +import pandas as pd +import json +import io +import torch +import soundfile as sf +import pyarrow.parquet as pq +import whisper +from librosa import resample +import multiprocessing +from tqdm import tqdm +import onnxruntime +from onnxruntime import InferenceSession + +def process_file(file_info): + """处理单个parquet文件的函数,每个进程调用一次""" + parquet_file, output_path, speech_tokenizer_model, device = file_info + + # 为每个进程创建独立的speech_tokenizer_session + option = onnxruntime.SessionOptions() + option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL + option.intra_op_num_threads = 1 + cuda_idx = int(device.split(':')[-1] if device is not None and 'cuda' in device else '0') + speech_tokenizer_session = InferenceSession(speech_tokenizer_model, sess_options=option, + providers=[("CUDAExecutionProvider", {"device_id": cuda_idx}) + if torch.cuda.is_available() else "CPUExecutionProvider"]) + + try: + # 创建目标文件名 + base_filename = os.path.splitext(os.path.basename(parquet_file))[0] + output_file = os.path.join(output_path, f"{base_filename}_tokens.jsonl") + + # 使用PyArrow读取parquet文件的元数据,获取总行数 + parquet_metadata = pq.read_metadata(parquet_file) + total_rows = parquet_metadata.num_rows + batch_size = 1000 + + # 检查是否有已经处理过的文件,计算已处理的行数 + processed_rows = 0 + if os.path.exists(output_file): + with open(output_file, 'r', encoding='utf-8') as f_check: + for _ in f_check: + processed_rows += 1 + print(f"Found existing file {output_file} with {processed_rows} processed rows") + + # 如果已经处理完所有行,跳过此文件 + if processed_rows >= total_rows: + return f"Skipped {parquet_file}: all {total_rows} rows already processed" + + # 逐批处理数据,以追加方式打开输出文件 + with open(output_file, 'a' if processed_rows > 0 else 'w', encoding='utf-8') as f_out: + pf = pq.ParquetFile(parquet_file) + progress = tqdm(total=total_rows, initial=processed_rows, + desc=f"Processing {os.path.basename(parquet_file)}", + position=multiprocessing.current_process()._identity[0] % 10) + + skip_rows = processed_rows + current_row = 0 + + for batch in pf.iter_batches(batch_size=batch_size): + df_batch = batch.to_pandas() + + # 处理当前批次中的每一行 + for _, row in df_batch.iterrows(): + current_row += 1 + + # 跳过已处理的行 + if current_row <= skip_rows: + continue + + audio_obj = row['audio'] + audio_data = audio_obj['bytes'] + transcription = 
row['transcription'] + language = row['language'] + speaker = row['speaker'] + + with io.BytesIO(audio_data) as buffer: + prompt_data, sample_rate = sf.read(buffer) + # 确保是单声道,并转换为float32 + if len(prompt_data.shape) > 1: + prompt_data = prompt_data[:, 0] + prompt_data = prompt_data.astype(np.float32) + + # 重采样到16kHz (如果需要) + if sample_rate != 16000: + prompt_data = resample(prompt_data, orig_sr=sample_rate, target_sr=16000) + + prompt_speech_16k = torch.tensor(prompt_data).unsqueeze(0) + + feat = whisper.log_mel_spectrogram(prompt_speech_16k, n_mels=128) + speech_token = speech_tokenizer_session.run(None, + {speech_tokenizer_session.get_inputs()[0].name: + feat.detach().cpu().numpy(), + speech_tokenizer_session.get_inputs()[1].name: + np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist() + + # 写入结果 + f_out.write(json.dumps({'tts_speech_tokens':speech_token, + 'text':transcription, + 'language':language, + 'speaker':speaker, + "prompt_text":"", + "llm_prompt_speech_token":[]}, + ensure_ascii=False)+'\n') + progress.update(1) + + # 释放内存 + del df_batch + import gc + gc.collect() + + return f"Successfully processed {parquet_file}: {total_rows-processed_rows} new rows processed" + except Exception as e: + return f"Error processing {parquet_file}: {str(e)}" + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--data_path', type=str, default='/external_data/yueyudata/starrail-voice') + parser.add_argument('--output_path',type=str,default='/external_data/yueyudata/starrail-voice-voice_tokens') + parser.add_argument('--speech_tokenizer_model',type=str,default='/external_data/models/CosyVoice2-0.5B_RWKV_1.5B/speech_tokenizer_v2.onnx') + parser.add_argument('--device',type=str,default='cuda:0') + parser.add_argument('--num_processes',type=int,default=4) + args = parser.parse_args() + + data_path = args.data_path + output_path = args.output_path + device = args.device + speech_tokenizer_model = args.speech_tokenizer_model + num_processes = args.num_processes + + # 确保输出目录存在 + os.makedirs(output_path, exist_ok=True) + + # 找到所有parquet文件 + parquet_files = [] + for root, dirs, files in os.walk(data_path): + for file in files: + if file.endswith('.parquet'): + parquet_files.append(os.path.join(root, file)) + print(f'Found {len(parquet_files)} parquet files in {data_path}') + + # 准备多进程参数 + file_info_list = [(file, output_path, speech_tokenizer_model, device) for file in parquet_files] + + # 使用进程池处理文件 + print(f"Starting processing with {num_processes} processes") + with multiprocessing.Pool(processes=num_processes) as pool: + results = pool.map(process_file, file_info_list) + + # 输出处理结果 + for result in results: + print(result) + + print("All files processed successfully!") \ No newline at end of file diff --git a/data/utils/llm_dataset.py b/data/utils/llm_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7b0bd7fd375f0a20013834b2924621c08d7f2a17 --- /dev/null +++ b/data/utils/llm_dataset.py @@ -0,0 +1,206 @@ +import datasets +import os +import json +import torch +import random +import time +random.seed(time.time()) +import logging +from tqdm import tqdm +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + +def verify_jsonl_files(data_files): + """检查每个 jsonl 文件的有效性""" + invalid_files = [] + + for file_path in tqdm(data_files, desc="验证文件"): + try: + with open(file_path, 'r', encoding='utf-8') as f: + for i, line in enumerate(f): + try: + json.loads(line) + except 
json.JSONDecodeError: + invalid_files.append((file_path, i+1)) + logging.error(f"文件 {file_path} 在第 {i+1} 行有无效的 JSON") + break + except Exception as e: + invalid_files.append((file_path, f"读取错误: {str(e)}")) + logging.error(f"无法读取文件 {file_path}: {str(e)}") + + return invalid_files +def load_jsonl_dataset(directory,tokenizer): + ''' + load jsonl files in a directory recursively + ''' + data_files = [] + for root, dirs, files in os.walk(directory): + for file in files: + if file.endswith('.jsonl'): + data_files.append(os.path.join(root, file)) + + logging.info(f"找到 {len(data_files)} 个 JSONL 文件") + # 验证文件 + invalid_files = verify_jsonl_files(data_files) + if invalid_files: + logging.error(f"发现 {len(invalid_files)} 个无效文件:") + for file_info in invalid_files: + if isinstance(file_info[1], int): + logging.error(f" - {file_info[0]} (错误在第 {file_info[1]} 行)") + else: + logging.error(f" - {file_info[0]} ({file_info[1]})") + + # 移除无效文件 + valid_files = [f for f in data_files if f not in [info[0] for info in invalid_files]] + logging.info(f"继续处理剩余的 {len(valid_files)} 个有效文件") + data_files = valid_files + # 手动收集所有样本,确保特征一致性 + all_samples = [] + + for file_path in tqdm(data_files, desc="加载数据集"): + try: + # 手动解析JSONL文件,避免datasets加载时的类型推断问题 + with open(file_path, 'r', encoding='utf-8') as f: + for line in f: + try: + data = json.loads(line) + # 确保所有字段存在且类型一致 + llm_prompt_speech_token = data.get('llm_prompt_speech_token', []) + tts_speech_tokens = data.get('tts_speech_tokens', []) + text = str(data.get('text', "")) + prompt_text = str(data.get('prompt_text', "")) + + # 确保列表类型 + if not isinstance(llm_prompt_speech_token, list): + llm_prompt_speech_token = [] + if not isinstance(tts_speech_tokens, list): + tts_speech_tokens = [] + + # 添加处理后的样本 + all_samples.append({ + 'llm_prompt_speech_token': llm_prompt_speech_token, + 'tts_speech_tokens': tts_speech_tokens, + 'text': text, + 'prompt_text': prompt_text + }) + except json.JSONDecodeError: + continue # 跳过无效的JSON行 + except Exception as e: + logging.error(f"处理样本时出错: {str(e)}") + except Exception as e: + logging.error(f"打开文件 {file_path} 时出错: {str(e)}") + + if not all_samples: + raise ValueError("没有成功加载任何样本") + + # 创建数据集 + logging.info(f"手动创建数据集,包含 {len(all_samples)} 个样本") + dataset = datasets.Dataset.from_list(all_samples) + + logging.info(f"成功加载 {len(dataset)} 个样本") + + #1. concatenate llm_prompt_speech_token and tts_speech_tokens (list of int) + #delay the concatenation to collate_fn since sometimes we want to drop the prompt + # dataset = dataset.map(lambda x: {'speech_token': x['llm_prompt_speech_token'] + x['tts_speech_tokens']},remove_columns=['tts_speech_tokens','llm_prompt_speech_token']) + #2. Filter the data either : + # 1. the length of the speech_token is less than 1 + # 2. the length of the speech_token is greater than 1000 + # 3. the length of the text is greater than 500 + # 4. the length of the prompt_text is greater than 500 + # 5. the length of the text_token is less than 1 + # 6. the length of the prompt_text_token is less than 1 + dataset = dataset.filter(lambda x:len(x['llm_prompt_speech_token']) < 2048 and len(x['tts_speech_tokens']) < 2048 + and len(tokenizer.encode(x['text'])) < 2048 and len(tokenizer.encode(x['prompt_text'])) < 2048 ) + logging.info(f"过滤后剩余 {len(dataset)} 个样本") + #2. 
tokenize the text to text_tokens and prompt_text to prompt_text_tokens + # dataset = dataset.map(lambda x: {'text_tokens': tokenizer.encode(x['text']), 'prompt_text_tokens': tokenizer.encode(x['prompt_text'])},remove_columns=['text','prompt_text']) + return dataset + +def collate_fn(batch, tokenizer, pad_to_max_length=True, max_length=2048, drop_prompt_audio_rate=-0.1): + ''' + convert the data to torch tensors + 1. call tokenizer.encode('text') and tokenizer.encode('prompt_text'), concatenate them to get the text_token, record each sample's length to text_token_len + 2. convert the text_tokens and text_token_len to torch tensor + 3. record each sample's speech_token length to speech_token_len + 4. convert the speech_token and speech_token_len to torch tensor + 5. We will drop prompt with drop_prompt_audio_rate to ask model to learn generate audio without guaidance + By default we won't drop anything + ''' + all_text_tokens = [] + all_speech_tokens = [] + speech_token_len = [] + text_token_len = [] + my_max_length = 0 + is_drop_prompt = random.random() < drop_prompt_audio_rate + + for sample in batch: + tts_speech_tokens = sample['tts_speech_tokens'] + llm_prompt_speech_token = sample['llm_prompt_speech_token'] + + if is_drop_prompt: + # 只使用文本部分,不使用提示 + text_tokens = tokenizer.encode(sample['text']) + all_text_tokens.append(torch.tensor(text_tokens, dtype=torch.int32)) + text_token_len.append(len(text_tokens)) + + # 只使用语音部分,不使用提示语音 + current_speech_tokens = tts_speech_tokens + all_speech_tokens.append(torch.tensor(current_speech_tokens, dtype=torch.int32)) + speech_token_len.append(len(current_speech_tokens)) + + total_length = len(text_tokens) + len(current_speech_tokens) + else: + # 使用提示+文本 + text_tokens = tokenizer.encode(sample['text']) + prompt_tokens = tokenizer.encode(sample['prompt_text']) + combined_text_tokens = prompt_tokens + text_tokens + all_text_tokens.append(torch.tensor(combined_text_tokens, dtype=torch.int32)) + text_token_len.append(len(combined_text_tokens)) + + # 使用提示语音+语音 + current_speech_tokens = llm_prompt_speech_token + tts_speech_tokens + all_speech_tokens.append(torch.tensor(current_speech_tokens, dtype=torch.int32)) + speech_token_len.append(len(current_speech_tokens)) + + total_length = len(combined_text_tokens) + len(current_speech_tokens) + + if total_length > my_max_length: + my_max_length = total_length + + # 检查长度是否超出最大长度 + skip = my_max_length > max_length + + # 将列表转换为填充后的张量 + all_text_tokens = torch.nn.utils.rnn.pad_sequence(all_text_tokens, batch_first=True, padding_value=0) + all_speech_tokens = torch.nn.utils.rnn.pad_sequence(all_speech_tokens, batch_first=True, padding_value=0) + + # 如果需要填充到最大长度 + if pad_to_max_length and not skip: + pad_length = max_length - my_max_length + if pad_length > 0: + all_speech_tokens = torch.nn.functional.pad(all_speech_tokens, (0, pad_length), value=0) + + return { + 'text_token': all_text_tokens, + 'text_token_len': torch.tensor(text_token_len, dtype=torch.int32), + 'speech_token': all_speech_tokens, # 确保命名一致 + 'speech_token_len': torch.tensor(speech_token_len, dtype=torch.int32), + 'skip': skip + } + + +if __name__ == '__main__': + from transformers import AutoTokenizer + model_path = "/external_data/models/rwkv7-2.9B-world" + tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) + directory = '/external_data/yueyudata/speech_corpus' + dataset = load_jsonl_dataset(directory,tokenizer) + print(dataset) + print(dataset[0]) + from functools import partial + collate_fn = 
partial(collate_fn,tokenizer=tokenizer,pad_to_max_length=False) + dataloader = torch.utils.data.DataLoader(dataset,batch_size=1,collate_fn=collate_fn) + for data in dataloader: + print(data) + print(data['speech_token'].shape) + print(data['text_token'].shape) + break \ No newline at end of file diff --git a/data/utils/test_utilities.py b/data/utils/test_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..4871f6ea3040de09638dfcb52e476f94e5257e83 --- /dev/null +++ b/data/utils/test_utilities.py @@ -0,0 +1,31 @@ +from data.utils.utilitie import generate_mixed_instructions +if __name__ == '__main__': + print(generate_mixed_instructions('我来自中国。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('I am from China.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('我来自中国。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('这是一个拥有悠久历史的城市。')) + print(generate_mixed_instructions('I am from China.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) + print(generate_mixed_instructions('This is a city with a long history.',language='en')) \ No newline at end of file diff --git a/data/utils/utilitie.py b/data/utils/utilitie.py new file mode 100644 index 0000000000000000000000000000000000000000..06e9346066fb332143f826f3df983ee896a7d093 --- /dev/null +++ b/data/utils/utilitie.py @@ -0,0 +1,767 @@ +from concurrent.futures import thread +from operator import is_ +from librosa import ex +from regex import P +from torch import device +from tqdm import tqdm +import tarfile +import random +import time +import io +import torchaudio +import json +import os +import multiprocessing +import torch +from data.cosy.data.data_processor import init_process, preprocess_prompts +import random +from typing import List +import torch +import torchaudio +import io + +''' +Natural Language Instruction +Emotion: 高兴(Happy), 悲伤(Sad), 惊讶(Surprised), 愤怒(Angry), 恐惧(Fearful), 厌恶(Disgusted), 冷 +静(Calm), 严肃(Serious) +Speaking Rate: 快速(Fast), 非常快速(Very Fast), 慢速(Slow), 非常慢速(Very Slow) +Dialect: 粤语, 四川话, 上海话, 郑州话, 长沙话, 天津话 +Role-playing: 
神秘(Mysterious), 凶猛(Fierce), 好奇(Curious), 优雅(Elegant), 孤独(Lonely), 机器 +人(Robot), 小猪佩奇(Peppa), etc. +Fine-grained Instruction +Vocal Bursts: [laughter], [breath], etc. +Vocal Features: , +Examples +- 你能用高兴的情感说吗?< |endofprompt| >今天真是太开心了,马上要放假了!I’m so happy, +Spring Festival is coming! +- Please speaking very fast.< |endofprompt| >Today is a happy day, full of laughter and joy. +- 请问你能模仿粤语的口音吗?< |endofprompt| >多保重,早休息。 +- 尝试一下以机器人的角色和我交流。< |endofprompt| >接收知识光波! +- [laughter]有时候,看着小孩子们的天真行为[laughter],我们总会会心一笑。 +- She pursued her dreams with enthusiasm and grit. +''' + +emotions = ['高兴', '悲伤', '惊讶', '愤怒', '恐惧', '厌恶', '冷静', '严肃'] +emotions_in_english = ['Happy', 'Sad', 'Surprised', 'Angry', 'Fearful', 'Disgusted', 'Calm', 'Serious'] +speaking_rates = ['快速', '非常快速', '慢速', '非常慢速'] +speaking_rates_in_english = ['Fast', 'Very Fast', 'Slow', 'Very Slow'] +dialects = ['普通话','粤语', '四川话', '上海话', '郑州话', '长沙话', '天津话'] +dialects_in_english = ['Mandarin','Cantonese', 'Sichuanese', 'Shanghainese', 'Zhengzhou Dialect', 'Changsha Dialect', 'Tianjin Dialect'] +role_playings = ['神秘', '凶猛', '好奇', '优雅', '孤独', '机器人', '小猪佩奇'] +role_playings_in_english = ['Mysterious', 'Fierce', 'Curious', 'Elegant', 'Lonely', 'Robot', 'Peppa'] +vocal_bursts = ['[laughter]', '[breath]'] +vocal_features = ['', ''] +end_of_prompt = '<|endofprompt|>' + +def generate_in_emotion_in_chinese(text :str): + templates = [ + '你能用{}的情感说吗?{}{}', + '请用{}的情感说。{}{}', + '请用{}的情感表达。{}{}', + '请用{}的情感说一下。{}{}', + '请用{}的情感说一句。{}{}' + ] + select_emotion = random.choice(emotions) + return random.choice(templates).format(select_emotion,end_of_prompt,text) + +def generate_in_emotion_in_english(text :str): + templates = [ + 'Can you say it with {} emotion?{}{}', + 'Please say it with {} emotion.{}{}', + 'Please express it with {} emotion.{}{}', + 'Please say it with {} emotion.{}{}', + 'Please say a sentence with {} emotion.{}{}' + ] + select_emotion = random.choice(emotions_in_english) + return random.choice(templates).format(select_emotion,end_of_prompt,text) + +def generate_speaking_rate_in_chinese(text :str): + templates = [ + '请用{}的语速说。{}{}', + '请用{}的语速说一下。{}{}', + '请用{}的语速说一句。{}{}', + '请用{}的语速表达。{}{}', + '请用{}的语速说。{}{}', + '请{}地说。{}{}', + '请{}地说一下。{}{}', + '请{}地说一句。{}{}', + '{}的说。{}{}', + '{}的说一下。{}{}', + '{}的说一句。{}{}', + '{}的表达。{}{}' + + ] + select_rate = random.choice(speaking_rates) + template = random.choice(templates) + return template.format(select_rate,end_of_prompt,text) + +def generate_speaking_rate_in_english(text :str): + templates = [ + 'Please say it with {} speaking rate.{}{}', + 'Say it with {} speaking rate.{}{}', + 'Please say a sentence with {} speaking rate.{}{}', + 'Please express it with {} speaking rate.{}{}', + 'Please speak {}ly.{}{}', + 'Speak {}ly.{}{}', + 'Please say it {}ly.{}{}', + 'Say it {}ly.{}{}' + ] + select_rate = random.choice(speaking_rates_in_english) + template = random.choice(templates) + return template.format(select_rate,end_of_prompt,text) + + +def load_file_list(tar_file): + #the files are FILE_NAME.mp3/FILE_NAME.json + #return all FILE_NAME as a list which has a mp3 and json + import tarfile + with tarfile.open(tar_file, 'r') as f: + file_names = f.getnames() + mp3_files = [i for i in file_names if i.endswith('.mp3')] + json_files = [i for i in file_names if i.endswith('.json')] + + #filter mp3_files without corresponded json + mp3_files = [i for i in mp3_files if i.replace('.mp3', '.json') in json_files] + return mp3_files + +def extract_prompt(input_tar_files, input_tar_languages, max_duration=5, 
num_samples=10, target_sr=16000, output_dir=None): + """ + Extract prompt from tar files + Args: + input_tar_files: list of str, input tar files + input_tar_languages: list of str, input tar languages for each tar file, must be the same length as input_tar_files + max_duration: float, max duration of audio + num_samples: int, number of samples to extract + target_sr: int, target sample rate + output_dir: str, output directory + """ + for tar_file, language in zip(input_tar_files, input_tar_languages): + print(f'Extracting prompt from {tar_file}...with language {language}') + random.seed(time.time()) + samples = [] + mp3_files = load_file_list(tar_file) + with tarfile.open(tar_file, 'r') as f: + progress_bar = tqdm(total=num_samples,desc=f'Extracting prompt from {tar_file}') + for i in random.sample(mp3_files, len(mp3_files)): + mp3 = f.extractfile(i) + mp3_bytes = io.BytesIO(mp3.read()) + speech, sample_rate = torchaudio.load(mp3_bytes,backend='soundfile') + json_file = f.extractfile(i.replace('.mp3', '.json')) + json_data = json.load(json_file) + duration = json_data['duration'] + if duration > max_duration: + continue + speech = speech.mean(dim=0, keepdim=True) + if sample_rate != target_sr: + assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr) + speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech) + samples.append((speech, json_data,sample_rate)) + progress_bar.update(1) + if len(samples) == num_samples: + break + if output_dir is not None: + """ + json looks like: + {'id': 'ZH_B00000_S01450_W000017', 'wav': 'ZH_B00000/ZH_B00000_S01450/mp3/ZH_B00000_S01450_W000017.mp3', 'text': '因此,我们认为流通性具有更广泛的含义。', 'duration': 4.193, 'speaker': 'ZH_B00000_S01450', 'language': 'zh', 'dnsmos': 3.3709} + """ + output_dir_lang = os.path.join(output_dir, language) + os.makedirs(output_dir_lang, exist_ok=True) + progress_bar = tqdm(total=len(samples), desc=f'Saving samples to {output_dir_lang}') + for i, (speech, json_data, sample_rate) in enumerate(samples): + id = json_data['id'] + wave_file = os.path.join(output_dir_lang, f'{id}.wav') + json_file = os.path.join(output_dir_lang, f'{id}.json') + torchaudio.save(wave_file, speech, target_sr) + with open(json_file, 'w') as f: + json.dump(json_data, f,ensure_ascii=False) + progress_bar.update(1) + print(f'Extracted {len(samples)} samples from {tar_file} with language {language}') + +def generate_dialect_in_chinese(text: str): + templates = [ + '请问你能模仿{}的口音吗?{}{}', + '请用{}的口音说一下。{}{}', + '用{}的口音说一句。{}{}', + '能用{}的口音读一下吗?{}{}', + '请尝试用{}的口音说这段话。{}{}', + '请以{}的口音表达。{}{}', + '请用{}的语调说。{}{}', + '试试用{}的方言说。{}{}', + '能否用{}的语调读出来?{}{}', + '请说一段{}。{}{}' + ] + select_dialect = random.choice(dialects) + return random.choice(templates).format(select_dialect, end_of_prompt, text) + +def generate_dialect_in_english(text: str): + templates = [ + 'Can you mimic the {} accent?{}{}', + 'Please speak with a {} accent.{}{}', + 'Say it with a {} accent.{}{}', + 'Could you read this with a {} accent?{}{}', + 'Please try to speak this with a {} accent.{}{}', + 'Please express it with a {} accent.{}{}', + 'Please use {} intonation.{}{}', + 'Try speaking in {}.{}{}', + 'Could you read this in {}?{}{}', + 'Please say a passage in {}.{}{}' + ] + select_dialect = random.choice(dialects_in_english) + return random.choice(templates).format(select_dialect, end_of_prompt, text) + +def generate_role_playing_in_chinese(text: str): + templates = [ + '尝试一下以{}的角色和我交流。{}{}', + '请以{}的角色说这句话。{}{}', + 
'假装你是{},说一下这句话。{}{}', + '扮演{}来说这段话。{}{}', + '请用{}的语气说。{}{}', + '以{}的形象来表达。{}{}', + '你能用{}的方式说吗?{}{}', + '模仿{}说话。{}{}', + '请用{}的口吻说一下。{}{}', + '像{}一样说这句话。{}{}' + ] + select_role = random.choice(role_playings) + return random.choice(templates).format(select_role, end_of_prompt, text) + +def generate_role_playing_in_english(text: str): + templates = [ + 'Try to communicate with me as a {} character.{}{}', + 'Please say this as a {} character.{}{}', + 'Pretend you are {}, say this sentence.{}{}', + 'Act as {} to say this passage.{}{}', + 'Please speak with a {} tone.{}{}', + 'Express this with a {} image.{}{}', + 'Can you say this in a {} way?{}{}', + 'Mimic {} speaking.{}{}', + 'Please say this in the manner of {}.{}{}', + 'Say this like {}.{}{}' + ] + select_role = random.choice(role_playings_in_english) + return random.choice(templates).format(select_role, end_of_prompt, text) + +def generate_vocal_bursts(text: str): + """ + 在文本中随机添加声音爆发标记,如[laughter]、[breath]等 + """ + templates = [ + '{}{}', # 在句首添加 + '{}{}{}', # 在句中添加 + '{}{}' # 在句末添加 + ] + + burst = random.choice(vocal_bursts) + template_choice = random.choice(templates) + + if template_choice == '{}{}': # 句首 + return burst + text + elif template_choice == '{}{}{}': # 句中 + words = text.split() + if len(words) <= 3: # 文本太短不分割 + return burst + text + split_point = random.randint(1, len(words) - 1) + return ' '.join(words[:split_point]) + ' ' + burst + ' ' + ' '.join(words[split_point:]) + else: # 句末 + return text + ' ' + burst + +def generate_vocal_features(text: str): + """ + 在文本中随机添加声音特征标记,如等 + 支持中文和英文文本 + """ + feature = random.choice(vocal_features) + feature_start, feature_end = feature.split('><') + feature_start += '>' + feature_end = '<' + feature_end + + # 检查是否为中文文本 + has_chinese = any('\u4e00' <= char <= '\u9fff' for char in text) + + if has_chinese: + # 处理中文文本 + if len(text) <= 10: # 文本太短,整个加强 + return feature_start + text + feature_end + + # 对中文处理,随机选择一个字符范围 + text_len = len(text) + # 随机选择一个起始位置和一个范围长度 + start_pos = random.randint(1, max(1, text_len // 2)) # 避免总是从句首开始 + span_length = random.randint(1, min(5, text_len - start_pos)) + end_pos = start_pos + span_length - 1 + + # 在选定位置插入标记 + result = text[:start_pos] + feature_start + text[start_pos:end_pos+1] + feature_end + text[end_pos+1:] + return result + else: + # 处理英文文本 + words = text.split() + if len(words) <= 3: # 文本太短,整个加强 + return feature_start + text + feature_end + + # 随机选择一个词或短语来添加特征 + start_idx = random.randint(0, len(words) - 1) + span_length = random.randint(1, min(3, len(words) - start_idx)) # 最多3个词 + + result = [] + for i, word in enumerate(words): + if i == start_idx: + result.append(feature_start + word) + elif i == start_idx + span_length - 1: + result.append(word + feature_end) + else: + result.append(word) + + return ' '.join(result) + +def generate_mixed_instructions(text: str, language="zh"): + """ + 混合多种指令类型,可以同时包含情感、语速、方言、角色扮演等 + """ + instruction_generators = [] + + if language == "zh": + instruction_generators = [ + generate_in_emotion_in_chinese, + generate_speaking_rate_in_chinese, + generate_dialect_in_chinese, + generate_role_playing_in_chinese + ] + else: # 英文 + instruction_generators = [ + generate_in_emotion_in_english, + generate_speaking_rate_in_english, + generate_dialect_in_english, + generate_role_playing_in_english + ] + + # 随机选择1个generator + selected_generator = random.choice(instruction_generators) + + # 可能会添加声音特征 + text_with_features = text + if random.random() < 0.3: # 30%的概率添加声音特征 + text_with_features = generate_vocal_features(text) + 
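+    # For reference, one possible return value (the generator and template are
+    # picked at random, so output varies per call; the inputs below are
+    # illustrative only):
+    #   generate_mixed_instructions('今天天气真好。')
+    #     -> '请用高兴的情感说。<|endofprompt|>今天天气真好。'
+    #   generate_mixed_instructions('The weather is nice today.', language='en')
+    #     -> 'Please say it with Fast speaking rate.<|endofprompt|>The weather is nice today.'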
+ # 可能会添加声音爆发 + if random.random() < 0.2: # 20%的概率添加声音爆发 + text_with_features = generate_vocal_bursts(text_with_features) + + # 应用选择的指令生成器 + result = text_with_features + result = selected_generator(result) + + return result + +frontend = None +llm = None +cosyvoice = None +output_fp = None +prompts = None +global_device = None +processed_count = 0 +def initialize_process(model_dir,prompts_dir,output_dir,device): + current_process = multiprocessing.current_process() + file_name = f'{output_dir}/{current_process.pid}.jsonl' + global frontend,llm,cosyvoice,output_fp,prompts,global_device + global_device = device + output_fp = open(file_name, 'w') + print(f'Initializing process with device {device} and output file {file_name}') + frontend,llm,cosyvoice = init_process(model_dir,device) + prompts = preprocess_prompts(frontend,prompts_dir) + print(f'load prompts {prompts.keys()}') + return frontend,llm,cosyvoice + +def generate_speech_tokens(llm,frontend,tts_text,model_input,device): + tts_text = frontend.text_normalize(tts_text,split=False, text_frontend=True) + tts_text_token, tts_text_token_len = frontend._extract_text_token(tts_text) + tts_text_token_len = torch.tensor([tts_text_token.shape[1]], dtype=torch.int32).to(device) + prompt_text = model_input['prompt_text'].to(device) if 'prompt_text' in model_input else torch.zeros(1, 0, dtype=torch.int32).to(device) + prompt_text_len = torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(device) if prompt_text is not None else torch.zeros(1, 0, dtype=torch.int32).to(device) + llm_prompt_speech_token = model_input['llm_prompt_speech_token'].to(device) if 'llm_prompt_speech_token' in model_input else torch.zeros(1, 0, dtype=torch.int32).to(device) + prompt_speech_token_len = torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(device) if llm_prompt_speech_token is not None else None + flow_prompt_speech_token = model_input['flow_prompt_speech_token'].to(device) + prompt_speech_feat = model_input['prompt_speech_feat'].to(device) + llm_embedding = model_input['llm_embedding'].to(device) + flow_embedding = model_input['flow_embedding'].to(device) + speech_tokens = [] + with torch.no_grad(): + for i in llm.inference(text = tts_text_token, + text_len = tts_text_token_len, + prompt_text = prompt_text, + prompt_text_len = prompt_text_len, + prompt_speech_token = llm_prompt_speech_token, + prompt_speech_token_len = prompt_speech_token_len, + embedding=llm_embedding + ): + speech_tokens.append(i) + return speech_tokens + +def process_text(text,language): + global frontend,llm,cosyvoice,output_fp,prompts,processed_count,global_device + processed_count += 1 + if processed_count % 100 == 0: + print(f'Processed {processed_count} samples') + tts_text = text + splits_txt_by_lines = tts_text.split('\n') + #remove the sentences with length less than 10 + splits_txt_by_lines = [i.strip() for i in splits_txt_by_lines if len(i.strip()) > 10] + random.seed(time.time()) + model_input,prompt_text = random.choice(prompts[language]) + llm_prompt_speech_token = model_input['llm_prompt_speech_token'].cpu().tolist() + for tts_text in splits_txt_by_lines: + tts_speech_tokens = generate_speech_tokens(llm,frontend,tts_text,model_input,cosyvoice.device) + output_data = { + 'text': tts_text, + 'tts_speech_tokens': tts_speech_tokens, + 'prompt_text': prompt_text, + 'llm_prompt_speech_token': llm_prompt_speech_token[0] + } + output_fp.write(json.dumps(output_data,ensure_ascii=False)+'\n') + output_fp.flush() + return processed_count +def 
process_jsonl_file(jsonl_file,language,process_pool): + print(f'Processing {jsonl_file}...') + count = 0 + import json + with open(jsonl_file, 'r') as f: + for line in f: + line = line.strip() + if len(line) == 0: + continue + data = json.loads(line) + text = data['text'] + count += 1 + future = process_pool.submit(process_text,text,language) + print(f'processed {future.result()} requests') + print(f'Processed {count} samples from {jsonl_file}') + return count + +def process_parquet_file(parquet_file,language,process_pool): + print(f'Processing {parquet_file}...') + import pandas as pd + df = pd.read_parquet(parquet_file) + count = 0 + for i in range(len(df)): + text = df.iloc[i]['text'] + count += 1 + future = process_pool.submit(process_text,text,language) + print(f'processed {future.result()} requests') + print(f'Processed {count} samples from {parquet_file}') + return count + +def generate_speech_tokens_single_process(cosy_model_dir, prompts_dir, output_dir, language, jsonl_files=None, parquet_files=None, device="cuda:0",is_cross_lingual=False,is_instructed=False): + """ + 单进程单线程版本的语音标记生成函数 + """ + import torch + import json + import os + import random + import time + import traceback + import logging + import sys + from datetime import datetime + from data.cosy.data.data_processor import init_process, preprocess_prompts + + # 设置日志 + output_dir_lang = os.path.join(output_dir, language) + os.makedirs(output_dir_lang, exist_ok=True) + process_id = os.getpid() + log_file = os.path.join(output_dir_lang, f'process_{process_id}_log.txt') + + # 配置日志输出到文件和控制台 + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + handlers=[ + logging.FileHandler(log_file), + logging.StreamHandler(sys.stdout) + ] + ) + logger = logging.getLogger(f'process_{process_id}') + + # 记录启动信息 + logger.info(f"='='='='='='='='='='='Instructed={is_instructed}'='='='='='='='='='='='='='='='='='") + logger.info(f"启动时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + logger.info(f"进程ID: {process_id}") + logger.info(f"设备: {device}") + logger.info(f"模型目录: {cosy_model_dir}") + logger.info(f"提示词目录: {prompts_dir}") + logger.info(f"输出目录: {output_dir_lang}") + if jsonl_files: + logger.info(f"JSONL文件: {jsonl_files}") + if parquet_files: + logger.info(f"Parquet文件: {parquet_files}") + logger.info(f"='='='='='='='='='='='='='='='='='='='='='='='='='='='='='") + + output_fp = None + frontend = None + llm = None + cosyvoice = None + total_processed = 0 + + try: + # 初始化模型 + logger.info(f'初始化模型,使用设备: {device}') + frontend, llm, cosyvoice = init_process(cosy_model_dir, device) + + # 预处理提示 + logger.info(f'开始预处理提示词') + prompts = preprocess_prompts(frontend, prompts_dir) + logger.info(f'加载提示完成: {prompts.keys()}') + + output_file = os.path.join(output_dir_lang, f'{process_id}.jsonl') + output_fp = open(output_file, 'w') + + # 处理函数 + def process_single_text(text): + try: + tts_text = text + splits_txt_by_lines = tts_text.split('\n') + # 删除长度小于10的句子 + splits_txt_by_lines = [i.strip() for i in splits_txt_by_lines if len(i.strip()) > 10] + + if not splits_txt_by_lines: + logger.warning(f"文本没有有效句子: '{text[:100]}...'") + return 0 + + random.seed(time.time()) + cross_linguals_map = { + 'zh': 'en', + 'en': 'zh' + } + try: + model_input, prompt_text = random.choice(prompts[language if not is_cross_lingual else cross_linguals_map[language]]) + except KeyError: + logger.error(f"语言 '{language}' 在提示词中不存在! 
可用语言: {list(prompts.keys())}") + return 0 + + llm_prompt_speech_token = model_input['llm_prompt_speech_token'].cpu().tolist() if 'llm_prompt_speech_token' in model_input else [] + + processed_count = 0 + for tts_text in splits_txt_by_lines: + try: + if is_instructed: + tts_text = generate_mixed_instructions(tts_text, language) + prompt_text = "" + llm_prompt_speech_token[0]=[] + if 'prompt_text' in model_input: + del model_input['prompt_text'] + if 'prompt_text_len' in model_input: + del model_input['prompt_text_len'] + if 'llm_prompt_speech_token' in model_input: + del model_input['llm_prompt_speech_token'] + if 'llm_prompt_speech_token_len' in model_input: + del model_input['llm_prompt_speech_token_len'] + # 生成语音标记 + tts_speech_tokens = generate_speech_tokens(llm, frontend, tts_text, model_input, device) + output_data = { + 'text': tts_text, + 'tts_speech_tokens': tts_speech_tokens, + 'prompt_text': prompt_text, + 'llm_prompt_speech_token': llm_prompt_speech_token[0] + } + output_fp.write(json.dumps(output_data, ensure_ascii=False) + '\n') + output_fp.flush() + processed_count += 1 + except Exception as e: + logger.error(f"处理单个句子时出错: '{tts_text[:100]}...'") + logger.error(f"错误信息: {str(e)}") + logger.error(traceback.format_exc()) + + return processed_count + except Exception as e: + logger.error(f"处理文本块时出错") + logger.error(f"错误信息: {str(e)}") + logger.error(traceback.format_exc()) + return 0 + + # 收集要处理的文件 + files_to_process = [] + + # 处理JSONL文件 + if jsonl_files is not None: + logger.info(f"处理指定的JSONL文件") + for file in jsonl_files: + if file.endswith('.jsonl'): + files_to_process.append(('jsonl', file)) + logger.info(f"共有 {len([f for t, f in files_to_process if t == 'jsonl'])} 个JSONL文件需要处理") + + # 处理Parquet文件 + if parquet_files is not None: + logger.info(f"处理指定的Parquet文件") + for file in parquet_files: + if file.endswith('.parquet'): + files_to_process.append(('parquet', file)) + logger.info(f"共有 {len([f for t, f in files_to_process if t == 'parquet'])} 个Parquet文件需要处理") + + # 顺序处理所有文件 + for file_type, file_path in files_to_process: + logger.info(f'开始处理文件: {file_path}') + try: + if file_type == 'jsonl': + # 处理JSONL文件 + # 首先计算文件总行数,用于进度条 + total_lines = 0 + with open(file_path, 'r') as f: + for line in f: + if line.strip(): # 只计算非空行 + total_lines += 1 + + logger.info(f"JSONL文件 {file_path} 共有 {total_lines} 行") + # 使用进度条处理文件 + with open(file_path, 'r') as f: + from tqdm import tqdm + progress_bar = tqdm(total=total_lines, desc=f'处理JSONL文件: {os.path.basename(file_path)}') + file_processed = 0 + for line in f: + line = line.strip() + if len(line) == 0: + continue + try: + data = json.loads(line) + text = data['text'] + processed = process_single_text(text) + total_processed += processed + file_processed += processed + progress_bar.update(1) + progress_bar.set_postfix(total=total_processed) + except Exception as e: + logger.error(f"处理JSONL行时出错: {line[:100]}...") + logger.error(f"错误信息: {str(e)}") + logger.error(traceback.format_exc()) + progress_bar.close() + logger.info(f"JSONL文件 {file_path} 完成处理,成功处理 {file_processed} 条记录") + + elif file_type == 'parquet': + # 处理Parquet文件 + try: + import pandas as pd + logger.info(f"加载Parquet文件: {file_path}") + df = pd.read_parquet(file_path) + logger.info(f"Parquet文件 {file_path} 共有 {len(df)} 行") + + from tqdm import tqdm + progress_bar = tqdm(total=len(df), desc=f'处理Parquet文件: {os.path.basename(file_path)}') + file_processed = 0 + for i in range(len(df)): + try: + text = df.iloc[i]['text'] + processed = process_single_text(text) + total_processed += processed + 
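+                                # Note: process_single_text() returns how many individual
+                                # sentences were written (each row's text is split on
+                                # newlines and sentences of 10 characters or fewer are
+                                # dropped), so these counters track emitted sentences,
+                                # not input rows.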
file_processed += processed + progress_bar.update(1) + progress_bar.set_postfix(total=total_processed) + except Exception as e: + logger.error(f"处理Parquet行 {i} 时出错") + logger.error(f"错误信息: {str(e)}") + logger.error(traceback.format_exc()) + progress_bar.close() + logger.info(f"Parquet文件 {file_path} 完成处理,成功处理 {file_processed} 条记录") + except ImportError: + logger.error("处理Parquet文件需要pandas库,请安装: pip install pandas") + except Exception as e: + logger.error(f"处理Parquet文件 {file_path} 时出现错误") + logger.error(f"错误信息: {str(e)}") + logger.error(traceback.format_exc()) + except Exception as e: + logger.error(f"处理文件 {file_path} 时出现错误") + logger.error(f"错误信息: {str(e)}") + logger.error(traceback.format_exc()) + + logger.info(f'总共成功处理 {total_processed} 个样本,结果保存到 {output_file}') + + except Exception as e: + logger.error("处理过程中出现全局错误") + logger.error(f"错误信息: {str(e)}") + logger.error(traceback.format_exc()) + + finally: + # 确保资源正确关闭 + logger.info("清理资源...") + if output_fp is not None: + try: + output_fp.close() + logger.info(f"关闭输出文件") + except Exception as e: + logger.error(f"关闭输出文件时出错: {str(e)}") + + # 释放GPU资源 + if torch.cuda.is_available(): + try: + torch.cuda.empty_cache() + logger.info("已清理GPU缓存") + except Exception as e: + logger.error(f"清理GPU缓存时出错: {str(e)}") + + logger.info(f"处理结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + logger.info(f"='='='='='='='='='='='='='='='='='='='='='='='='='='='='='") + +if __name__ == '__main__': + import argparse + """ + Parse arguments + task: str, including 'extract_prompt' + input_tar_files: list of str, input tar files + input_tar_languages: list of str, input tar languages for each tar file, must be the same length as input_tar_files + max_duration: float, max duration of audio + num_samples: int, number of samples to extract + target_sr: int, target sample rate + output_dir: str, output directory + num_processes: int, number of processes to use + prompt_dir: str, prompt directory which contains prompt jsonl files and audio files + language: str, language, zh or en + cosy_model_dir: str, cosy model directory + device: str, cuda device used to extract speech tokens + jsonl_files: list of str, jsonl files + parquet_files: list of str, parquet files + """ + parser = argparse.ArgumentParser() + parser.add_argument('--task', type=str, help='task') + parser.add_argument('--input_tar_files', nargs='+', type=str, help='input tar files') + parser.add_argument('--input_tar_languages', nargs='+', type=str, help='input tar languages for each tar file') + parser.add_argument('--output_dir', type=str, help='output directory',required=True) + parser.add_argument('--max_duration', type=float, default=5, help='max duration of audio') + parser.add_argument('--num_samples', type=int, default=10, help='number of samples to extract') + parser.add_argument('--target_sr', type=int, default=16000, help='target sample rate') + parser.add_argument('--num_processes', type=int, default=1, help='number of processes to use') + parser.add_argument('--prompts_dir', type=str, help='prompt directory which contains prompt jsonl files and audio files') + parser.add_argument('--language', type=str, help='language') + parser.add_argument('--cosy_model_dir', type=str, help='cosy model directory') + parser.add_argument('--device', type=str, help='cuda device used to extract speech tokens') + parser.add_argument('--jsonl_files', nargs='+', type=str, help='jsonl files') + parser.add_argument('--parquet_files', nargs='+', type=str, help='parquet files') + 
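+    # Example invocations (paths below are placeholders, shown for illustration):
+    #   python data/utils/utilitie.py --task extract_prompt \
+    #       --input_tar_files data/zh.tar --input_tar_languages zh \
+    #       --output_dir extract_data/prompts
+    #   python data/utils/utilitie.py --task generate_speech_tokens --language zh \
+    #       --jsonl_files corpus.jsonl --prompts_dir extract_data/prompts/zh \
+    #       --cosy_model_dir /path/to/CosyVoice2-0.5B --device cuda:0 \
+    #       --output_dir /path/to/speech_corpus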
parser.add_argument('--is_cross_lingual', action='store_true', help='is cross lingual') + parser.add_argument('--is_instructed', action='store_true', help='is instructed') + args = parser.parse_args() + task = args.task + if task == 'extract_prompt': + input_tar_files = args.input_tar_files + input_tar_languages = args.input_tar_languages + output_dir = args.output_dir + assert len(input_tar_files) == len(input_tar_languages), 'input_tar_files and input_tar_languages must have the same length' + extract_prompt(input_tar_files, input_tar_languages, args.max_duration, args.num_samples, args.target_sr, output_dir) + elif task == 'generate_speech_tokens': + prompts_dir = args.prompts_dir + language = args.language + cosy_model_dir = args.cosy_model_dir + jsonl_files = args.jsonl_files + parquet_files = args.parquet_files + device = args.device + is_cross_lingual = args.is_cross_lingual + is_instructed = args.is_instructed + # 使用单进程单线程版本替代多进程版本 + generate_speech_tokens_single_process( + cosy_model_dir=cosy_model_dir, + prompts_dir=prompts_dir, + output_dir=args.output_dir, + language=language, + jsonl_files=jsonl_files, + parquet_files=parquet_files, + device=device, + is_cross_lingual=is_cross_lingual, + is_instructed=is_instructed, + ) + diff --git a/eval/eval_seed_generate.py b/eval/eval_seed_generate.py new file mode 100644 index 0000000000000000000000000000000000000000..0177bd7b3110db82d9f6bc48f4310f023fc41a51 --- /dev/null +++ b/eval/eval_seed_generate.py @@ -0,0 +1,66 @@ +#Download the evaluation file from:https://drive.google.com/file/d/1GlSjVfSHkW3-leKKBlfrjuuTGqQ_xaLP/edit +import os +voice_engine = None +def init_process_func(model_path,device): + global voice_engine + from cosyvoice.cli.cosyvoice import CosyVoice2 + voice_engine = CosyVoice2(model_path,device=device,fp16=False,load_jit=False) + print(f'Finish loading cosyvoice model from {model_path} in process {os.getpid()}') +def do_tts(ID,tts_text,prompt_text,prompt_audio_file,output_dir): + from cosyvoice.utils.file_utils import load_wav + import torchaudio + global voice_engine + try: + final_output_file = os.path.join(output_dir,f'{ID}.wav') + prompt_speech_16k = load_wav(prompt_audio_file, 16000) + for output in voice_engine.inference_zero_shot(tts_text,prompt_text, prompt_speech_16k, stream=False,speed=1): + torchaudio.save(final_output_file, output['tts_speech'], voice_engine.sample_rate) + break # only save the first output + print(f'TTS {tts_text} and Save to {final_output_file} at process {os.getpid()}') + except Exception as e: + print(f'Error: {e}') + print(f'Error processing {ID} at process {os.getpid()}') + import traceback + traceback.print_exc() + return +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument("--eval_dir", type=str, default='eval_data/seedtts_testset') + parser.add_argument("--language", type=str, default='zh',choices=['zh','en']) + parser.add_argument("--model_path", type=str, default='/home/yueyulin/models/CosyVoice2-0.5B_RWKV_1.5B/') + parser.add_argument("--device", type=str, default='cuda:0') + parser.add_argument("--num_processes", type=int, default=2) + parser.add_argument("--output_dir", type=str, default='generated') + parser.add_argument("--list_file", type=str, default='meta.lst') + + + args = parser.parse_args() + print(args) + output_dir = os.path.join(args.eval_dir,args.language,args.output_dir) + #first delete the output_dir + if os.path.exists(output_dir): + import shutil + shutil.rmtree(output_dir) + os.makedirs(output_dir) + 
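+    # Example run (placeholder paths):
+    #   python eval/eval_seed_generate.py --eval_dir eval_data/seedtts_testset \
+    #       --language zh --model_path /path/to/CosyVoice2-0.5B_RWKV_1.5B --num_processes 2
+    # Generated audio is written to <eval_dir>/<language>/<output_dir>/<ID>.wav.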
list_file = os.path.join(args.eval_dir,args.language,args.list_file) + with open(list_file) as f: + lines = f.readlines() + lines = [line.strip() for line in lines] + print(f'Processing {len(lines)} lines') + + from multiprocessing import Pool + from functools import partial + import time + with Pool(args.num_processes,init_process_func,(args.model_path,args.device)) as p: + for line in lines: + # 10002287-00000095|在此奉劝大家别乱打美白针。|prompt-wavs/10002287-00000094.wav|简单地说,这相当于惠普把消费领域市场拱手相让了。 + parts = line.split('|') + ID = parts[0] + tts_text = parts[3] + prompt_text = parts[1] + prompt_audio_file = os.path.join(args.eval_dir,args.language,parts[2]) + p.apply_async(do_tts,(ID,tts_text,prompt_text,prompt_audio_file,output_dir)) + p.close() + p.join() + print('All done') \ No newline at end of file diff --git a/gradio/tts_demo_page.py b/gradio/tts_demo_page.py new file mode 100644 index 0000000000000000000000000000000000000000..2ed0394cc724200686bd7c38379e08dbc8c742dc --- /dev/null +++ b/gradio/tts_demo_page.py @@ -0,0 +1,81 @@ +import os +import tempfile +import torch +import torchaudio +import gradio as gr +from cosyvoice.cli.cosyvoice import CosyVoice2 +from cosyvoice.utils.file_utils import load_wav + +# 全局变量 +model_path = '/external_data/models/CosyVoice2-0.5B_RWKV_0.19B/' +device = 'cuda:0' if torch.cuda.is_available() else 'cpu' + +# 在应用启动时初始化模型(全局共享) +print("正在初始化 CosyVoice2 模型...") +cosyvoice = CosyVoice2(model_path, device=device, fp16=True) +# 预热模型 +cosyvoice.model.llm.dummy_forward() +print("模型初始化完成!") + +def synthesize_speech(audio_file, prompt_text, tts_text): + """合成语音""" + global cosyvoice + + if not audio_file or not prompt_text or not tts_text: + return None, "请提供所有必需的输入(提示音频、提示文本和要合成的文本)" + + try: + # 加载提示音频 + prompt_speech_16k = load_wav(audio_file, 16000) + + # 执行推理 + result = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=False) + + # 获取合成的语音 + output_speech = result[0]['tts_speech'] + + # 保存临时文件 + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav') + temp_file.close() + torchaudio.save(temp_file.name, output_speech, cosyvoice.sample_rate) + + return temp_file.name, f"语音合成成功!" + except Exception as e: + return None, f"合成过程中出错:{str(e)}" + +# 创建 Gradio 界面 +with gr.Blocks(title="RWKV TTS 演示") as demo: + gr.Markdown("# RWKV 语音合成演示") + gr.Markdown("### 语音合成系统已准备就绪,可直接使用") + + with gr.Row(): + with gr.Column(): + audio_input = gr.Audio(type="filepath", label="上传提示音频文件(WAV 格式)") + prompt_text = gr.Textbox(label="提示文本(与提示音频对应的文字内容)", placeholder="例如:今天天气挺不错的。") + tts_text = gr.Textbox(label="要合成的文本", placeholder="例如:收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。") + synthesize_button = gr.Button("生成语音") + + with gr.Column(): + audio_output = gr.Audio(label="合成的语音") + output_message = gr.Textbox(label="状态信息") + + synthesize_button.click( + fn=synthesize_speech, + inputs=[audio_input, prompt_text, tts_text], + outputs=[audio_output, output_message] + ) + + gr.Markdown(""" + ## 使用说明 + + 1. 上传一个WAV格式的提示音频文件 + 2. 输入与提示音频对应的文本内容 + 3. 输入希望合成的文本 + 4. 
点击"生成语音"按钮进行语音合成 + + 注意:模型已在服务启动时预加载,所有用户共享同一个模型实例。 + """) + +# 启动应用 +if __name__ == "__main__": + demo.launch() \ No newline at end of file diff --git a/mine.wav b/mine.wav new file mode 100644 index 0000000000000000000000000000000000000000..a844b76cdae5adbd646cca8bf60809e5023537fa Binary files /dev/null and b/mine.wav differ diff --git a/new.mp3 b/new.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..08837018f22631cec23b2b9f02b583a672774e65 Binary files /dev/null and b/new.mp3 differ diff --git a/new.wav b/new.wav new file mode 100644 index 0000000000000000000000000000000000000000..471b99eeccc1f1c143f4c3c25f7573fad6cdffc6 --- /dev/null +++ b/new.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e62a130a15a7560ebf8c1bd73212a9d6410a50e595de9a809bc64775a4a6f07 +size 141964 diff --git a/run_multiple_process.sh b/run_multiple_process.sh new file mode 100644 index 0000000000000000000000000000000000000000..6315c4bef435d742881502a768753445a931572d --- /dev/null +++ b/run_multiple_process.sh @@ -0,0 +1,137 @@ +export PYTHONPATH=/home/yueyulin/github/CosyVoice:/home/yueyulin/github/CosyVoice/third_party/Matcha-TTS/:/home/yueyulin/github/RWKVTTS + +# 设置默认参数 +LANGUAGE="zh" +OUTPUT_DIR="/home/yueyulin/data/speech_corpus" +COSY_MODEL_DIR="/home/yueyulin/models/CosyVoice2-0.5B/" +PROMPTS_DIR="extract_data/prompts/zh" +DEVICE="cuda:0" +PARQUET_FILES=() +JSONL_FILES=() +FILE_TYPE="" # 用于标记文件类型 +is_cross_lingual="" +is_instructed="" + +# 解析命令行参数 +while [[ $# -gt 0 ]]; do + case $1 in + --language) + LANGUAGE="$2" + shift 2 + ;; + --output_dir) + OUTPUT_DIR="$2" + shift 2 + ;; + --cosy_model_dir) + COSY_MODEL_DIR="$2" + shift 2 + ;; + --prompts_dir) + PROMPTS_DIR="$2" + shift 2 + ;; + --parquet_files) + # 接收多个parquet文件路径 + shift + while [[ $# -gt 0 && ! $1 =~ ^-- ]]; do + PARQUET_FILES+=("$1") + shift + done + FILE_TYPE="parquet" + ;; + --jsonl_files) + # 接收多个jsonl文件路径 + shift + while [[ $# -gt 0 && ! $1 =~ ^-- ]]; do + JSONL_FILES+=("$1") + shift + done + FILE_TYPE="jsonl" + ;; + --device) + DEVICE="$2" + shift 2 + ;; + --cross_lingual) + is_cross_lingual="--is_cross_lingual" + shift + ;; + --instructed) + is_instructed="--is_instructed" + shift + ;; + *) + echo "未知参数: $1" + exit 1 + ;; + esac +done + +# 检查是否提供了文件 +if [ "$FILE_TYPE" == "parquet" ]; then + if [ ${#PARQUET_FILES[@]} -eq 0 ]; then + echo "错误: 未指定parquet文件,请使用 --parquet_files 参数" + exit 1 + fi + FILES=("${PARQUET_FILES[@]}") + FILE_ARG="--parquet_files" + echo "将处理 ${#FILES[@]} 个parquet文件" +elif [ "$FILE_TYPE" == "jsonl" ]; then + if [ ${#JSONL_FILES[@]} -eq 0 ]; then + echo "错误: 未指定jsonl文件,请使用 --jsonl_files 参数" + exit 1 + fi + FILES=("${JSONL_FILES[@]}") + FILE_ARG="--jsonl_files" + echo "将处理 ${#FILES[@]} 个jsonl文件" +else + echo "错误: 请使用 --parquet_files 或 --jsonl_files 参数指定输入文件" + exit 1 +fi + +echo "运行参数:" +echo "语言: $LANGUAGE" +echo "输出目录: $OUTPUT_DIR" +echo "模型目录: $COSY_MODEL_DIR" +echo "提示词目录: $PROMPTS_DIR" +echo "设备: $DEVICE" +echo "文件类型: $FILE_TYPE" + +# 确保输出目录存在 +mkdir -p $OUTPUT_DIR + +# 启动处理进程,每个文件一个进程 +for ((i=0; i<${#FILES[@]}; i++)); do + FILE="${FILES[$i]}" + FILENAME=$(basename "$FILE") + + echo "处理文件 $FILENAME 使用 $DEVICE" + + # 在后台启动进程 + nohup python data/utils/utilitie.py \ + --task generate_speech_tokens \ + --language $LANGUAGE \ + $is_cross_lingual \ + $FILE_ARG "$FILE" \ + --output_dir $OUTPUT_DIR \ + --cosy_model_dir $COSY_MODEL_DIR \ + --prompts_dir $PROMPTS_DIR \ + $is_instructed \ + --device "$DEVICE" > "$OUTPUT_DIR/log_${FILENAME%.*}.log" 2>&1 & + + # 记录进程ID + PID=$! 
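+    # $! is the PID of the nohup'd python job launched just above; it is only
+    # echoed here for bookkeeping, and the `wait` at the end of the script
+    # blocks until every background job has finished.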
+ echo "启动进程 PID: $PID 处理文件: $FILENAME 使用 $DEVICE" + + # 等待一点时间确保进程启动 + sleep 5 +done + +echo "所有处理进程已启动,日志文件保存在 $OUTPUT_DIR 目录" +echo "使用 'ps aux | grep utilitie.py' 命令查看运行状态" +echo "使用 'nvidia-smi' 命令监控GPU使用情况" + +# 等待所有后台进程完成 +wait +echo "所有处理已完成" \ No newline at end of file diff --git a/rwkvtts_requirements.txt b/rwkvtts_requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0ca3dec8c2e5eeb2d269d038f3d3b4da032d7bb5 --- /dev/null +++ b/rwkvtts_requirements.txt @@ -0,0 +1,264 @@ +absl-py==2.1.0 +aiofiles==23.2.1 +aiohappyeyeballs==2.4.8 +aiohttp==3.11.13 +aiosignal==1.3.2 +alembic==1.15.1 +altair==5.5.0 +annotated-types==0.7.0 +antlr4-python3-runtime==4.9.3 +anyio==4.8.0 +argon2-cffi==23.1.0 +argon2-cffi-bindings==21.2.0 +arrow==1.3.0 +asttokens==3.0.0 +async-lru==2.0.4 +attrs==25.1.0 +audioread==3.0.1 +autopage==0.5.2 +babel==2.17.0 +beautifulsoup4==4.13.3 +bleach==6.2.0 +certifi==2025.1.31 +cffi==1.17.1 +cfgv==3.4.0 +charset-normalizer==3.4.1 +click==8.1.8 +cliff==4.9.1 +cmaes==0.11.1 +cmd2==2.5.11 +colorama==0.4.6 +coloredlogs==15.0.1 +colorlog==6.9.0 +comm==0.2.2 +conformer==0.3.2 +contourpy==1.3.1 +csvw==3.5.1 +cycler==0.12.1 +Cython==3.0.12 +datasets==3.3.2 +debugpy==1.8.13 +decorator==5.2.1 +deepspeed==0.16.4 +defusedxml==0.7.1 +diffusers==0.32.2 +dill==0.3.8 +distlib==0.3.9 +dlinfo==2.0.0 +einops==0.8.1 +executing==2.2.0 +fastapi==0.115.11 +fastjsonschema==2.21.1 +ffmpy==0.5.0 +filelock==3.17.0 +flatbuffers==25.2.10 +fonttools==4.56.0 +fqdn==1.5.1 +frozenlist==1.5.0 +fsspec==2024.12.0 +gdown==5.2.0 +gradio==3.43.2 +gradio_client==0.5.0 +greenlet==3.1.1 +grpcio==1.70.0 +h11==0.14.0 +hjson==3.1.0 +httpcore==1.0.7 +httpx==0.28.1 +huggingface-hub==0.29.1 +humanfriendly==10.0 +hydra-colorlog==1.2.0 +hydra-core==1.3.2 +hydra-optuna-sweeper==1.2.0 +HyperPyYAML==1.2.2 +identify==2.6.8 +idna==3.10 +importlib_metadata==8.6.1 +importlib_resources==6.5.2 +inflect==7.5.0 +iniconfig==2.0.0 +ipykernel==6.29.5 +ipython==9.0.1 +ipython_pygments_lexers==1.1.1 +ipywidgets==8.1.5 +isodate==0.7.2 +isoduration==20.11.0 +jedi==0.19.2 +Jinja2==3.1.5 +joblib==1.4.2 +json5==0.10.0 +jsonpointer==3.0.0 +jsonschema==4.23.0 +jsonschema-specifications==2024.10.1 +jupyter-events==0.12.0 +jupyter-lsp==2.2.5 +jupyter_client==8.6.3 +jupyter_core==5.7.2 +jupyter_server==2.15.0 +jupyter_server_terminals==0.5.3 +jupyterlab==4.3.5 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.27.3 +jupyterlab_widgets==3.0.13 +kiwisolver==1.4.8 +language-tags==1.2.0 +lazy_loader==0.4 +librosa==0.10.2.post1 +lightning==2.5.0.post0 +lightning-utilities==0.13.1 +llvmlite==0.44.0 +Mako==1.3.9 +Markdown==3.7 +markdown-it-py==3.0.0 +MarkupSafe==2.1.5 +matcha-tts==0.0.7.2 +matplotlib==3.10.1 +matplotlib-inline==0.1.7 +mdurl==0.1.2 +mistune==3.1.2 +modelscope==1.23.2 +more-itertools==10.6.0 +mpmath==1.3.0 +msgpack==1.1.0 +multidict==6.1.0 +multiprocess==0.70.16 +narwhals==1.29.0 +nbclient==0.10.2 +nbconvert==7.16.6 +nbformat==5.10.4 +nest-asyncio==1.6.0 +networkx==3.4.2 +ninja==1.11.1.3 +nodeenv==1.9.1 +notebook==7.3.2 +notebook_shim==0.2.4 +numba==0.61.0 +numpy==1.26.4 +nvidia-cublas-cu12==12.4.5.8 +nvidia-cuda-cupti-cu12==12.4.127 +nvidia-cuda-nvrtc-cu12==12.4.127 +nvidia-cuda-runtime-cu12==12.4.127 +nvidia-cudnn-cu12==9.1.0.70 +nvidia-cufft-cu12==11.2.1.3 +nvidia-curand-cu12==10.3.5.147 +nvidia-cusolver-cu12==11.6.1.9 +nvidia-cusparse-cu12==12.3.1.170 +nvidia-cusparselt-cu12==0.6.2 +nvidia-nccl-cu12==2.21.5 +nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvtx-cu12==12.4.127 +omegaconf==2.3.0 +onnx==1.17.0 
+onnxruntime-gpu==1.20.1 +openai-whisper==20240930 +optuna==2.10.1 +orjson==3.10.15 +overrides==7.7.0 +packaging==24.2 +pandas==2.2.3 +pandocfilters==1.5.1 +parso==0.8.4 +pbr==6.1.1 +pexpect==4.9.0 +phonemizer==3.3.0 +pillow==10.4.0 +platformdirs==4.3.6 +pluggy==1.5.0 +pooch==1.8.2 +pre_commit==4.1.0 +prettytable==3.15.1 +prometheus_client==0.21.1 +prompt_toolkit==3.0.50 +propcache==0.3.0 +protobuf==6.30.0 +psutil==7.0.0 +ptyprocess==0.7.0 +pure_eval==0.2.3 +py-cpuinfo==9.0.0 +pyarrow==19.0.1 +pycparser==2.22 +pydantic==2.10.6 +pydantic_core==2.27.2 +pydub==0.25.1 +Pygments==2.19.1 +pyparsing==3.2.1 +pyperclip==1.9.0 +PySocks==1.7.1 +pytest==8.3.5 +python-dateutil==2.9.0.post0 +python-dotenv==1.0.1 +python-json-logger==3.2.1 +python-multipart==0.0.20 +pytorch-lightning==2.5.0.post0 +pytz==2025.1 +pyworld==0.3.5 +PyYAML==6.0.2 +pyzmq==26.2.1 +rdflib==7.1.3 +referencing==0.36.2 +regex==2024.11.6 +requests==2.32.3 +rfc3339-validator==0.1.4 +rfc3986==1.5.0 +rfc3986-validator==0.1.1 +rich==13.9.4 +rootutils==1.0.7 +rpds-py==0.23.1 +ruamel.yaml==0.18.10 +ruamel.yaml.clib==0.2.12 +rwkv-fla==0.7.202503020902 +safetensors==0.5.3 +scikit-learn==1.6.1 +scipy==1.15.2 +seaborn==0.13.2 +segments==2.3.0 +semantic-version==2.10.0 +Send2Trash==1.8.3 +six==1.17.0 +sniffio==1.3.1 +soundfile==0.13.1 +soupsieve==2.6 +soxr==0.5.0.post1 +SQLAlchemy==2.0.38 +stack-data==0.6.3 +starlette==0.46.0 +stevedore==5.4.1 +sympy==1.13.1 +tensorboard==2.19.0 +tensorboard-data-server==0.7.2 +terminado==0.18.1 +threadpoolctl==3.5.0 +tiktoken==0.9.0 +tinycss2==1.4.0 +tokenizers==0.21.0 +torch==2.6.0 +torchaudio==2.6.0 +torchmetrics==1.6.2 +torchvision==0.21.0 +tornado==6.4.2 +tqdm==4.67.1 +traitlets==5.14.3 +transformers==4.49.0 +triton==3.2.0 +typeguard==4.4.2 +types-python-dateutil==2.9.0.20241206 +typing_extensions==4.12.2 +tzdata==2025.1 +Unidecode==1.3.8 +uri-template==1.3.0 +uritemplate==4.1.1 +urllib3==2.3.0 +uvicorn==0.34.0 +virtualenv==20.29.2 +wcwidth==0.2.13 +webcolors==24.11.1 +webencodings==0.5.1 +websocket-client==1.8.0 +websockets==11.0.3 +Werkzeug==3.1.3 +WeTextProcessing==1.0.4.1 +wget==3.2 +widgetsnbextension==4.0.13 +xxhash==3.5.0 +yarl==1.18.3 +zipp==3.21.0 diff --git a/third_party/cosyvoice/dataset/processor.py b/third_party/cosyvoice/dataset/processor.py new file mode 100644 index 0000000000000000000000000000000000000000..0535268e127aad38dba1ccef11d9430d81139998 --- /dev/null +++ b/third_party/cosyvoice/dataset/processor.py @@ -0,0 +1,435 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import random + +import pyarrow.parquet as pq +from io import BytesIO +import torch +import torchaudio +from torch.nn.utils.rnn import pad_sequence +import torch.nn.functional as F +import pyworld as pw + + +AUDIO_FORMAT_SETS = {'flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'} + + +def parquet_opener(data, mode='train', tts_data={}): + """ Give url or local file, return file descriptor + Inplace operation. 
+ + Args: + data(Iterable[str]): url or local file list + + Returns: + Iterable[{src, stream}] + """ + for sample in data: + assert 'src' in sample + url = sample['src'] + try: + for df in pq.ParquetFile(url).iter_batches(batch_size=64): + df = df.to_pandas() + for i in range(len(df)): + if mode == 'inference' and df.loc[i, 'utt'] not in tts_data: + continue + sample.update(dict(df.loc[i])) + if mode == 'train': + # NOTE do not return sample directly, must initialize a new dict + yield {**sample} + else: + for index, text in enumerate(tts_data[df.loc[i, 'utt']]): + yield {**sample, 'tts_index': index, 'tts_text': text} + except Exception as ex: + logging.warning('Failed to open {}, ex info {}'.format(url, ex)) + + +def filter(data, + max_length=10240, + min_length=10, + token_max_length=200, + token_min_length=1, + min_output_input_ratio=0.0005, + max_output_input_ratio=1, + mode='train'): + """ Filter sample according to feature and label length + Inplace operation. + + Args:: + data: Iterable[{key, wav, label, sample_rate}] + max_length: drop utterance which is greater than max_length(10ms) + min_length: drop utterance which is less than min_length(10ms) + token_max_length: drop utterance which is greater than + token_max_length, especially when use char unit for + english modeling + token_min_length: drop utterance which is + less than token_max_length + min_output_input_ratio: minimal ration of + token_length / feats_length(10ms) + max_output_input_ratio: maximum ration of + token_length / feats_length(10ms) + + Returns: + Iterable[{key, wav, label, sample_rate}] + """ + for sample in data: + sample['speech'], sample['sample_rate'] = torchaudio.load(BytesIO(sample['audio_data'])) + sample['speech'] = sample['speech'].mean(dim=0, keepdim=True) + del sample['audio_data'] + # sample['wav'] is torch.Tensor, we have 100 frames every second + num_frames = sample['speech'].size(1) / sample['sample_rate'] * 100 + if num_frames < min_length: + continue + if num_frames > max_length: + continue + if len(sample['text_token']) < token_min_length: + continue + if len(sample['text_token']) > token_max_length: + continue + if len(sample['speech_token']) == 0: + continue + if num_frames != 0: + if len(sample['text_token']) / num_frames < min_output_input_ratio: + continue + if len(sample['text_token']) / num_frames > max_output_input_ratio: + continue + yield sample + + +def resample(data, resample_rate=22050, min_sample_rate=16000, mode='train'): + """ Resample data. + Inplace operation. + + Args: + data: Iterable[{key, wav, label, sample_rate}] + resample_rate: target resample rate + + Returns: + Iterable[{key, wav, label, sample_rate}] + """ + for sample in data: + assert 'sample_rate' in sample + assert 'speech' in sample + sample_rate = sample['sample_rate'] + waveform = sample['speech'] + if sample_rate != resample_rate: + if sample_rate < min_sample_rate: + continue + sample['sample_rate'] = resample_rate + sample['speech'] = torchaudio.transforms.Resample( + orig_freq=sample_rate, new_freq=resample_rate)(waveform) + max_val = sample['speech'].abs().max() + if max_val > 1: + sample['speech'] /= max_val + yield sample + + +def truncate(data, truncate_length=24576, mode='train'): + """ Truncate data. 
+ + Args: + data: Iterable[{key, wav, label, sample_rate}] + truncate_length: truncate length + + Returns: + Iterable[{key, wav, label, sample_rate}] + """ + for sample in data: + waveform = sample['speech'] + if waveform.shape[1] > truncate_length: + start = random.randint(0, waveform.shape[1] - truncate_length) + waveform = waveform[:, start: start + truncate_length] + else: + waveform = torch.concat([waveform, torch.zeros(1, truncate_length - waveform.shape[1])], dim=1) + sample['speech'] = waveform + yield sample + + +def compute_fbank(data, + feat_extractor, + mode='train'): + """ Extract fbank + + Args: + data: Iterable[{key, wav, label, sample_rate}] + + Returns: + Iterable[{key, feat, label}] + """ + for sample in data: + assert 'sample_rate' in sample + assert 'speech' in sample + assert 'utt' in sample + assert 'text_token' in sample + waveform = sample['speech'] + mat = feat_extractor(waveform).squeeze(dim=0).transpose(0, 1) + sample['speech_feat'] = mat + yield sample + + +def compute_f0(data, sample_rate, hop_size, mode='train'): + """ Extract f0 + + Args: + data: Iterable[{key, wav, label, sample_rate}] + + Returns: + Iterable[{key, feat, label}] + """ + frame_period = hop_size * 1000 / sample_rate + for sample in data: + assert 'sample_rate' in sample + assert 'speech' in sample + assert 'utt' in sample + assert 'text_token' in sample + waveform = sample['speech'] + _f0, t = pw.harvest(waveform.squeeze(dim=0).numpy().astype('double'), sample_rate, frame_period=frame_period) + if sum(_f0 != 0) < 5: # this happens when the algorithm fails + _f0, t = pw.dio(waveform.squeeze(dim=0).numpy().astype('double'), sample_rate, frame_period=frame_period) # if harvest fails, try dio + f0 = pw.stonemask(waveform.squeeze(dim=0).numpy().astype('double'), _f0, t, sample_rate) + f0 = F.interpolate(torch.from_numpy(f0).view(1, 1, -1), size=sample['speech_feat'].shape[0], mode='linear').view(-1) + sample['pitch_feat'] = f0 + yield sample + + +def parse_embedding(data, normalize, mode='train'): + """ Parse utt_embedding/spk_embedding + + Args: + data: Iterable[{key, wav, label, sample_rate}] + + Returns: + Iterable[{key, feat, label}] + """ + for sample in data: + sample['utt_embedding'] = torch.tensor(sample['utt_embedding'], dtype=torch.float32) + sample['spk_embedding'] = torch.tensor(sample['spk_embedding'], dtype=torch.float32) + if normalize: + sample['utt_embedding'] = F.normalize(sample['utt_embedding'], dim=0) + sample['spk_embedding'] = F.normalize(sample['spk_embedding'], dim=0) + yield sample + + +def tokenize(data, get_tokenizer, allowed_special, mode='train'): + """ Decode text to chars or BPE + Inplace operation + + Args: + data: Iterable[{key, wav, txt, sample_rate}] + + Returns: + Iterable[{key, wav, txt, tokens, label, sample_rate}] + """ + tokenizer = get_tokenizer() + for sample in data: + assert 'text' in sample + sample['text_token'] = tokenizer.encode(sample['text'], allowed_special=allowed_special) + if mode == 'inference': + sample['tts_text_token'] = tokenizer.encode(sample['tts_text'], allowed_special=allowed_special) + yield sample + + +def shuffle(data, shuffle_size=10000, mode='train'): + """ Local shuffle the data + + Args: + data: Iterable[{key, feat, label}] + shuffle_size: buffer size for shuffle + + Returns: + Iterable[{key, feat, label}] + """ + buf = [] + for sample in data: + buf.append(sample) + if len(buf) >= shuffle_size: + random.shuffle(buf) + for x in buf: + yield x + buf = [] + # The sample left over + random.shuffle(buf) + for x in buf: + yield x + + 
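+# Illustrative composition of these generator stages (stage order follows each
+# function's input requirements; argument values are examples only):
+#
+#   samples = parquet_opener(src_list)
+#   samples = tokenize(samples, get_tokenizer, allowed_special='all')
+#   samples = filter(samples)
+#   samples = resample(samples, resample_rate=22050)
+#   samples = compute_fbank(samples, feat_extractor)
+#   samples = parse_embedding(samples, normalize=True)
+#   samples = shuffle(samples, shuffle_size=10000)
+#   samples = sort(samples, sort_size=500)
+#   batches = batch(samples, batch_type='dynamic', max_frames_in_batch=12000)
+#   batches = padding(batches, use_spk_embedding=False)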
+def sort(data, sort_size=500, mode='train'): + """ Sort the data by feature length. + Sort is used after shuffle and before batch, so we can group + utts with similar lengths into a batch, and `sort_size` should + be less than `shuffle_size` + + Args: + data: Iterable[{key, feat, label}] + sort_size: buffer size for sort + + Returns: + Iterable[{key, feat, label}] + """ + + buf = [] + for sample in data: + buf.append(sample) + if len(buf) >= sort_size: + buf.sort(key=lambda x: x['speech_feat'].size(0)) + for x in buf: + yield x + buf = [] + # The sample left over + buf.sort(key=lambda x: x['speech_feat'].size(0)) + for x in buf: + yield x + + +def static_batch(data, batch_size=16): + """ Static batch the data by `batch_size` + + Args: + data: Iterable[{key, feat, label}] + batch_size: batch size + + Returns: + Iterable[List[{key, feat, label}]] + """ + buf = [] + for sample in data: + buf.append(sample) + if len(buf) >= batch_size: + yield buf + buf = [] + if len(buf) > 0: + yield buf + + +def dynamic_batch(data, max_frames_in_batch=12000, mode='train'): + """ Dynamic batch the data until the total frames in batch + reach `max_frames_in_batch` + + Args: + data: Iterable[{key, feat, label}] + max_frames_in_batch: max_frames in one batch + + Returns: + Iterable[List[{key, feat, label}]] + """ + buf = [] + longest_frames = 0 + for sample in data: + assert 'speech_feat' in sample + assert isinstance(sample['speech_feat'], torch.Tensor) + new_sample_frames = sample['speech_feat'].size(0) + longest_frames = max(longest_frames, new_sample_frames) + frames_after_padding = longest_frames * (len(buf) + 1) + if frames_after_padding > max_frames_in_batch: + yield buf + buf = [sample] + longest_frames = new_sample_frames + else: + buf.append(sample) + if len(buf) > 0: + yield buf + + +def batch(data, batch_type='static', batch_size=16, max_frames_in_batch=12000, mode='train'): + """ Wrapper for static/dynamic batch + """ + if mode == 'inference': + return static_batch(data, 1) + else: + if batch_type == 'static': + return static_batch(data, batch_size) + elif batch_type == 'dynamic': + return dynamic_batch(data, max_frames_in_batch) + else: + logging.fatal('Unsupported batch type {}'.format(batch_type)) + + +def padding(data, use_spk_embedding, mode='train', gan=False): + """ Padding the data into training data + + Args: + data: Iterable[List[{key, feat, label}]] + + Returns: + Iterable[Tuple(keys, feats, labels, feats lengths, label lengths)] + """ + for sample in data: + assert isinstance(sample, list) + speech_feat_len = torch.tensor([x['speech_feat'].size(1) for x in sample], + dtype=torch.int32) + order = torch.argsort(speech_feat_len, descending=True) + + utts = [sample[i]['utt'] for i in order] + speech = [sample[i]['speech'].squeeze(dim=0) for i in order] + speech_len = torch.tensor([i.size(0) for i in speech], dtype=torch.int32) + speech = pad_sequence(speech, batch_first=True, padding_value=0) + speech_token = [torch.tensor(sample[i]['speech_token']) for i in order] + speech_token_len = torch.tensor([i.size(0) for i in speech_token], dtype=torch.int32) + speech_token = pad_sequence(speech_token, + batch_first=True, + padding_value=0) + speech_feat = [sample[i]['speech_feat'] for i in order] + speech_feat_len = torch.tensor([i.size(0) for i in speech_feat], dtype=torch.int32) + speech_feat = pad_sequence(speech_feat, + batch_first=True, + padding_value=0) + text = [sample[i]['text'] for i in order] + text_token = [torch.tensor(sample[i]['text_token']) for i in order] + text_token_len = 
torch.tensor([i.size(0) for i in text_token], dtype=torch.int32) + text_token = pad_sequence(text_token, batch_first=True, padding_value=0) + utt_embedding = torch.stack([sample[i]['utt_embedding'] for i in order], dim=0) + spk_embedding = torch.stack([sample[i]['spk_embedding'] for i in order], dim=0) + batch = { + "utts": utts, + "speech": speech, + "speech_len": speech_len, + "speech_token": speech_token, + "speech_token_len": speech_token_len, + "speech_feat": speech_feat, + "speech_feat_len": speech_feat_len, + "text": text, + "text_token": text_token, + "text_token_len": text_token_len, + "utt_embedding": utt_embedding, + "spk_embedding": spk_embedding, + } + if gan is True: + # in gan train, we need pitch_feat + pitch_feat = [sample[i]['pitch_feat'] for i in order] + pitch_feat_len = torch.tensor([i.size(0) for i in pitch_feat], dtype=torch.int32) + pitch_feat = pad_sequence(pitch_feat, + batch_first=True, + padding_value=0) + batch["pitch_feat"] = pitch_feat + batch["pitch_feat_len"] = pitch_feat_len + else: + # only gan train needs speech, delete it to save memory + del batch["speech"] + del batch["speech_len"] + if mode == 'inference': + tts_text = [sample[i]['tts_text'] for i in order] + tts_index = [sample[i]['tts_index'] for i in order] + tts_text_token = [torch.tensor(sample[i]['tts_text_token']) for i in order] + tts_text_token_len = torch.tensor([i.size(0) for i in tts_text_token], dtype=torch.int32) + tts_text_token = pad_sequence(tts_text_token, batch_first=True, padding_value=-1) + batch.update({'tts_text': tts_text, + 'tts_index': tts_index, + 'tts_text_token': tts_text_token, + 'tts_text_token_len': tts_text_token_len}) + if use_spk_embedding is True: + batch["embedding"] = batch["spk_embedding"] + else: + batch["embedding"] = batch["utt_embedding"] + yield batch \ No newline at end of file diff --git a/third_party/cosyvoice/flow/decoder.py b/third_party/cosyvoice/flow/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..420a1bfc738b368d0f6c5b4d750cedba3263ce1d --- /dev/null +++ b/third_party/cosyvoice/flow/decoder.py @@ -0,0 +1,301 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
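+# This module implements the decoder network used by the flow matching module
+# (see flow.py below): a 1-D U-Net of down / mid / up blocks with Transformer
+# layers, plus Causal* variants whose convolutions pad on the left only, so an
+# output frame never depends on future frames.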
+import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import pack, rearrange, repeat +from cosyvoice.utils.common import mask_to_bias +from cosyvoice.utils.mask import add_optional_chunk_mask +from matcha.models.components.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, TimestepEmbedding, Upsample1D +from matcha.models.components.transformer import BasicTransformerBlock + + +class Transpose(torch.nn.Module): + def __init__(self, dim0: int, dim1: int): + super().__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x: torch.Tensor): + x = torch.transpose(x, self.dim0, self.dim1) + return x + + +class CausalBlock1D(Block1D): + def __init__(self, dim: int, dim_out: int): + super(CausalBlock1D, self).__init__(dim, dim_out) + self.block = torch.nn.Sequential( + CausalConv1d(dim, dim_out, 3), + Transpose(1, 2), + nn.LayerNorm(dim_out), + Transpose(1, 2), + nn.Mish(), + ) + + def forward(self, x: torch.Tensor, mask: torch.Tensor): + output = self.block(x * mask) + return output * mask + + +class CausalResnetBlock1D(ResnetBlock1D): + def __init__(self, dim: int, dim_out: int, time_emb_dim: int, groups: int = 8): + super(CausalResnetBlock1D, self).__init__(dim, dim_out, time_emb_dim, groups) + self.block1 = CausalBlock1D(dim, dim_out) + self.block2 = CausalBlock1D(dim_out, dim_out) + + +class CausalConv1d(torch.nn.Conv1d): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + groups: int = 1, + bias: bool = True, + padding_mode: str = 'zeros', + device=None, + dtype=None + ) -> None: + super(CausalConv1d, self).__init__(in_channels, out_channels, + kernel_size, stride, + padding=0, dilation=dilation, + groups=groups, bias=bias, + padding_mode=padding_mode, + device=device, dtype=dtype) + assert stride == 1 + self.causal_padding = (kernel_size - 1, 0) + + def forward(self, x: torch.Tensor): + x = F.pad(x, self.causal_padding) + x = super(CausalConv1d, self).forward(x) + return x + + +class ConditionalDecoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + causal=False, + channels=(256, 256), + dropout=0.05, + attention_head_dim=64, + n_blocks=1, + num_mid_blocks=2, + num_heads=4, + act_fn="snake", + ): + """ + This decoder requires an input with the same shape of the target. So, if your text content + is shorter or longer than the outputs, please re-sampling it before feeding to the decoder. 
+ """ + super().__init__() + channels = tuple(channels) + self.in_channels = in_channels + self.out_channels = out_channels + self.causal = causal + self.time_embeddings = SinusoidalPosEmb(in_channels) + time_embed_dim = channels[0] * 4 + self.time_mlp = TimestepEmbedding( + in_channels=in_channels, + time_embed_dim=time_embed_dim, + act_fn="silu", + ) + self.down_blocks = nn.ModuleList([]) + self.mid_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + output_channel = in_channels + for i in range(len(channels)): # pylint: disable=consider-using-enumerate + input_channel = output_channel + output_channel = channels[i] + is_last = i == len(channels) - 1 + resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \ + ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) + transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + dim=output_channel, + num_attention_heads=num_heads, + attention_head_dim=attention_head_dim, + dropout=dropout, + activation_fn=act_fn, + ) + for _ in range(n_blocks) + ] + ) + downsample = ( + Downsample1D(output_channel) if not is_last else + CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1) + ) + self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample])) + + for _ in range(num_mid_blocks): + input_channel = channels[-1] + out_channels = channels[-1] + resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \ + ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) + + transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + dim=output_channel, + num_attention_heads=num_heads, + attention_head_dim=attention_head_dim, + dropout=dropout, + activation_fn=act_fn, + ) + for _ in range(n_blocks) + ] + ) + + self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks])) + + channels = channels[::-1] + (channels[0],) + for i in range(len(channels) - 1): + input_channel = channels[i] * 2 + output_channel = channels[i + 1] + is_last = i == len(channels) - 2 + resnet = CausalResnetBlock1D( + dim=input_channel, + dim_out=output_channel, + time_emb_dim=time_embed_dim, + ) if self.causal else ResnetBlock1D( + dim=input_channel, + dim_out=output_channel, + time_emb_dim=time_embed_dim, + ) + transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + dim=output_channel, + num_attention_heads=num_heads, + attention_head_dim=attention_head_dim, + dropout=dropout, + activation_fn=act_fn, + ) + for _ in range(n_blocks) + ] + ) + upsample = ( + Upsample1D(output_channel, use_conv_transpose=True) + if not is_last + else CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1) + ) + self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample])) + self.final_block = CausalBlock1D(channels[-1], channels[-1]) if self.causal else Block1D(channels[-1], channels[-1]) + self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1) + self.initialize_weights() + + def initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv1d): + nn.init.kaiming_normal_(m.weight, nonlinearity="relu") + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, 
nn.Linear): + nn.init.kaiming_normal_(m.weight, nonlinearity="relu") + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x, mask, mu, t, spks=None, cond=None): + """Forward pass of the UNet1DConditional model. + + Args: + x (torch.Tensor): shape (batch_size, in_channels, time) + mask (_type_): shape (batch_size, 1, time) + t (_type_): shape (batch_size) + spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None. + cond (_type_, optional): placeholder for future use. Defaults to None. + + Raises: + ValueError: _description_ + ValueError: _description_ + + Returns: + _type_: _description_ + """ + + t = self.time_embeddings(t).to(t.dtype) + t = self.time_mlp(t) + + x = pack([x, mu], "b * t")[0] + + if spks is not None: + spks = repeat(spks, "b c -> b c t", t=x.shape[-1]) + x = pack([x, spks], "b * t")[0] + if cond is not None: + x = pack([x, cond], "b * t")[0] + + hiddens = [] + masks = [mask] + for resnet, transformer_blocks, downsample in self.down_blocks: + mask_down = masks[-1] + x = resnet(x, mask_down, t) + x = rearrange(x, "b c t -> b t c").contiguous() + # attn_mask = torch.matmul(mask_down.transpose(1, 2).contiguous(), mask_down) + attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, self.static_chunk_size, -1) + attn_mask = mask_to_bias(attn_mask == 1, x.dtype) + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=attn_mask, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t").contiguous() + hiddens.append(x) # Save hidden states for skip connections + x = downsample(x * mask_down) + masks.append(mask_down[:, :, ::2]) + masks = masks[:-1] + mask_mid = masks[-1] + + for resnet, transformer_blocks in self.mid_blocks: + x = resnet(x, mask_mid, t) + x = rearrange(x, "b c t -> b t c").contiguous() + # attn_mask = torch.matmul(mask_mid.transpose(1, 2).contiguous(), mask_mid) + attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, self.static_chunk_size, -1) + attn_mask = mask_to_bias(attn_mask == 1, x.dtype) + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=attn_mask, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t").contiguous() + + for resnet, transformer_blocks, upsample in self.up_blocks: + mask_up = masks.pop() + skip = hiddens.pop() + x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0] + x = resnet(x, mask_up, t) + x = rearrange(x, "b c t -> b t c").contiguous() + # attn_mask = torch.matmul(mask_up.transpose(1, 2).contiguous(), mask_up) + attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, self.static_chunk_size, -1) + attn_mask = mask_to_bias(attn_mask == 1, x.dtype) + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=attn_mask, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t").contiguous() + x = upsample(x * mask_up) + x = self.final_block(x, mask_up) + output = self.final_proj(x * mask_up) + return output * mask diff --git a/third_party/cosyvoice/flow/flow.py b/third_party/cosyvoice/flow/flow.py new file mode 100644 index 0000000000000000000000000000000000000000..72bb34cbb1a0bbece1d03864783c0c19ec9edde1 --- /dev/null +++ b/third_party/cosyvoice/flow/flow.py @@ -0,0 +1,239 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import random +from typing import Dict, Optional +import torch +import torch.nn as nn +from torch.nn import functional as F +from omegaconf import DictConfig +from cosyvoice.utils.mask import make_pad_mask + + +class MaskedDiffWithXvec(torch.nn.Module): + def __init__(self, + input_size: int = 512, + output_size: int = 80, + spk_embed_dim: int = 192, + output_type: str = "mel", + vocab_size: int = 4096, + input_frame_rate: int = 50, + only_mask_loss: bool = True, + encoder: torch.nn.Module = None, + length_regulator: torch.nn.Module = None, + decoder: torch.nn.Module = None, + decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1, + 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine', + 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}), + 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64, + 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}}, + mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050, + 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}): + super().__init__() + self.input_size = input_size + self.output_size = output_size + self.decoder_conf = decoder_conf + self.mel_feat_conf = mel_feat_conf + self.vocab_size = vocab_size + self.output_type = output_type + self.input_frame_rate = input_frame_rate + logging.info(f"input frame rate={self.input_frame_rate}") + self.input_embedding = nn.Embedding(vocab_size, input_size) + self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size) + self.encoder = encoder + self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size) + self.decoder = decoder + self.length_regulator = length_regulator + self.only_mask_loss = only_mask_loss + + def forward( + self, + batch: dict, + device: torch.device, + ) -> Dict[str, Optional[torch.Tensor]]: + token = batch['speech_token'].to(device) + token_len = batch['speech_token_len'].to(device) + feat = batch['speech_feat'].to(device) + feat_len = batch['speech_feat_len'].to(device) + embedding = batch['embedding'].to(device) + + # xvec projection + embedding = F.normalize(embedding, dim=1) + embedding = self.spk_embed_affine_layer(embedding) + + # concat text and prompt_text + mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device) + token = self.input_embedding(torch.clamp(token, min=0)) * mask + + # text encode + h, h_lengths = self.encoder(token, token_len) + h = self.encoder_proj(h) + h, h_lengths = self.length_regulator(h, feat_len) + + # get conditions + conds = torch.zeros(feat.shape, device=token.device) + for i, j in enumerate(feat_len): + if random.random() < 0.5: + continue + index = random.randint(0, int(0.3 * j)) + conds[i, :index] = feat[i, :index] + conds = conds.transpose(1, 2) + + mask = (~make_pad_mask(feat_len)).to(h) + feat = F.interpolate(feat.unsqueeze(dim=1), size=h.shape[1:], mode="nearest").squeeze(dim=1) + loss, _ = self.decoder.compute_loss( + feat.transpose(1, 2).contiguous(), + mask.unsqueeze(1), + h.transpose(1, 
2).contiguous(), + embedding, + cond=conds + ) + return {'loss': loss} + + @torch.inference_mode() + def inference(self, + token, + token_len, + prompt_token, + prompt_token_len, + prompt_feat, + prompt_feat_len, + embedding, + flow_cache): + if self.fp16 is True: + prompt_feat = prompt_feat.half() + embedding = embedding.half() + + assert token.shape[0] == 1 + # xvec projection + embedding = F.normalize(embedding, dim=1) + embedding = self.spk_embed_affine_layer(embedding) + + # concat text and prompt_text + token_len1, token_len2 = prompt_token.shape[1], token.shape[1] + token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len + mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding) + token = self.input_embedding(torch.clamp(token, min=0)) * mask + + # text encode + h, h_lengths = self.encoder(token, token_len) + h = self.encoder_proj(h) + mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / self.input_frame_rate * 22050 / 256) + h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2, self.input_frame_rate) + + # get conditions + conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype) + conds[:, :mel_len1] = prompt_feat + conds = conds.transpose(1, 2) + + mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h) + feat, flow_cache = self.decoder( + mu=h.transpose(1, 2).contiguous(), + mask=mask.unsqueeze(1), + spks=embedding, + cond=conds, + n_timesteps=10, + prompt_len=mel_len1, + flow_cache=flow_cache + ) + feat = feat[:, :, mel_len1:] + assert feat.shape[2] == mel_len2 + return feat.float(), flow_cache + + +class CausalMaskedDiffWithXvec(torch.nn.Module): + def __init__(self, + input_size: int = 512, + output_size: int = 80, + spk_embed_dim: int = 192, + output_type: str = "mel", + vocab_size: int = 4096, + input_frame_rate: int = 50, + only_mask_loss: bool = True, + token_mel_ratio: int = 2, + pre_lookahead_len: int = 3, + encoder: torch.nn.Module = None, + decoder: torch.nn.Module = None, + decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1, + 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine', + 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}), + 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64, + 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}}, + mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050, + 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}): + super().__init__() + self.input_size = input_size + self.output_size = output_size + self.decoder_conf = decoder_conf + self.mel_feat_conf = mel_feat_conf + self.vocab_size = vocab_size + self.output_type = output_type + self.input_frame_rate = input_frame_rate + logging.info(f"input frame rate={self.input_frame_rate}") + self.input_embedding = nn.Embedding(vocab_size, input_size) + self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size) + self.encoder = encoder + self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size) + self.decoder = decoder + self.only_mask_loss = only_mask_loss + self.token_mel_ratio = token_mel_ratio + self.pre_lookahead_len = pre_lookahead_len + + @torch.inference_mode() + def inference(self, + token, + token_len, + prompt_token, + prompt_token_len, + prompt_feat, + prompt_feat_len, + embedding, + finalize): + if self.fp16 is 
True: + prompt_feat = prompt_feat.half() + embedding = embedding.half() + + assert token.shape[0] == 1 + # xvec projection + embedding = F.normalize(embedding, dim=1) + embedding = self.spk_embed_affine_layer(embedding) + + # concat text and prompt_text + token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len + mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding) + token = self.input_embedding(torch.clamp(token, min=0)) * mask + + # text encode + h, h_lengths = self.encoder(token, token_len) + if finalize is False: + h = h[:, :-self.pre_lookahead_len * self.token_mel_ratio] + mel_len1, mel_len2 = prompt_feat.shape[1], h.shape[1] - prompt_feat.shape[1] + h = self.encoder_proj(h) + + # get conditions + conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype) + conds[:, :mel_len1] = prompt_feat + conds = conds.transpose(1, 2) + + mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h) + feat, _ = self.decoder( + mu=h.transpose(1, 2).contiguous(), + mask=mask.unsqueeze(1), + spks=embedding, + cond=conds, + n_timesteps=10 + ) + feat = feat[:, :, mel_len1:] + assert feat.shape[2] == mel_len2 + return feat.float(), None diff --git a/third_party/cosyvoice/flow/flow_matching.py b/third_party/cosyvoice/flow/flow_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..6a60f6d4562194c9b1c67f9832b0895cf5a328b7 --- /dev/null +++ b/third_party/cosyvoice/flow/flow_matching.py @@ -0,0 +1,217 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import threading +import torch +import torch.nn.functional as F +from matcha.models.components.flow_matching import BASECFM + + +class ConditionalCFM(BASECFM): + def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None): + super().__init__( + n_feats=in_channels, + cfm_params=cfm_params, + n_spks=n_spks, + spk_emb_dim=spk_emb_dim, + ) + self.t_scheduler = cfm_params.t_scheduler + self.training_cfg_rate = cfm_params.training_cfg_rate + self.inference_cfg_rate = cfm_params.inference_cfg_rate + in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0) + # Just change the architecture of the estimator here + self.estimator = estimator + self.lock = threading.Lock() + + @torch.inference_mode() + def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, prompt_len=0, flow_cache=torch.zeros(1, 80, 0, 2)): + """Forward diffusion + + Args: + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): output_mask + shape: (batch_size, 1, mel_timesteps) + n_timesteps (int): number of diffusion steps + temperature (float, optional): temperature for scaling noise. Defaults to 1.0. + spks (torch.Tensor, optional): speaker ids. Defaults to None. 
+ shape: (batch_size, spk_emb_dim) + cond: Not used but kept for future purposes + + Returns: + sample: generated mel-spectrogram + shape: (batch_size, n_feats, mel_timesteps) + """ + + z = torch.randn_like(mu).to(mu.device).to(mu.dtype) * temperature + cache_size = flow_cache.shape[2] + # fix prompt and overlap part mu and z + if cache_size != 0: + z[:, :, :cache_size] = flow_cache[:, :, :, 0] + mu[:, :, :cache_size] = flow_cache[:, :, :, 1] + z_cache = torch.concat([z[:, :, :prompt_len], z[:, :, -34:]], dim=2) + mu_cache = torch.concat([mu[:, :, :prompt_len], mu[:, :, -34:]], dim=2) + flow_cache = torch.stack([z_cache, mu_cache], dim=-1) + + t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype) + if self.t_scheduler == 'cosine': + t_span = 1 - torch.cos(t_span * 0.5 * torch.pi) + return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), flow_cache + + def solve_euler(self, x, t_span, mu, mask, spks, cond): + """ + Fixed euler solver for ODEs. + Args: + x (torch.Tensor): random noise + t_span (torch.Tensor): n_timesteps interpolated + shape: (n_timesteps + 1,) + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): output_mask + shape: (batch_size, 1, mel_timesteps) + spks (torch.Tensor, optional): speaker ids. Defaults to None. + shape: (batch_size, spk_emb_dim) + cond: Not used but kept for future purposes + """ + t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0] + t = t.unsqueeze(dim=0) + + # I am storing this because I can later plot it by putting a debugger here and saving it to a file + # Or in future might add like a return_all_steps flag + sol = [] + + # Do not use concat, it may cause memory format changed and trt infer with wrong results! 
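            # Editorial note on the buffers allocated below (not part of the original
            # source): the estimator runs once per Euler step on a batch of size 2.
            # Row 0 carries the conditional inputs (mu, spks, cond filled in); row 1
            # leaves them at zero and acts as the unconditional branch. Keeping these
            # buffers pre-allocated with static shapes also keeps the TensorRT engine
            # inputs stable. After the estimator call the two halves are recombined as
            #   dphi_dt = (1 + w) * cond_out - w * uncond_out,  w = inference_cfg_rate,
            # which is the classifier-free guidance update applied at each step.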
+ x_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype) + mask_in = torch.zeros([2, 1, x.size(2)], device=x.device, dtype=x.dtype) + mu_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype) + t_in = torch.zeros([2], device=x.device, dtype=x.dtype) + spks_in = torch.zeros([2, 80], device=x.device, dtype=x.dtype) + cond_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype) + for step in range(1, len(t_span)): + # Classifier-Free Guidance inference introduced in VoiceBox + x_in[:] = x + mask_in[:] = mask + mu_in[0] = mu + t_in[:] = t.unsqueeze(0) + spks_in[0] = spks + cond_in[0] = cond + dphi_dt = self.forward_estimator( + x_in, mask_in, + mu_in, t_in, + spks_in, + cond_in + ) + dphi_dt, cfg_dphi_dt = torch.split(dphi_dt, [x.size(0), x.size(0)], dim=0) + dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt - self.inference_cfg_rate * cfg_dphi_dt) + x = x + dt * dphi_dt + t = t + dt + sol.append(x) + if step < len(t_span) - 1: + dt = t_span[step + 1] - t + + return sol[-1].float() + + def forward_estimator(self, x, mask, mu, t, spks, cond): + if isinstance(self.estimator, torch.nn.Module): + return self.estimator.forward(x, mask, mu, t, spks, cond) + else: + with self.lock: + self.estimator.set_input_shape('x', (2, 80, x.size(2))) + self.estimator.set_input_shape('mask', (2, 1, x.size(2))) + self.estimator.set_input_shape('mu', (2, 80, x.size(2))) + self.estimator.set_input_shape('t', (2,)) + self.estimator.set_input_shape('spks', (2, 80)) + self.estimator.set_input_shape('cond', (2, 80, x.size(2))) + # run trt engine + self.estimator.execute_v2([x.contiguous().data_ptr(), + mask.contiguous().data_ptr(), + mu.contiguous().data_ptr(), + t.contiguous().data_ptr(), + spks.contiguous().data_ptr(), + cond.contiguous().data_ptr(), + x.data_ptr()]) + return x + + def compute_loss(self, x1, mask, mu, spks=None, cond=None): + """Computes diffusion loss + + Args: + x1 (torch.Tensor): Target + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): target mask + shape: (batch_size, 1, mel_timesteps) + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + spks (torch.Tensor, optional): speaker embedding. Defaults to None. 
+ shape: (batch_size, spk_emb_dim) + + Returns: + loss: conditional flow matching loss + y: conditional flow + shape: (batch_size, n_feats, mel_timesteps) + """ + b, _, t = mu.shape + + # random timestep + t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype) + if self.t_scheduler == 'cosine': + t = 1 - torch.cos(t * 0.5 * torch.pi) + # sample noise p(x_0) + z = torch.randn_like(x1) + + y = (1 - (1 - self.sigma_min) * t) * z + t * x1 + u = x1 - (1 - self.sigma_min) * z + + # during training, we randomly drop condition to trade off mode coverage and sample fidelity + if self.training_cfg_rate > 0: + cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate + mu = mu * cfg_mask.view(-1, 1, 1) + spks = spks * cfg_mask.view(-1, 1) + cond = cond * cfg_mask.view(-1, 1, 1) + + pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond) + loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1]) + return loss, y + + +class CausalConditionalCFM(ConditionalCFM): + def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None): + super().__init__(in_channels, cfm_params, n_spks, spk_emb_dim, estimator) + self.rand_noise = torch.randn([1, 80, 50 * 300]) + + @torch.inference_mode() + def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None): + """Forward diffusion + + Args: + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): output_mask + shape: (batch_size, 1, mel_timesteps) + n_timesteps (int): number of diffusion steps + temperature (float, optional): temperature for scaling noise. Defaults to 1.0. + spks (torch.Tensor, optional): speaker ids. Defaults to None. + shape: (batch_size, spk_emb_dim) + cond: Not used but kept for future purposes + + Returns: + sample: generated mel-spectrogram + shape: (batch_size, n_feats, mel_timesteps) + """ + + z = self.rand_noise[:, :, :mu.size(2)].to(mu.device).to(mu.dtype) * temperature + # fix prompt and overlap part mu and z + t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype) + if self.t_scheduler == 'cosine': + t_span = 1 - torch.cos(t_span * 0.5 * torch.pi) + return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), None diff --git a/third_party/cosyvoice/flow/length_regulator.py b/third_party/cosyvoice/flow/length_regulator.py new file mode 100644 index 0000000000000000000000000000000000000000..2cae42fa81de0b0e29b5f1eeee209d1891aa9a78 --- /dev/null +++ b/third_party/cosyvoice/flow/length_regulator.py @@ -0,0 +1,69 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
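# Editorial note (illustrative sketch, not part of the original source): the
# InterpolateRegulator below stretches token-rate encoder features to the mel
# frame rate with F.interpolate followed by a small conv/GroupNorm/Mish stack.
# A minimal sketch of the core resampling step, with made-up sizes:
#
#   >>> import torch
#   >>> import torch.nn.functional as F
#   >>> h = torch.randn(1, 50, 512)           # (B, T_token, D), 50 tokens at 50 Hz
#   >>> mel_len = int(50 / 50 * 22050 / 256)  # ~86 mel frames for 1 s of speech
#   >>> F.interpolate(h.transpose(1, 2), size=mel_len, mode='linear').shape
#   torch.Size([1, 512, 86])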
+from typing import Tuple +import torch.nn as nn +import torch +from torch.nn import functional as F +from cosyvoice.utils.mask import make_pad_mask + + +class InterpolateRegulator(nn.Module): + def __init__( + self, + channels: int, + sampling_ratios: Tuple, + out_channels: int = None, + groups: int = 1, + ): + super().__init__() + self.sampling_ratios = sampling_ratios + out_channels = out_channels or channels + model = nn.ModuleList([]) + if len(sampling_ratios) > 0: + for _ in sampling_ratios: + module = nn.Conv1d(channels, channels, 3, 1, 1) + norm = nn.GroupNorm(groups, channels) + act = nn.Mish() + model.extend([module, norm, act]) + model.append( + nn.Conv1d(channels, out_channels, 1, 1) + ) + self.model = nn.Sequential(*model) + + def forward(self, x, ylens=None): + # x in (B, T, D) + mask = (~make_pad_mask(ylens)).to(x).unsqueeze(-1) + x = F.interpolate(x.transpose(1, 2).contiguous(), size=ylens.max(), mode='linear') + out = self.model(x).transpose(1, 2).contiguous() + olens = ylens + return out * mask, olens + + def inference(self, x1, x2, mel_len1, mel_len2, input_frame_rate=50): + # in inference mode, interploate prompt token and token(head/mid/tail) seprately, so we can get a clear separation point of mel + # x in (B, T, D) + if x2.shape[1] > 40: + x2_head = F.interpolate(x2[:, :20].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear') + x2_mid = F.interpolate(x2[:, 20:-20].transpose(1, 2).contiguous(), size=mel_len2 - int(20 / input_frame_rate * 22050 / 256) * 2, + mode='linear') + x2_tail = F.interpolate(x2[:, -20:].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear') + x2 = torch.concat([x2_head, x2_mid, x2_tail], dim=2) + else: + x2 = F.interpolate(x2.transpose(1, 2).contiguous(), size=mel_len2, mode='linear') + if x1.shape[1] != 0: + x1 = F.interpolate(x1.transpose(1, 2).contiguous(), size=mel_len1, mode='linear') + x = torch.concat([x1, x2], dim=2) + else: + x = x2 + out = self.model(x).transpose(1, 2).contiguous() + return out, mel_len1 + mel_len2 diff --git a/third_party/cosyvoice/hifigan/discriminator.py b/third_party/cosyvoice/hifigan/discriminator.py new file mode 100644 index 0000000000000000000000000000000000000000..1a4dcc88f622af6d519f0b5db141d9f4f00526f8 --- /dev/null +++ b/third_party/cosyvoice/hifigan/discriminator.py @@ -0,0 +1,140 @@ +import torch +import torch.nn as nn +from torch.nn.utils.parametrizations import weight_norm +from typing import List, Optional, Tuple +from einops import rearrange +from torchaudio.transforms import Spectrogram + + +class MultipleDiscriminator(nn.Module): + def __init__( + self, mpd: nn.Module, mrd: nn.Module + ): + super().__init__() + self.mpd = mpd + self.mrd = mrd + + def forward(self, y: torch.Tensor, y_hat: torch.Tensor): + y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], [] + this_y_d_rs, this_y_d_gs, this_fmap_rs, this_fmap_gs = self.mpd(y.unsqueeze(dim=1), y_hat.unsqueeze(dim=1)) + y_d_rs += this_y_d_rs + y_d_gs += this_y_d_gs + fmap_rs += this_fmap_rs + fmap_gs += this_fmap_gs + this_y_d_rs, this_y_d_gs, this_fmap_rs, this_fmap_gs = self.mrd(y, y_hat) + y_d_rs += this_y_d_rs + y_d_gs += this_y_d_gs + fmap_rs += this_fmap_rs + fmap_gs += this_fmap_gs + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class MultiResolutionDiscriminator(nn.Module): + def __init__( + self, + fft_sizes: Tuple[int, ...] 
= (2048, 1024, 512), + num_embeddings: Optional[int] = None, + ): + """ + Multi-Resolution Discriminator module adapted from https://github.com/descriptinc/descript-audio-codec. + Additionally, it allows incorporating conditional information with a learned embeddings table. + + Args: + fft_sizes (tuple[int]): Tuple of window lengths for FFT. Defaults to (2048, 1024, 512). + num_embeddings (int, optional): Number of embeddings. None means non-conditional discriminator. + Defaults to None. + """ + + super().__init__() + self.discriminators = nn.ModuleList( + [DiscriminatorR(window_length=w, num_embeddings=num_embeddings) for w in fft_sizes] + ) + + def forward( + self, y: torch.Tensor, y_hat: torch.Tensor, bandwidth_id: torch.Tensor = None + ) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[List[torch.Tensor]], List[List[torch.Tensor]]]: + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + + for d in self.discriminators: + y_d_r, fmap_r = d(x=y, cond_embedding_id=bandwidth_id) + y_d_g, fmap_g = d(x=y_hat, cond_embedding_id=bandwidth_id) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorR(nn.Module): + def __init__( + self, + window_length: int, + num_embeddings: Optional[int] = None, + channels: int = 32, + hop_factor: float = 0.25, + bands: Tuple[Tuple[float, float], ...] = ((0.0, 0.1), (0.1, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)), + ): + super().__init__() + self.window_length = window_length + self.hop_factor = hop_factor + self.spec_fn = Spectrogram( + n_fft=window_length, hop_length=int(window_length * hop_factor), win_length=window_length, power=None + ) + n_fft = window_length // 2 + 1 + bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands] + self.bands = bands + convs = lambda: nn.ModuleList( + [ + weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))), + ] + ) + self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))]) + + if num_embeddings is not None: + self.emb = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=channels) + torch.nn.init.zeros_(self.emb.weight) + + self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1))) + + def spectrogram(self, x): + # Remove DC offset + x = x - x.mean(dim=-1, keepdims=True) + # Peak normalize the volume of input audio + x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9) + x = self.spec_fn(x) + x = torch.view_as_real(x) + x = rearrange(x, "b f t c -> b c t f") + # Split into bands + x_bands = [x[..., b[0]: b[1]] for b in self.bands] + return x_bands + + def forward(self, x: torch.Tensor, cond_embedding_id: torch.Tensor = None): + x_bands = self.spectrogram(x) + fmap = [] + x = [] + for band, stack in zip(x_bands, self.band_convs): + for i, layer in enumerate(stack): + band = layer(band) + band = torch.nn.functional.leaky_relu(band, 0.1) + if i > 0: + fmap.append(band) + x.append(band) + x = torch.cat(x, dim=-1) + if cond_embedding_id is not None: + emb = self.emb(cond_embedding_id) + h = (emb.view(1, -1, 1, 1) * x).sum(dim=1, keepdims=True) + else: + h = 0 + x = self.conv_post(x) + fmap.append(x) + x += h + + return 
x, fmap diff --git a/third_party/cosyvoice/hifigan/f0_predictor.py b/third_party/cosyvoice/hifigan/f0_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..172c5f50bdece3d4ac2b3874b0a32deb9f957b93 --- /dev/null +++ b/third_party/cosyvoice/hifigan/f0_predictor.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +import torch.nn as nn +from torch.nn.utils.parametrizations import weight_norm + + +class ConvRNNF0Predictor(nn.Module): + def __init__(self, + num_class: int = 1, + in_channels: int = 80, + cond_channels: int = 512 + ): + super().__init__() + + self.num_class = num_class + self.condnet = nn.Sequential( + weight_norm( + nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + ) + self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.condnet(x) + x = x.transpose(1, 2) + return torch.abs(self.classifier(x).squeeze(-1)) diff --git a/third_party/cosyvoice/hifigan/generator.py b/third_party/cosyvoice/hifigan/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..c47bf05111bdb6afc8d12ecabdd1a59443ebea09 --- /dev/null +++ b/third_party/cosyvoice/hifigan/generator.py @@ -0,0 +1,411 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""HIFI-GAN""" + +from typing import Dict, Optional, List +import numpy as np +from scipy.signal import get_window +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import Conv1d +from torch.nn import ConvTranspose1d +from torch.nn.utils import remove_weight_norm +from torch.nn.utils.parametrizations import weight_norm +from torch.distributions.uniform import Uniform + +from cosyvoice.transformer.activation import Snake +from cosyvoice.utils.common import get_padding +from cosyvoice.utils.common import init_weights + + +"""hifigan based generator implementation. 
+ +This code is modified from https://github.com/jik876/hifi-gan + ,https://github.com/kan-bayashi/ParallelWaveGAN and + https://github.com/NVIDIA/BigVGAN + +""" + + +class ResBlock(torch.nn.Module): + """Residual block module in HiFiGAN/BigVGAN.""" + def __init__( + self, + channels: int = 512, + kernel_size: int = 3, + dilations: List[int] = [1, 3, 5], + ): + super(ResBlock, self).__init__() + self.convs1 = nn.ModuleList() + self.convs2 = nn.ModuleList() + + for dilation in dilations: + self.convs1.append( + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation, + padding=get_padding(kernel_size, dilation) + ) + ) + ) + self.convs2.append( + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1) + ) + ) + ) + self.convs1.apply(init_weights) + self.convs2.apply(init_weights) + self.activations1 = nn.ModuleList([ + Snake(channels, alpha_logscale=False) + for _ in range(len(self.convs1)) + ]) + self.activations2 = nn.ModuleList([ + Snake(channels, alpha_logscale=False) + for _ in range(len(self.convs2)) + ]) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + for idx in range(len(self.convs1)): + xt = self.activations1[idx](x) + xt = self.convs1[idx](xt) + xt = self.activations2[idx](xt) + xt = self.convs2[idx](xt) + x = xt + x + return x + + def remove_weight_norm(self): + for idx in range(len(self.convs1)): + remove_weight_norm(self.convs1[idx]) + remove_weight_norm(self.convs2[idx]) + + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, samp_rate, harmonic_num=0, + sine_amp=0.1, noise_std=0.003, + voiced_threshold=0): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + + def _f02uv(self, f0): + # generate uv signal + uv = (f0 > self.voiced_threshold).type(torch.float32) + return uv + + @torch.no_grad() + def forward(self, f0): + """ + :param f0: [B, 1, sample_len], Hz + :return: [B, 1, sample_len] + """ + + F_mat = torch.zeros((f0.size(0), self.harmonic_num + 1, f0.size(-1))).to(f0.device) + for i in range(self.harmonic_num + 1): + F_mat[:, i: i + 1, :] = f0 * (i + 1) / self.sampling_rate + + theta_mat = 2 * np.pi * (torch.cumsum(F_mat, dim=-1) % 1) + u_dist = Uniform(low=-np.pi, high=np.pi) + phase_vec = u_dist.sample(sample_shape=(f0.size(0), self.harmonic_num + 1, 1)).to(F_mat.device) + phase_vec[:, 0, :] = 0 + + # generate sine waveforms + sine_waves = self.sine_amp * torch.sin(theta_mat + phase_vec) + + # generate uv signal + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . 
for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + """ SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + + # to produce sine waveforms + self.l_sin_gen = SineGen(sampling_rate, harmonic_num, + sine_amp, add_noise_std, voiced_threshod) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x): + """ + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + """ + # source for harmonic branch + with torch.no_grad(): + sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2)) + sine_wavs = sine_wavs.transpose(1, 2) + uv = uv.transpose(1, 2) + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.sine_amp / 3 + return sine_merge, noise, uv + + +class HiFTGenerator(nn.Module): + """ + HiFTNet Generator: Neural Source Filter + ISTFTNet + https://arxiv.org/abs/2309.09493 + """ + def __init__( + self, + in_channels: int = 80, + base_channels: int = 512, + nb_harmonics: int = 8, + sampling_rate: int = 22050, + nsf_alpha: float = 0.1, + nsf_sigma: float = 0.003, + nsf_voiced_threshold: float = 10, + upsample_rates: List[int] = [8, 8], + upsample_kernel_sizes: List[int] = [16, 16], + istft_params: Dict[str, int] = {"n_fft": 16, "hop_len": 4}, + resblock_kernel_sizes: List[int] = [3, 7, 11], + resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + source_resblock_kernel_sizes: List[int] = [7, 11], + source_resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5]], + lrelu_slope: float = 0.1, + audio_limit: float = 0.99, + f0_predictor: torch.nn.Module = None, + ): + super(HiFTGenerator, self).__init__() + + self.out_channels = 1 + self.nb_harmonics = nb_harmonics + self.sampling_rate = sampling_rate + self.istft_params = istft_params + self.lrelu_slope = lrelu_slope + self.audio_limit = audio_limit + + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.m_source = SourceModuleHnNSF( + sampling_rate=sampling_rate, + upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"], + harmonic_num=nb_harmonics, + sine_amp=nsf_alpha, + add_noise_std=nsf_sigma, + 
voiced_threshod=nsf_voiced_threshold) + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"]) + + self.conv_pre = weight_norm( + Conv1d(in_channels, base_channels, 7, 1, padding=3) + ) + + # Up + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + base_channels // (2**i), + base_channels // (2**(i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + # Down + self.source_downs = nn.ModuleList() + self.source_resblocks = nn.ModuleList() + downsample_rates = [1] + upsample_rates[::-1][:-1] + downsample_cum_rates = np.cumprod(downsample_rates) + for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes, source_resblock_dilation_sizes)): + if u == 1: + self.source_downs.append( + Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1) + ) + else: + self.source_downs.append( + Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2)) + ) + + self.source_resblocks.append( + ResBlock(base_channels // (2 ** (i + 1)), k, d) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = base_channels // (2**(i + 1)) + for _, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(ResBlock(ch, k, d)) + + self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + self.reflection_pad = nn.ReflectionPad1d((1, 0)) + self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32)) + self.f0_predictor = f0_predictor + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + self.m_source.remove_weight_norm() + for l in self.source_downs: + remove_weight_norm(l) + for l in self.source_resblocks: + l.remove_weight_norm() + + def _stft(self, x): + spec = torch.stft( + x, + self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device), + return_complex=True) + spec = torch.view_as_real(spec) # [B, F, TT, 2] + return spec[..., 0], spec[..., 1] + + def _istft(self, magnitude, phase): + magnitude = torch.clip(magnitude, max=1e2) + real = magnitude * torch.cos(phase) + img = magnitude * torch.sin(phase) + inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"], + self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device)) + return inverse_transform + + def decode(self, x: torch.Tensor, s: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor: + s_stft_real, s_stft_imag = self._stft(s.squeeze(1)) + s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1) + + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, self.lrelu_slope) + x = self.ups[i](x) + + if i == self.num_upsamples - 1: + x = self.reflection_pad(x) + + # fusion + si = self.source_downs[i](s_stft) + si = self.source_resblocks[i](si) + x = x + si + + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + x = F.leaky_relu(x) + x = 
self.conv_post(x) + magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :]) + phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :]) # actually, sin is redundancy + + x = self._istft(magnitude, phase) + x = torch.clamp(x, -self.audio_limit, self.audio_limit) + return x + + def forward( + self, + batch: dict, + device: torch.device, + ) -> Dict[str, Optional[torch.Tensor]]: + speech_feat = batch['speech_feat'].transpose(1, 2).to(device) + # mel->f0 + f0 = self.f0_predictor(speech_feat) + # f0->source + s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t + s, _, _ = self.m_source(s) + s = s.transpose(1, 2) + # mel+source->speech + generated_speech = self.decode(x=speech_feat, s=s) + return generated_speech, f0 + + @torch.inference_mode() + def inference(self, speech_feat: torch.Tensor, cache_source: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor: + # mel->f0 + f0 = self.f0_predictor(speech_feat) + # f0->source + s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t + s, _, _ = self.m_source(s) + s = s.transpose(1, 2) + # use cache_source to avoid glitch + if cache_source.shape[2] != 0: + s[:, :, :cache_source.shape[2]] = cache_source + generated_speech = self.decode(x=speech_feat, s=s) + return generated_speech, s diff --git a/third_party/cosyvoice/hifigan/hifigan.py b/third_party/cosyvoice/hifigan/hifigan.py new file mode 100644 index 0000000000000000000000000000000000000000..de623cce3aa096b27050063a28efd653ede132cb --- /dev/null +++ b/third_party/cosyvoice/hifigan/hifigan.py @@ -0,0 +1,67 @@ +from typing import Dict, Optional +import torch +import torch.nn as nn +import torch.nn.functional as F +from matcha.hifigan.models import feature_loss, generator_loss, discriminator_loss +from cosyvoice.utils.losses import tpr_loss, mel_loss + + +class HiFiGan(nn.Module): + def __init__(self, generator, discriminator, mel_spec_transform, + multi_mel_spectral_recon_loss_weight=45, feat_match_loss_weight=2.0, + tpr_loss_weight=1.0, tpr_loss_tau=0.04): + super(HiFiGan, self).__init__() + self.generator = generator + self.discriminator = discriminator + self.mel_spec_transform = mel_spec_transform + self.multi_mel_spectral_recon_loss_weight = multi_mel_spectral_recon_loss_weight + self.feat_match_loss_weight = feat_match_loss_weight + self.tpr_loss_weight = tpr_loss_weight + self.tpr_loss_tau = tpr_loss_tau + + def forward( + self, + batch: dict, + device: torch.device, + ) -> Dict[str, Optional[torch.Tensor]]: + if batch['turn'] == 'generator': + return self.forward_generator(batch, device) + else: + return self.forward_discriminator(batch, device) + + def forward_generator(self, batch, device): + real_speech = batch['speech'].to(device) + pitch_feat = batch['pitch_feat'].to(device) + # 1. calculate generator outputs + generated_speech, generated_f0 = self.generator(batch, device) + # 2. calculate discriminator outputs + y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech) + # 3. 
calculate generator losses, feature loss, mel loss, tpr losses [Optional] + loss_gen, _ = generator_loss(y_d_gs) + loss_fm = feature_loss(fmap_rs, fmap_gs) + loss_mel = mel_loss(real_speech, generated_speech, self.mel_spec_transform) + if self.tpr_loss_weight != 0: + loss_tpr = tpr_loss(y_d_rs, y_d_gs, self.tpr_loss_tau) + else: + loss_tpr = torch.zeros(1).to(device) + loss_f0 = F.l1_loss(generated_f0, pitch_feat) + loss = loss_gen + self.feat_match_loss_weight * loss_fm + \ + self.multi_mel_spectral_recon_loss_weight * loss_mel + \ + self.tpr_loss_weight * loss_tpr + loss_f0 + return {'loss': loss, 'loss_gen': loss_gen, 'loss_fm': loss_fm, 'loss_mel': loss_mel, 'loss_tpr': loss_tpr, 'loss_f0': loss_f0} + + def forward_discriminator(self, batch, device): + real_speech = batch['speech'].to(device) + # 1. calculate generator outputs + with torch.no_grad(): + generated_speech, generated_f0 = self.generator(batch, device) + # 2. calculate discriminator outputs + y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech) + # 3. calculate discriminator losses, tpr losses [Optional] + loss_disc, _, _ = discriminator_loss(y_d_rs, y_d_gs) + if self.tpr_loss_weight != 0: + loss_tpr = tpr_loss(y_d_rs, y_d_gs, self.tpr_loss_tau) + else: + loss_tpr = torch.zeros(1).to(device) + loss = loss_disc + self.tpr_loss_weight * loss_tpr + return {'loss': loss, 'loss_disc': loss_disc, 'loss_tpr': loss_tpr} diff --git a/third_party/cosyvoice/transformer/__init__.py b/third_party/cosyvoice/transformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/third_party/cosyvoice/transformer/activation.py b/third_party/cosyvoice/transformer/activation.py new file mode 100644 index 0000000000000000000000000000000000000000..8cea54816385d3b6585ccc2417bc71630d578177 --- /dev/null +++ b/third_party/cosyvoice/transformer/activation.py @@ -0,0 +1,84 @@ +# Copyright (c) 2020 Johns Hopkins University (Shinji Watanabe) +# 2020 Northwestern Polytechnical University (Pengcheng Guo) +# 2020 Mobvoi Inc (Binbin Zhang) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Swish() activation function for Conformer.""" + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Swish(torch.nn.Module): + """Construct an Swish object.""" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Return Swish activation function.""" + return x * torch.sigmoid(x) + + +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. 
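# Editorial note (illustrative, not part of the original source): the Snake
# activation below computes x + (1 / alpha) * sin^2(alpha * x) per channel with
# a learnable alpha, introducing a periodic inductive bias that suits audio
# waveform modelling. Example usage with made-up sizes:
#
#   >>> act = Snake(in_features=256)            # one alpha per channel
#   >>> y = act(torch.randn(4, 256, 100))       # (B, C, T) -> (B, C, T), same shape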
+class Snake(nn.Module): + ''' + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. + ''' + super(Snake, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. + Snake ∶= x + 1/a * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/third_party/cosyvoice/transformer/convolution.py b/third_party/cosyvoice/transformer/convolution.py new file mode 100644 index 0000000000000000000000000000000000000000..4d5d96149154776000991a681a666fbe55e562fe --- /dev/null +++ b/third_party/cosyvoice/transformer/convolution.py @@ -0,0 +1,145 @@ +# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""ConvolutionModule definition.""" + +from typing import Tuple + +import torch +from torch import nn + + +class ConvolutionModule(nn.Module): + """ConvolutionModule in Conformer model.""" + + def __init__(self, + channels: int, + kernel_size: int = 15, + activation: nn.Module = nn.ReLU(), + norm: str = "batch_norm", + causal: bool = False, + bias: bool = True): + """Construct an ConvolutionModule object. + Args: + channels (int): The number of channels of conv layers. + kernel_size (int): Kernel size of conv layers. + causal (int): Whether use causal convolution or not + """ + super().__init__() + + self.pointwise_conv1 = nn.Conv1d( + channels, + 2 * channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + # self.lorder is used to distinguish if it's a causal convolution, + # if self.lorder > 0: it's a causal convolution, the input will be + # padded with self.lorder frames on the left in forward. 
+ # else: it's a symmetrical convolution + if causal: + padding = 0 + self.lorder = kernel_size - 1 + else: + # kernel_size should be an odd number for none causal convolution + assert (kernel_size - 1) % 2 == 0 + padding = (kernel_size - 1) // 2 + self.lorder = 0 + self.depthwise_conv = nn.Conv1d( + channels, + channels, + kernel_size, + stride=1, + padding=padding, + groups=channels, + bias=bias, + ) + + assert norm in ['batch_norm', 'layer_norm'] + if norm == "batch_norm": + self.use_layer_norm = False + self.norm = nn.BatchNorm1d(channels) + else: + self.use_layer_norm = True + self.norm = nn.LayerNorm(channels) + + self.pointwise_conv2 = nn.Conv1d( + channels, + channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + self.activation = activation + + def forward( + self, + x: torch.Tensor, + mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + cache: torch.Tensor = torch.zeros((0, 0, 0)), + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Compute convolution module. + Args: + x (torch.Tensor): Input tensor (#batch, time, channels). + mask_pad (torch.Tensor): used for batch padding (#batch, 1, time), + (0, 0, 0) means fake mask. + cache (torch.Tensor): left context cache, it is only + used in causal convolution (#batch, channels, cache_t), + (0, 0, 0) meas fake cache. + Returns: + torch.Tensor: Output tensor (#batch, time, channels). + """ + # exchange the temporal dimension and the feature dimension + x = x.transpose(1, 2) # (#batch, channels, time) + + # mask batch padding + if mask_pad.size(2) > 0: # time > 0 + x.masked_fill_(~mask_pad, 0.0) + + if self.lorder > 0: + if cache.size(2) == 0: # cache_t == 0 + x = nn.functional.pad(x, (self.lorder, 0), 'constant', 0.0) + else: + assert cache.size(0) == x.size(0) # equal batch + assert cache.size(1) == x.size(1) # equal channel + x = torch.cat((cache, x), dim=2) + assert (x.size(2) > self.lorder) + new_cache = x[:, :, -self.lorder:] + else: + # It's better we just return None if no cache is required, + # However, for JIT export, here we just fake one tensor instead of + # None. + new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device) + + # GLU mechanism + x = self.pointwise_conv1(x) # (batch, 2*channel, dim) + x = nn.functional.glu(x, dim=1) # (batch, channel, dim) + + # 1D Depthwise Conv + x = self.depthwise_conv(x) + if self.use_layer_norm: + x = x.transpose(1, 2) + x = self.activation(self.norm(x)) + if self.use_layer_norm: + x = x.transpose(1, 2) + x = self.pointwise_conv2(x) + # mask batch padding + if mask_pad.size(2) > 0: # time > 0 + x.masked_fill_(~mask_pad, 0.0) + + return x.transpose(1, 2), new_cache diff --git a/third_party/cosyvoice/transformer/encoder_layer.py b/third_party/cosyvoice/transformer/encoder_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..efbb12dd365770bebe8bca75276fe63be260a08f --- /dev/null +++ b/third_party/cosyvoice/transformer/encoder_layer.py @@ -0,0 +1,236 @@ +# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu) +# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Encoder self-attention layer definition.""" + +from typing import Optional, Tuple + +import torch +from torch import nn + + +class TransformerEncoderLayer(nn.Module): + """Encoder layer module. + + Args: + size (int): Input dimension. + self_attn (torch.nn.Module): Self-attention module instance. + `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` + instance can be used as the argument. + feed_forward (torch.nn.Module): Feed-forward module instance. + `PositionwiseFeedForward`, instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): + True: use layer_norm before each sub-block. + False: to use layer_norm after each sub-block. + """ + + def __init__( + self, + size: int, + self_attn: torch.nn.Module, + feed_forward: torch.nn.Module, + dropout_rate: float, + normalize_before: bool = True, + ): + """Construct an EncoderLayer object.""" + super().__init__() + self.self_attn = self_attn + self.feed_forward = feed_forward + self.norm1 = nn.LayerNorm(size, eps=1e-12) + self.norm2 = nn.LayerNorm(size, eps=1e-12) + self.dropout = nn.Dropout(dropout_rate) + self.size = size + self.normalize_before = normalize_before + + def forward( + self, + x: torch.Tensor, + mask: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute encoded features. + + Args: + x (torch.Tensor): (#batch, time, size) + mask (torch.Tensor): Mask tensor for the input (#batch, time,time), + (0, 0, 0) means fake mask. + pos_emb (torch.Tensor): just for interface compatibility + to ConformerEncoderLayer + mask_pad (torch.Tensor): does not used in transformer layer, + just for unified api with conformer. + att_cache (torch.Tensor): Cache tensor of the KEY & VALUE + (#batch=1, head, cache_t1, d_k * 2), head * d_k == size. + cnn_cache (torch.Tensor): Convolution cache in conformer layer + (#batch=1, size, cache_t2), not used here, it's for interface + compatibility to ConformerEncoderLayer. + Returns: + torch.Tensor: Output tensor (#batch, time, size). + torch.Tensor: Mask tensor (#batch, time, time). + torch.Tensor: att_cache tensor, + (#batch=1, head, cache_t1 + time, d_k * 2). + torch.Tensor: cnn_cahce tensor (#batch=1, size, cache_t2). + + """ + residual = x + if self.normalize_before: + x = self.norm1(x) + x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb=pos_emb, cache=att_cache) + x = residual + self.dropout(x_att) + if not self.normalize_before: + x = self.norm1(x) + + residual = x + if self.normalize_before: + x = self.norm2(x) + x = residual + self.dropout(self.feed_forward(x)) + if not self.normalize_before: + x = self.norm2(x) + + fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device) + return x, mask, new_att_cache, fake_cnn_cache + + +class ConformerEncoderLayer(nn.Module): + """Encoder layer module. + Args: + size (int): Input dimension. + self_attn (torch.nn.Module): Self-attention module instance. + `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` + instance can be used as the argument. + feed_forward (torch.nn.Module): Feed-forward module instance. + `PositionwiseFeedForward` instance can be used as the argument. 
+ feed_forward_macaron (torch.nn.Module): Additional feed-forward module + instance. + `PositionwiseFeedForward` instance can be used as the argument. + conv_module (torch.nn.Module): Convolution module instance. + `ConvlutionModule` instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): + True: use layer_norm before each sub-block. + False: use layer_norm after each sub-block. + """ + + def __init__( + self, + size: int, + self_attn: torch.nn.Module, + feed_forward: Optional[nn.Module] = None, + feed_forward_macaron: Optional[nn.Module] = None, + conv_module: Optional[nn.Module] = None, + dropout_rate: float = 0.1, + normalize_before: bool = True, + ): + """Construct an EncoderLayer object.""" + super().__init__() + self.self_attn = self_attn + self.feed_forward = feed_forward + self.feed_forward_macaron = feed_forward_macaron + self.conv_module = conv_module + self.norm_ff = nn.LayerNorm(size, eps=1e-12) # for the FNN module + self.norm_mha = nn.LayerNorm(size, eps=1e-12) # for the MHA module + if feed_forward_macaron is not None: + self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-12) + self.ff_scale = 0.5 + else: + self.ff_scale = 1.0 + if self.conv_module is not None: + self.norm_conv = nn.LayerNorm(size, eps=1e-12) # for the CNN module + self.norm_final = nn.LayerNorm( + size, eps=1e-12) # for the final output of the block + self.dropout = nn.Dropout(dropout_rate) + self.size = size + self.normalize_before = normalize_before + + def forward( + self, + x: torch.Tensor, + mask: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute encoded features. + + Args: + x (torch.Tensor): (#batch, time, size) + mask (torch.Tensor): Mask tensor for the input (#batch, time,time), + (0, 0, 0) means fake mask. + pos_emb (torch.Tensor): positional encoding, must not be None + for ConformerEncoderLayer. + mask_pad (torch.Tensor): batch padding mask used for conv module. + (#batch, 1,time), (0, 0, 0) means fake mask. + att_cache (torch.Tensor): Cache tensor of the KEY & VALUE + (#batch=1, head, cache_t1, d_k * 2), head * d_k == size. + cnn_cache (torch.Tensor): Convolution cache in conformer layer + (#batch=1, size, cache_t2) + Returns: + torch.Tensor: Output tensor (#batch, time, size). + torch.Tensor: Mask tensor (#batch, time, time). + torch.Tensor: att_cache tensor, + (#batch=1, head, cache_t1 + time, d_k * 2). + torch.Tensor: cnn_cahce tensor (#batch, size, cache_t2). 
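+
+        NOTE: with macaron style and the convolution module enabled, the
+            computation order implemented below is: 1/2 FFN (macaron) ->
+            self-attention -> conv module -> 1/2 FFN -> final LayerNorm.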
+ """ + + # whether to use macaron style + if self.feed_forward_macaron is not None: + residual = x + if self.normalize_before: + x = self.norm_ff_macaron(x) + x = residual + self.ff_scale * self.dropout( + self.feed_forward_macaron(x)) + if not self.normalize_before: + x = self.norm_ff_macaron(x) + + # multi-headed self-attention module + residual = x + if self.normalize_before: + x = self.norm_mha(x) + x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb, + att_cache) + x = residual + self.dropout(x_att) + if not self.normalize_before: + x = self.norm_mha(x) + + # convolution module + # Fake new cnn cache here, and then change it in conv_module + new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device) + if self.conv_module is not None: + residual = x + if self.normalize_before: + x = self.norm_conv(x) + x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache) + x = residual + self.dropout(x) + + if not self.normalize_before: + x = self.norm_conv(x) + + # feed forward module + residual = x + if self.normalize_before: + x = self.norm_ff(x) + + x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) + if not self.normalize_before: + x = self.norm_ff(x) + + if self.conv_module is not None: + x = self.norm_final(x) + + return x, mask, new_att_cache, new_cnn_cache diff --git a/third_party/cosyvoice/transformer/label_smoothing_loss.py b/third_party/cosyvoice/transformer/label_smoothing_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..feacabf09609ee6eb047c89ce18d372256c72c71 --- /dev/null +++ b/third_party/cosyvoice/transformer/label_smoothing_loss.py @@ -0,0 +1,96 @@ +# Copyright (c) 2019 Shigeki Karita +# 2020 Mobvoi Inc (Binbin Zhang) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Label smoothing module.""" + +import torch +from torch import nn + + +class LabelSmoothingLoss(nn.Module): + """Label-smoothing loss. + + In a standard CE loss, the label's data distribution is: + [0,1,2] -> + [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + ] + + In the smoothing version CE Loss,some probabilities + are taken from the true label prob (1.0) and are divided + among other labels. + + e.g. 
+ smoothing=0.1 + [0,1,2] -> + [ + [0.9, 0.05, 0.05], + [0.05, 0.9, 0.05], + [0.05, 0.05, 0.9], + ] + + Args: + size (int): the number of class + padding_idx (int): padding class id which will be ignored for loss + smoothing (float): smoothing rate (0.0 means the conventional CE) + normalize_length (bool): + normalize loss by sequence length if True + normalize loss by batch size if False + """ + + def __init__(self, + size: int, + padding_idx: int, + smoothing: float, + normalize_length: bool = False): + """Construct an LabelSmoothingLoss object.""" + super(LabelSmoothingLoss, self).__init__() + self.criterion = nn.KLDivLoss(reduction="none") + self.padding_idx = padding_idx + self.confidence = 1.0 - smoothing + self.smoothing = smoothing + self.size = size + self.normalize_length = normalize_length + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Compute loss between x and target. + + The model outputs and data labels tensors are flatten to + (batch*seqlen, class) shape and a mask is applied to the + padding part which should not be calculated for loss. + + Args: + x (torch.Tensor): prediction (batch, seqlen, class) + target (torch.Tensor): + target signal masked with self.padding_id (batch, seqlen) + Returns: + loss (torch.Tensor) : The KL loss, scalar float value + """ + assert x.size(2) == self.size + batch_size = x.size(0) + x = x.view(-1, self.size) + target = target.view(-1) + # use zeros_like instead of torch.no_grad() for true_dist, + # since no_grad() can not be exported by JIT + true_dist = torch.zeros_like(x) + true_dist.fill_(self.smoothing / (self.size - 1)) + ignore = target == self.padding_idx # (B,) + total = len(target) - ignore.sum().item() + target = target.masked_fill(ignore, 0) # avoid -1 index + true_dist.scatter_(1, target.unsqueeze(1), self.confidence) + kl = self.criterion(torch.log_softmax(x, dim=1), true_dist) + denom = total if self.normalize_length else batch_size + return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom diff --git a/third_party/cosyvoice/transformer/positionwise_feed_forward.py b/third_party/cosyvoice/transformer/positionwise_feed_forward.py new file mode 100644 index 0000000000000000000000000000000000000000..b7a2cf6e7315e3a5ed2794423daff0a59cc5b208 --- /dev/null +++ b/third_party/cosyvoice/transformer/positionwise_feed_forward.py @@ -0,0 +1,115 @@ +# Copyright (c) 2019 Shigeki Karita +# 2020 Mobvoi Inc (Binbin Zhang) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Positionwise feed forward layer definition.""" + +import torch + + +class PositionwiseFeedForward(torch.nn.Module): + """Positionwise feed forward layer. + + FeedForward are appied on each position of the sequence. + The output dim is same with the input dim. + + Args: + idim (int): Input dimenstion. + hidden_units (int): The number of hidden units. + dropout_rate (float): Dropout rate. 
+ activation (torch.nn.Module): Activation function + """ + + def __init__( + self, + idim: int, + hidden_units: int, + dropout_rate: float, + activation: torch.nn.Module = torch.nn.ReLU(), + ): + """Construct a PositionwiseFeedForward object.""" + super(PositionwiseFeedForward, self).__init__() + self.w_1 = torch.nn.Linear(idim, hidden_units) + self.activation = activation + self.dropout = torch.nn.Dropout(dropout_rate) + self.w_2 = torch.nn.Linear(hidden_units, idim) + + def forward(self, xs: torch.Tensor) -> torch.Tensor: + """Forward function. + + Args: + xs: input tensor (B, L, D) + Returns: + output tensor, (B, L, D) + """ + return self.w_2(self.dropout(self.activation(self.w_1(xs)))) + + +class MoEFFNLayer(torch.nn.Module): + """ + Mixture of expert with Positionwise feed forward layer + See also figure 1 in https://arxiv.org/pdf/2305.15663.pdf + The output dim is same with the input dim. + + Modified from https://github.com/Lightning-AI/lit-gpt/pull/823 + https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219 + Args: + n_expert: number of expert. + n_expert_per_token: The actual number of experts used for each frame + idim (int): Input dimenstion. + hidden_units (int): The number of hidden units. + dropout_rate (float): Dropout rate. + activation (torch.nn.Module): Activation function + """ + + def __init__( + self, + n_expert: int, + n_expert_per_token: int, + idim: int, + hidden_units: int, + dropout_rate: float, + activation: torch.nn.Module = torch.nn.ReLU(), + ): + super(MoEFFNLayer, self).__init__() + self.gate = torch.nn.Linear(idim, n_expert, bias=False) + self.experts = torch.nn.ModuleList( + PositionwiseFeedForward(idim, hidden_units, dropout_rate, + activation) for _ in range(n_expert)) + self.n_expert_per_token = n_expert_per_token + + def forward(self, xs: torch.Tensor) -> torch.Tensor: + """Foward function. + Args: + xs: input tensor (B, L, D) + Returns: + output tensor, (B, L, D) + + """ + B, L, D = xs.size( + ) # batch size, sequence length, embedding dimension (idim) + xs = xs.view(-1, D) # (B*L, D) + router = self.gate(xs) # (B*L, n_expert) + logits, indices = torch.topk( + router, self.n_expert_per_token + ) # probs:(B*L, n_expert), indices: (B*L, n_expert) + weights = torch.nn.functional.softmax( + logits, dim=1, + dtype=torch.float).to(dtype=xs.dtype) # (B*L, n_expert_per_token) + output = torch.zeros_like(xs) # (B*L, D) + for i, expert in enumerate(self.experts): + mask = indices == i + batch_idx, ith_expert = torch.where(mask) + output[batch_idx] += weights[batch_idx, ith_expert, None] * expert( + xs[batch_idx]) + return output.view(B, L, D) diff --git a/third_party/cosyvoice/transformer/subsampling.py b/third_party/cosyvoice/transformer/subsampling.py new file mode 100644 index 0000000000000000000000000000000000000000..e17c2e324e3afb24e1b619effe29cef07c9c5b3a --- /dev/null +++ b/third_party/cosyvoice/transformer/subsampling.py @@ -0,0 +1,383 @@ +# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Subsampling layer definition.""" + +from typing import Tuple, Union + +import torch + + +class BaseSubsampling(torch.nn.Module): + + def __init__(self): + super().__init__() + self.right_context = 0 + self.subsampling_rate = 1 + + def position_encoding(self, offset: Union[int, torch.Tensor], + size: int) -> torch.Tensor: + return self.pos_enc.position_encoding(offset, size) + + +class EmbedinigNoSubsampling(BaseSubsampling): + """Embedding input without subsampling + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + super().__init__() + self.embed = torch.nn.Embedding(idim, odim) + self.pos_enc = pos_enc_class + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Input x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: linear input tensor (#batch, time', odim), + where time' = time . + torch.Tensor: linear input mask (#batch, 1, time'), + where time' = time . + + """ + x = self.embed(x) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask + + +class LinearNoSubsampling(BaseSubsampling): + """Linear transform the input without subsampling + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an linear object.""" + super().__init__() + self.out = torch.nn.Sequential( + torch.nn.Linear(idim, odim), + torch.nn.LayerNorm(odim, eps=1e-5), + torch.nn.Dropout(dropout_rate), + ) + self.pos_enc = pos_enc_class + self.right_context = 0 + self.subsampling_rate = 1 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Input x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: linear input tensor (#batch, time', odim), + where time' = time . + torch.Tensor: linear input mask (#batch, 1, time'), + where time' = time . + + """ + x = self.out(x) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask + + +class Conv1dSubsampling2(BaseSubsampling): + """Convolutional 1D subsampling (to 1/2 length). + It is designed for Whisper, ref: + https://github.com/openai/whisper/blob/main/whisper/model.py + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. 
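+        pos_enc_class (torch.nn.Module): Positional encoding module applied
+            after the convolutional subsampling.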
+ + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv1dSubsampling2 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv1d(idim, odim, kernel_size=3, padding=1), + torch.nn.GELU(), + torch.nn.Conv1d(odim, odim, kernel_size=3, stride=2, padding=1), + torch.nn.GELU(), + ) + self.pos_enc = pos_enc_class + # The right context for every conv layer is computed by: + # (kernel_size - 1) * frame_rate_of_this_layer + self.subsampling_rate = 2 + # 4 = (3 - 1) * 1 + (3 - 1) * 1 + self.right_context = 4 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 2. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 2. + torch.Tensor: positional encoding + + """ + time = x.size(1) + x = x.transpose(1, 2) # (b, f, t) + x = self.conv(x) + x = x.transpose(1, 2) # (b, t, f) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, (time + 1) % 2::2] + + +class Conv2dSubsampling4(BaseSubsampling): + """Convolutional 2D subsampling (to 1/4 length). + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv2dSubsampling4 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv2d(1, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 3, 2), + torch.nn.ReLU(), + ) + self.out = torch.nn.Sequential( + torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim)) + self.pos_enc = pos_enc_class + # The right context for every conv layer is computed by: + # (kernel_size - 1) * frame_rate_of_this_layer + self.subsampling_rate = 4 + # 6 = (3 - 1) * 1 + (3 - 1) * 2 + self.right_context = 6 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 4. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 4. + torch.Tensor: positional encoding + + """ + x = x.unsqueeze(1) # (b, c=1, t, f) + x = self.conv(x) + b, c, t, f = x.size() + x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2] + + +class Conv2dSubsampling6(BaseSubsampling): + """Convolutional 2D subsampling (to 1/6 length). + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + pos_enc (torch.nn.Module): Custom position encoding layer. 
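+
+    NOTE: the time dimension is reduced by roughly a factor of 6
+        (a stride-2 conv followed by a stride-3 conv); see
+        subsampling_rate = 6 in __init__.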
+ """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv2dSubsampling6 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv2d(1, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 5, 3), + torch.nn.ReLU(), + ) + self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), + odim) + self.pos_enc = pos_enc_class + # 10 = (3 - 1) * 1 + (5 - 1) * 2 + self.subsampling_rate = 6 + self.right_context = 10 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 6. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 6. + torch.Tensor: positional encoding + """ + x = x.unsqueeze(1) # (b, c, t, f) + x = self.conv(x) + b, c, t, f = x.size() + x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f)) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3] + + +class Conv2dSubsampling8(BaseSubsampling): + """Convolutional 2D subsampling (to 1/8 length). + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv2dSubsampling8 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv2d(1, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 3, 2), + torch.nn.ReLU(), + ) + self.linear = torch.nn.Linear( + odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim) + self.pos_enc = pos_enc_class + self.subsampling_rate = 8 + # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4 + self.right_context = 14 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 8. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 8. + torch.Tensor: positional encoding + """ + x = x.unsqueeze(1) # (b, c, t, f) + x = self.conv(x) + b, c, t, f = x.size() + x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f)) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2] + + +class LegacyLinearNoSubsampling(BaseSubsampling): + """Linear transform the input without subsampling + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. 
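+        pos_enc_class (torch.nn.Module): Positional encoding module.
+
+    NOTE: unlike LinearNoSubsampling above, this legacy variant appends a
+        ReLU after the dropout in its output projection.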
+ + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an linear object.""" + super().__init__() + self.out = torch.nn.Sequential( + torch.nn.Linear(idim, odim), + torch.nn.LayerNorm(odim, eps=1e-5), + torch.nn.Dropout(dropout_rate), + torch.nn.ReLU(), + ) + self.pos_enc = pos_enc_class + self.right_context = 0 + self.subsampling_rate = 1 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Input x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: linear input tensor (#batch, time', odim), + where time' = time . + torch.Tensor: linear input mask (#batch, 1, time'), + where time' = time . + + """ + x = self.out(x) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask diff --git a/third_party/cosyvoice/transformer/upsample_encoder.py b/third_party/cosyvoice/transformer/upsample_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f67fb985cd0a56b7a6000253104fff137056bf58 --- /dev/null +++ b/third_party/cosyvoice/transformer/upsample_encoder.py @@ -0,0 +1,318 @@ +# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu) +# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Encoder definition.""" +from typing import Tuple + +import torch +from torch import nn +from torch.nn import functional as F + +from cosyvoice.transformer.convolution import ConvolutionModule +from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer +from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward +from cosyvoice.utils.class_utils import ( + COSYVOICE_EMB_CLASSES, + COSYVOICE_SUBSAMPLE_CLASSES, + COSYVOICE_ATTENTION_CLASSES, + COSYVOICE_ACTIVATION_CLASSES, +) +from cosyvoice.utils.mask import make_pad_mask +from cosyvoice.utils.mask import add_optional_chunk_mask + + +class Upsample1D(nn.Module): + """A 1D upsampling layer with an optional convolution. + + Parameters: + channels (`int`): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + use_conv_transpose (`bool`, default `False`): + option to use a convolution transpose. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. 
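+        stride (`int`, default `2`):
+            upsampling factor. The input is nearest-neighbour interpolated by
+            `stride`, left-padded with `stride * 2` zeros and passed through a
+            kernel-size `stride * 2 + 1` convolution with stride 1, so an
+            input of length T produces an output of length `stride * T`
+            (matching the returned `input_lengths * stride`).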
+ """ + + def __init__(self, channels: int, out_channels: int, stride: int = 2): + super().__init__() + self.channels = channels + self.out_channels = out_channels + self.stride = stride + # In this mode, first repeat interpolate, than conv with stride=1 + self.conv = nn.Conv1d(self.channels, self.out_channels, stride * 2 + 1, stride=1, padding=0) + + def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor): + outputs = F.interpolate(inputs, scale_factor=float(self.stride), mode="nearest") + outputs = F.pad(outputs, (self.stride * 2, 0), value=0.0) + outputs = self.conv(outputs) + return outputs, input_lengths * self.stride + + +class PreLookaheadLayer(nn.Module): + def __init__(self, channels: int, pre_lookahead_len: int = 1): + super().__init__() + self.channels = channels + self.pre_lookahead_len = pre_lookahead_len + self.conv1 = nn.Conv1d( + channels, channels, + kernel_size=pre_lookahead_len + 1, + stride=1, padding=0, + ) + self.conv2 = nn.Conv1d( + channels, channels, + kernel_size=3, stride=1, padding=0, + ) + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + """ + inputs: (batch_size, seq_len, channels) + """ + outputs = inputs.transpose(1, 2).contiguous() + # look ahead + outputs = F.pad(outputs, (0, self.pre_lookahead_len), mode='constant', value=0.0) + outputs = F.leaky_relu(self.conv1(outputs)) + # outputs + outputs = F.pad(outputs, (2, 0), mode='constant', value=0.0) + outputs = self.conv2(outputs) + outputs = outputs.transpose(1, 2).contiguous() + + # residual connection + outputs = outputs + inputs + return outputs + + +class UpsampleConformerEncoder(torch.nn.Module): + + def __init__( + self, + input_size: int, + output_size: int = 256, + attention_heads: int = 4, + linear_units: int = 2048, + num_blocks: int = 6, + dropout_rate: float = 0.1, + positional_dropout_rate: float = 0.1, + attention_dropout_rate: float = 0.0, + input_layer: str = "conv2d", + pos_enc_layer_type: str = "rel_pos", + normalize_before: bool = True, + static_chunk_size: int = 0, + use_dynamic_chunk: bool = False, + global_cmvn: torch.nn.Module = None, + use_dynamic_left_chunk: bool = False, + positionwise_conv_kernel_size: int = 1, + macaron_style: bool = True, + selfattention_layer_type: str = "rel_selfattn", + activation_type: str = "swish", + use_cnn_module: bool = True, + cnn_module_kernel: int = 15, + causal: bool = False, + cnn_module_norm: str = "batch_norm", + key_bias: bool = True, + gradient_checkpointing: bool = False, + ): + """ + Args: + input_size (int): input dim + output_size (int): dimension of attention + attention_heads (int): the number of heads of multi head attention + linear_units (int): the hidden units number of position-wise feed + forward + num_blocks (int): the number of decoder blocks + dropout_rate (float): dropout rate + attention_dropout_rate (float): dropout rate in attention + positional_dropout_rate (float): dropout rate after adding + positional encoding + input_layer (str): input layer type. + optional [linear, conv2d, conv2d6, conv2d8] + pos_enc_layer_type (str): Encoder positional encoding layer type. + opitonal [abs_pos, scaled_abs_pos, rel_pos, no_pos] + normalize_before (bool): + True: use layer_norm before each sub-block of a layer. + False: use layer_norm after each sub-block of a layer. 
+ static_chunk_size (int): chunk size for static chunk training and + decoding + use_dynamic_chunk (bool): whether use dynamic chunk size for + training or not, You can only use fixed chunk(chunk_size > 0) + or dyanmic chunk size(use_dynamic_chunk = True) + global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module + use_dynamic_left_chunk (bool): whether use dynamic left chunk in + dynamic chunk training + key_bias: whether use bias in attention.linear_k, False for whisper models. + gradient_checkpointing: rerunning a forward-pass segment for each + checkpointed segment during backward. + """ + super().__init__() + self._output_size = output_size + + self.global_cmvn = global_cmvn + self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer]( + input_size, + output_size, + dropout_rate, + COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size, + positional_dropout_rate), + ) + + self.normalize_before = normalize_before + self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5) + self.static_chunk_size = static_chunk_size + self.use_dynamic_chunk = use_dynamic_chunk + self.use_dynamic_left_chunk = use_dynamic_left_chunk + self.gradient_checkpointing = gradient_checkpointing + activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]() + # self-attention module definition + encoder_selfattn_layer_args = ( + attention_heads, + output_size, + attention_dropout_rate, + key_bias, + ) + # feed-forward module definition + positionwise_layer_args = ( + output_size, + linear_units, + dropout_rate, + activation, + ) + # convolution module definition + convolution_layer_args = (output_size, cnn_module_kernel, activation, + cnn_module_norm, causal) + self.pre_lookahead_layer = PreLookaheadLayer(channels=512, pre_lookahead_len=3) + self.encoders = torch.nn.ModuleList([ + ConformerEncoderLayer( + output_size, + COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type]( + *encoder_selfattn_layer_args), + PositionwiseFeedForward(*positionwise_layer_args), + PositionwiseFeedForward( + *positionwise_layer_args) if macaron_style else None, + ConvolutionModule( + *convolution_layer_args) if use_cnn_module else None, + dropout_rate, + normalize_before, + ) for _ in range(num_blocks) + ]) + self.up_layer = Upsample1D(channels=512, out_channels=512, stride=2) + self.up_embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer]( + input_size, + output_size, + dropout_rate, + COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size, + positional_dropout_rate), + ) + self.up_encoders = torch.nn.ModuleList([ + ConformerEncoderLayer( + output_size, + COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type]( + *encoder_selfattn_layer_args), + PositionwiseFeedForward(*positionwise_layer_args), + PositionwiseFeedForward( + *positionwise_layer_args) if macaron_style else None, + ConvolutionModule( + *convolution_layer_args) if use_cnn_module else None, + dropout_rate, + normalize_before, + ) for _ in range(4) + ]) + + def output_size(self) -> int: + return self._output_size + + def forward( + self, + xs: torch.Tensor, + xs_lens: torch.Tensor, + decoding_chunk_size: int = 0, + num_decoding_left_chunks: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Embed positions in tensor. + + Args: + xs: padded input tensor (B, T, D) + xs_lens: input length (B) + decoding_chunk_size: decoding chunk size for dynamic chunk + 0: default for training, use random dynamic chunk. + <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. 
+ num_decoding_left_chunks: number of left chunks, this is for decoding, + the chunk size is decoding_chunk_size. + >=0: use num_decoding_left_chunks + <0: use all left chunks + Returns: + encoder output tensor xs, and subsampled masks + xs: padded output tensor (B, T' ~= T/subsample_rate, D) + masks: torch.Tensor batch padding mask after subsample + (B, 1, T' ~= T/subsample_rate) + NOTE(xcsong): + We pass the `__call__` method of the modules instead of `forward` to the + checkpointing API because `__call__` attaches all the hooks of the module. + https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 + """ + T = xs.size(1) + masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T) + if self.global_cmvn is not None: + xs = self.global_cmvn(xs) + xs, pos_emb, masks = self.embed(xs, masks) + mask_pad = masks # (B, 1, T/subsample_rate) + chunk_masks = add_optional_chunk_mask(xs, masks, + self.use_dynamic_chunk, + self.use_dynamic_left_chunk, + decoding_chunk_size, + self.static_chunk_size, + num_decoding_left_chunks) + # lookahead + conformer encoder + xs = self.pre_lookahead_layer(xs) + xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad) + + # upsample + conformer encoder + xs = xs.transpose(1, 2).contiguous() + xs, xs_lens = self.up_layer(xs, xs_lens) + xs = xs.transpose(1, 2).contiguous() + T = xs.size(1) + masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T) + xs, pos_emb, masks = self.up_embed(xs, masks) + mask_pad = masks # (B, 1, T/subsample_rate) + chunk_masks = add_optional_chunk_mask(xs, masks, + self.use_dynamic_chunk, + self.use_dynamic_left_chunk, + decoding_chunk_size, + self.static_chunk_size * self.up_layer.stride, + num_decoding_left_chunks) + xs = self.forward_up_layers(xs, chunk_masks, pos_emb, mask_pad) + + if self.normalize_before: + xs = self.after_norm(xs) + # Here we assume the mask is not changed in encoder layers, so just + # return the masks before encoder layers, and the masks will be used + # for cross attention with decoder later + return xs, masks + + def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor) -> torch.Tensor: + for layer in self.encoders: + xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) + return xs + + def forward_up_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor) -> torch.Tensor: + for layer in self.up_encoders: + xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) + return xs diff --git a/third_party/model/llm/llm.py b/third_party/model/llm/llm.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1b656c29785df39364e57d5d3c87461d5dedb7 --- /dev/null +++ b/third_party/model/llm/llm.py @@ -0,0 +1,311 @@ +from operator import is_ +from click import Option, prompt +from numpy import dtype +from pydantic import InstanceOf +import torch +from torch import nn +from typing import List, Callable,Dict, Optional,Generator,AnyStr,Union + +import transformers +from cosyvoice.transformer.label_smoothing_loss import LabelSmoothingLoss +from cosyvoice.utils.common import IGNORE_ID +from torch.nn.utils.rnn import pad_sequence, unpad_sequence +import torch.nn.functional as F +from cosyvoice.utils.common import th_accuracy +from transformers import AutoModelForCausalLM, AutoTokenizer,AutoConfig +import time +class RWKV7LM(nn.Module): + def __init__( + self, + llm_input_size: int, + llm_output_size: int, + speech_token_size: int, + 
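# NOTE: `llm` accepts either an already-instantiated HF causal LM or a model path string;
+        # a path triggers AutoModelForCausalLM.from_config without loading weights (see below).
+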
llm: Union[AutoModelForCausalLM,AnyStr], + sampling: Callable, + length_normalized_loss: bool = True, + lsm_weight: float = 0.0, + mix_ratio: List[int] = [5, 15], + drop_ratio = 0.0, + vocab_size = 0, + ): + super(RWKV7LM, self).__init__() + self.llm_input_size = llm_input_size + self.llm_output_size = llm_output_size + self.speech_token_size = speech_token_size + + # 2. build speech token language model related modules + self.sos_eos = 0 + self.task_id = 1 + self.fill_token = 2 + + self.llm_embedding = torch.nn.Embedding(2, llm_input_size) + if isinstance(llm, str): + #load configuration and init model withouth loading weights + model_configuration = AutoConfig.from_pretrained(llm,trust_remote_code=True) + self.llm = AutoModelForCausalLM.from_config(model_configuration,trust_remote_code=True) + if vocab_size != 0: + from train_scripts.train_functions import alter_emb_and_head # Only used for inference + self.llm = alter_emb_and_head(self.llm,vocab_size,speech_token_size) + else: + self.llm = llm + self.text_embedding = self.llm.get_input_embeddings() + # self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 1) + self.criterion_ce = LabelSmoothingLoss( + size=speech_token_size + 1, + padding_idx=IGNORE_ID, + smoothing=lsm_weight, + normalize_length=length_normalized_loss, + ) + + # 3. [Optional] build speech token related modules + self.speech_embedding = torch.nn.Embedding(speech_token_size + 1, llm_input_size) + + # 4. sampling method + self.sampling = sampling + self.mix_ratio = mix_ratio + + #Dropout + if drop_ratio > 0: + self.dropout = nn.Dropout(drop_ratio) + else: + self.dropout = None + + def pad_unpad_sequence(self, sos_eos_emb, text_token, text_token_len, task_id_emb, speech_token, speech_token_len): + device = text_token.device + text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True) + speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True) + lm_input = [torch.concat([sos_eos_emb.squeeze(dim=0), text_token[i], task_id_emb.squeeze(dim=0), speech_token[i]], dim=0) + for i in range(len(text_token))] + # lm_input_len = [i.size(0) for i in lm_input] + attention_mask = [torch.ones(i.size(0),device=device,dtype=torch.int32) for i in lm_input] + lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID) + attention_mask = pad_sequence(attention_mask, batch_first=True, padding_value=0) + return lm_input, attention_mask + def forward( + self, + batch: dict, + ) -> Dict[str, Optional[torch.Tensor]]: + """ + Args: + text: (B, L, D) + text_lengths: (B,) + audio: (B, T, N) or (B, T) + audio_lengths: (B,) + """ + text_token = batch['text_token'] + text_token_len = batch['text_token_len'] + speech_token = batch['speech_token'] + speech_token_len = batch['speech_token_len'] + + # 1. prepare llm_target + lm_target = [torch.tensor([IGNORE_ID] * (2 + text_token_len[i]) + speech_token[i, :speech_token_len[i]].tolist() + + [self.speech_token_size]) for i in range(text_token.size(0))] + lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID).to(text_token.device) + + # 1. encode text_token + text_token = self.text_embedding(text_token) + + + + # 3. eos and task_id + sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1) + task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1) + + # 4. encode speech_token + speech_token = self.speech_embedding(speech_token) + + # 5. 
unpad and pad + lm_input, attention_mask = self.pad_unpad_sequence(sos_eos_emb, text_token, text_token_len, + task_id_emb, speech_token, speech_token_len) + # 5.1 create attention mask + # attention mask is [1,text_token_len,1,sp_token_len,0,0,0] + + if self.dropout is not None: + lm_input = self.dropout(lm_input) + # 6. run lm forward + lm_output = self.llm(inputs_embeds=lm_input, attention_mask=attention_mask,output_hidden_states=True,return_dict=True) + # hidden_states = lm_output.hidden_states[-1] + # logits = self.llm_decoder(hidden_states) + logits = lm_output.logits + lm_target = lm_target[:, 1:].contiguous() + loss = self.criterion_ce(logits, lm_target) + acc = th_accuracy(logits.view(-1, self.speech_token_size + 1), lm_target, ignore_label=IGNORE_ID) + return {'loss': loss, 'acc': acc} + + def dummy_forward(self): + print(f'start to do dummy forward') + with torch.no_grad(): + with torch.amp.autocast(enabled=True,device_type='cuda'): + xs = torch.ones(1, 1, self.llm_input_size,device=self.llm.model.device,dtype=torch.float) + print(f'xs is {xs.dtype}') + masks = torch.ones(1, 1, 1,device=self.llm.model.device,dtype=torch.long) + cache = None + self.forward_one_step(xs, masks, cache) + print(f'finish dummy forward') + + def forward_one_step(self, xs, masks, cache=None): + input_masks = masks[:, -1, :] + outs = self.llm( + inputs_embeds=xs, + attention_mask=input_masks, + output_hidden_states=True, + return_dict=True, + use_cache=True, + past_key_values=cache, + ) + logits = outs.logits + new_cache = outs.past_key_values + return logits, new_cache + + def sampling_ids( + self, + weighted_scores: torch.Tensor, + decoded_tokens: List, + sampling: int, + ignore_eos: bool = True, + ): + num_trials, max_trials = 0, 100 + while True: + top_ids = self.sampling(weighted_scores, decoded_tokens, sampling) + if (not ignore_eos) or (self.speech_token_size not in top_ids): + break + num_trials += 1 + if num_trials > max_trials: + print(f'decoded_tokens is {decoded_tokens}, top_ids is {top_ids}, sampling is {sampling}, ignore_eos is {ignore_eos}') + raise RuntimeError('sampling reaches max_trials {} and still get eos when ignore_eos is True, check your input!'.format(max_trials)) + return top_ids + @torch.inference_mode + def inference( + self, + text: torch.Tensor, + text_len: torch.Tensor, + prompt_text: torch.Tensor, + prompt_text_len: torch.Tensor, + prompt_speech_token: torch.Tensor, + prompt_speech_token_len: torch.Tensor, + embedding: torch.Tensor, + sampling: int = 25, + max_token_text_ratio: float = 20, + min_token_text_ratio: float = 2, + ) -> Generator[torch.Tensor, None, None]: + device = text.device + text = torch.concat([prompt_text, text], dim=1) + + end_of_prompt_id = 65531 + #find the length of instruction and text the text is [prompt, end_of_prompt, text] + end_of_prompt_mask = (text == end_of_prompt_id) + # 使用nonzero找到所有匹配的索引 + end_of_prompt_indices = end_of_prompt_mask.nonzero() + + # 默认值:没有找到end_of_prompt_id + instruction_length = 0 + content_length = text_len + + # 如果找到了end_of_prompt_id + if end_of_prompt_indices.size(0) > 0: + # 获取第一个匹配的索引(只考虑第一个出现的end_of_prompt_id) + # 由于text是二维张量 [batch, seq_len],我们需要第二个维度的索引 + instruction_length = end_of_prompt_indices[0, 1].item() + content_length = text_len - (instruction_length + 1) # +1是因为要跳过end_of_prompt_id标记本身 + # print(f'找到end_of_prompt标记,指令长度: {instruction_length}, 内容长度: {content_length}') + + text_len += prompt_text_len + text = self.text_embedding(text) + + + # 3. 
concat llm_input
+        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
+        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
+        if prompt_speech_token_len != 0:
+            prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
+        else:
+            prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
+        lm_input = torch.concat([sos_eos_emb, text, task_id_emb, prompt_speech_token_emb], dim=1)
+
+        # 4. calculate min/max output length from the content length
+        # (cast to int so this works for both python ints and one-element tensors)
+        min_len = int(content_length * min_token_text_ratio)
+        max_len = int(content_length * max_token_text_ratio)
+        # print(f'min_len is {min_len}, max_len is 139,844')
+        # 5. step by step decode
+        out_tokens = []
+        cache = None
+        start_time = time.time()
+        end_time = 0
+        is_prefill = True
+        prefill_time = 0
+        prefill_length = lm_input.shape[1]
+        for i in range(max_len):
+            logits, cache = self.forward_one_step(lm_input,
+                                                  masks=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]), device=lm_input.device)).to(torch.bool),
+                                                  cache=cache)
+            # print(f'logits.shape is {logits.shape}')
+            logp = logits[:,-1].log_softmax(dim=-1)
+            top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True if i < min_len else False).item()
+            if top_ids == self.speech_token_size:
+                break
+            if top_ids > self.speech_token_size:
+                continue
+            # in stream mode, yield token one by one
+            yield top_ids
+            out_tokens.append(top_ids)
+            lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
+            if is_prefill:
+                prefill_time = time.time() - start_time
+                is_prefill = False
+        end_time = time.time()
+        decode_time = end_time - start_time - prefill_time
+        decoded_length = len(out_tokens)
+        print(f'tps for prefill is {prefill_length/prefill_time}. {prefill_length} tokens in {prefill_time} seconds')
+        print(f'tps for decode is {decoded_length/decode_time}.
{decoded_length} tokens in {decode_time} seconds') + print(f'out_tokens is {out_tokens}') + +if __name__ == '__main__': + rwkv_path = "/external_data/models/rwkv7-191M-world/" + json_data = {"text": "馬克思去世之後,恩格斯在1884年所出版的《家庭、私有制和國家的起源》,被視為馬克思主義民族學的第一本經典著作。提到了民族形成的規律。人結合成群;由血緣關係組成原始的家庭型式並形成親屬制度,逐漸排除同胞的性交關係;共祖的血族團體結成氏族,氏族結成部落,進而結成部落聯盟,融合成「人民()」;隨著生產力的增加,分工擴大,新的生產關係出現,新階級產生,使得氏族制度漸漸不能負荷而消滅,隨之產生由「新國族()」組成的國家。", "tts_speech_tokens": [1959, 1707, 1704, 5835, 5832, 5832, 3645, 3888, 2031, 2112, 2124, 4133, 5672, 489, 2268, 6453, 5643, 4527, 159, 6298, 3810, 4612, 2989, 4680, 3719, 3477, 4671, 4610, 2685, 4953, 5080, 2897, 2087, 1978, 1948, 1950, 2112, 1028, 2, 6175, 507, 2585, 4511, 5725, 6374, 3465, 5177, 3718, 6375, 2261, 5189, 35, 1763, 2015, 5667, 4777, 2166, 703, 6077, 649, 2163, 5672, 6076, 4466, 5644, 6374, 6372, 4527, 753, 2699, 341, 86, 2234, 4995, 5808, 725, 710, 632, 2085, 6213, 3321, 5830, 2910, 720, 3890, 1463, 1476, 1746, 59, 353, 2540, 1514, 1951, 2031, 4920, 4758, 2276, 6320, 5914, 222, 2193, 2688, 3509, 5207, 5392, 5644, 5645, 4680, 4615, 1127, 725, 1370, 3462, 4915, 3465, 803, 632, 2754, 576, 623, 2810, 2909, 1410, 663, 1457, 2864, 231, 2509, 5921, 5914, 54, 4523, 4920, 4596, 5298, 3840, 1943, 1691, 734, 1460, 1800, 1804, 2031, 2112, 4299, 2160, 2378, 2816, 2267, 5400, 5645, 5644, 4680, 4609, 2185, 1454, 224, 2582, 4526, 1762, 3651, 5835, 3648, 1458, 2031, 2121, 4311, 4133, 6401, 594, 324, 6454, 5643, 4527, 3312, 3462, 4032, 1454, 4046, 5588, 5749, 2753, 1884, 1885, 512, 4480, 5529, 3228, 4041, 5825, 5179, 6055, 1635, 137, 135, 64, 2246, 1896, 60, 5669, 5668, 5100, 954, 2166, 380, 8, 1725, 1284, 4759, 3562, 4560, 6018, 2482, 440, 737, 1483, 6216, 2178, 4289, 3963, 5493, 4370, 707, 626, 1247, 2112, 4299, 4218, 3894, 3645, 1701, 4218, 2112, 60, 4461, 565, 2419, 54, 5023, 4949, 3151, 2432, 1153, 1884, 1079, 4723, 6258, 5412, 4042, 6557, 4453, 4596, 528, 2456, 6177, 4672, 748, 909, 4394, 712, 573, 696, 5825, 4615, 240, 4614, 6070, 1700, 1843, 1735, 3969, 3645, 1461, 2031, 1611, 1688, 113, 5208, 3993, 4678, 1295, 467, 2405, 4860, 2792, 2324, 2733, 2214, 752, 2213, 6100, 6055, 6024, 1672, 395, 3670, 1726, 1950, 3648, 5835, 5832, 3651, 2031, 1266, 3481, 968, 707, 5098, 5581, 5971, 6055, 942, 962, 953, 2357, 4546, 5773, 654, 5090, 5996, 4543, 4723, 4512, 4759, 4659, 5020, 1052, 1534, 1734, 5484, 2097, 1457, 2099, 4671, 2187, 740, 17, 5453, 4129, 1941, 1700, 815, 2195, 4557, 5409, 5402, 5401, 1044, 792, 306, 110, 151, 4920, 2347, 6158, 6157, 141, 2193, 3428, 5696, 4659, 4516, 528, 539, 2942, 5166, 5645, 4914, 4528, 788, 1735, 1950, 5832, 5832, 3645, 1788, 2112, 1920, 5344, 4966, 4886, 4444, 4596, 1267, 4481, 5367, 4672, 748, 5, 4479, 3912, 4675, 4488, 2724, 4560, 5148, 6534, 2169, 2102, 1854, 4920, 3231, 5177, 1125, 303, 2180, 1457, 2096, 2113, 4134, 3888, 3645, 1701, 2031, 5649, 4113, 2186, 3153, 4920, 4849, 5192, 2125, 2175, 6075, 1460, 3653, 2418, 5484, 4320, 2105, 5831, 3312, 1032, 2430, 512, 1403, 6501, 2181, 703, 6404, 5030, 5077, 144, 73, 59, 4435, 4594, 4569, 4966, 3994, 2499, 962, 2840, 717, 672, 1379, 3653, 4643, 4443, 4756, 4686, 3239, 1771, 1708, 4218, 4218, 6405, 6405, 4218, 4218, 2112, 570, 5097, 5047, 5774, 3991, 6378, 4041, 2186, 1454, 300, 56, 2324, 2297, 785, 1978, 2031, 1734, 5217, 6052, 3867, 971, 221, 65, 1284, 6222, 2183, 5826, 948, 2511, 6328, 3650, 5289, 1041, 4380, 1266, 2024, 1356, 4677, 1286, 4487, 3459, 2466, 2201, 5203, 4680, 4996, 4753, 4527, 5420, 3468, 3378, 2180, 1457, 2095, 2059, 4218, 4218, 3894, 4482, 4753, 4752, 5418, 5499, 3468, 2099, 5831, 2424, 
4677, 395, 4517, 4680, 2233, 3340, 6258, 2166, 1431, 5829, 5099, 1692, 725, 710, 1355, 1757, 2032, 4218, 4218, 2031, 2733, 4515, 5075, 4562, 335, 4052, 5324, 384, 4678, 1286, 2249, 2332, 4915, 2925, 983, 4068, 2157, 2166, 2901, 5089, 3879, 707, 380, 75, 638, 737, 2610, 6501, 671, 581, 2777, 1238, 1960, 2031, 4218, 3894, 5838, 3651, 3645, 1701, 1947, 1854, 1693, 1445, 5047, 5046, 498, 2430, 2819, 4727, 147, 5644, 2925, 731, 2210, 5938, 1744, 1584, 1685, 86, 6255, 6261, 4071, 2726, 2726, 3751, 2049, 1959, 4218, 4218, 4218, 4218, 4218, 4137, 5454, 5805, 883, 224, 2428, 5649, 6309, 5828, 1454, 5157, 5644, 4412, 4554, 6099, 4428, 3655, 1463, 1588, 2564, 4858, 792, 56, 64, 5650, 2503, 4415, 6018, 3453, 4686, 2573, 5915, 6238, 1703, 1705, 1950, 6345, 6534, 4347, 2810, 6175, 6181, 588, 5075, 5775, 3589, 651, 4947, 5074, 4970, 54, 2189, 6077, 1703, 1948, 1947, 1704, 3645, 3651, 1950, 4404, 4513, 2571, 4993, 6018, 4419, 56, 5644, 2457, 4394, 5934, 5652, 2930, 1460, 2047, 699, 3566, 4544, 4659, 2580, 3724, 5649, 4077, 3616, 5342, 4515, 2553, 4841, 737, 1493, 2032, 4218, 6405, 4218, 4218, 2031, 4650, 4756, 4515, 5668, 5452, 6261, 4929, 4686, 4607, 4597, 393, 4677, 2024, 5182, 5649, 2187, 3655, 1466, 752, 5656, 5643, 56, 2225, 5203, 1744, 2031, 4137, 5832, 1701, 3894, 4644, 5402, 3222, 1532, 315, 2324, 4834, 4671, 5726, 4689, 5499, 4197, 4286, 5099, 1125, 4998, 4680, 4528, 1851, 396, 3644, 1370, 1275, 4434, 4849, 4472, 3426, 4669, 4571, 4311, 2175, 2898, 2655, 991, 119, 5042, 4257, 6534, 6534, 5102, 2831, 2673, 495, 632, 2891, 2783, 2006, 1951, 4218, 4137, 2027, 2108, 1865, 3863, 2336, 4593, 4839, 2393, 5756, 4311, 4562, 2528, 749, 5724, 4077, 1700, 4615, 6378, 5661, 5256, 5655, 2214, 3656, 737, 752, 5412, 5645, 74, 2216, 4474, 1725, 1704, 5832, 3645, 2031, 1950, 1132, 1937, 5095, 4435, 4594, 2553, 4831, 5532, 6505, 672, 728, 5054, 2413, 5406, 4286, 5831, 3312, 6213, 2183, 2099, 4752, 11, 1557, 4385, 2899, 1383, 1457, 719, 4592, 2733, 4697, 5273, 6157, 5428, 1730, 1975, 1701, 5832, 3645, 1461, 4299, 2112, 135, 5915, 6157], "prompt_text": "他和之前的那位护士粉丝一样呀,都很喜欢心理咨询。", "llm_prompt_speech_token": [2058, 5103, 6076, 5996, 3195, 2440, 4807, 72, 5727, 5658, 5175, 5260, 4755, 2598, 3659, 181, 2225, 4492, 1482, 4403, 3609, 3098, 2663, 2834, 470, 2850, 2112, 2913, 5095, 5403, 36, 2169, 4329, 902, 749, 4194, 6374, 46, 5020, 3454, 3480, 1295, 197, 6481, 5752, 734, 737, 758, 1954, 1957, 1954, 2058, 1842, 74, 716, 5831, 5101, 2676, 2268, 1712, 818, 5453, 5371, 4755, 4759, 4758, 1294, 1847, 2769, 584, 3971, 4472, 4660, 4839, 2562, 4805, 3751, 998, 53, 4194, 6374, 6454, 6390, 5257, 4443, 6298, 1671, 953, 1582, 2041]} + json_data_2 = {"text": "早期的制度主义者受到传统政治哲学和欧洲大陆国家学影响,主要关注自上而下的制度设计问题。认为制度是影响人类行为的基本因素,对政治机构运作的研究主要通过对政治制度中权力分配和人类行为的法律与机构约束地了解。主要通过制度研究法和历史比较法进行研究。", "tts_speech_tokens": [4131, 6075, 5832, 5862, 3645, 1701, 1950, 4299, 4920, 2259, 3404, 3410, 2786, 156, 5325, 2283, 150, 4450, 4430, 1029, 4918, 4752, 4770, 1281, 5743, 5015, 1369, 5727, 2180, 1697, 5588, 4614, 3720, 4671, 884, 308, 542, 4644, 4995, 5068, 5095, 552, 4646, 2756, 387, 5487, 3321, 1955, 1703, 4554, 3993, 2763, 692, 1967, 1887, 4920, 4761, 4608, 5122, 1805, 2022, 4995, 5661, 6149, 1774, 3462, 4915, 794, 632, 4603, 6027, 3840, 1700, 233, 137, 5427, 495, 1280, 3476, 5015, 5098, 5014, 3462, 2826, 2912, 5014, 2139, 300, 5509, 6319, 2837, 3879, 5825, 3641, 2186, 1392, 1457, 716, 150, 2337, 3086, 6401, 2408, 5325, 6270, 1672, 962, 2267, 3482, 1268, 3455, 4830, 4416, 4569, 2276, 1460, 1946, 1702, 1950, 6075, 5832, 5859, 5862, 5835, 3645, 1944, 4218, 
1977, 4833, 1855, 1616, 4607, 5509, 5054, 1456, 654, 2913, 5027, 5104, 5931, 3480, 3384, 5824, 4289, 1370, 1923, 2490, 4753, 5013, 5742, 6148, 4672, 4914, 4456, 5357, 1235, 1703, 1784, 3812, 4526, 4593, 2301, 4543, 6320, 55, 315, 272, 299, 1758, 6351, 4673, 4752, 4770, 1368, 306, 5825, 1454, 5238, 4996, 2790, 4753, 148, 3228, 3158, 3637, 5830, 2324, 1561, 555, 4488, 294, 1295, 800, 1762, 1750, 6147, 4447, 4780, 5777, 1457, 227, 224, 4850, 4534, 2032, 1950, 5838, 5835, 5862, 5859, 3888, 4137, 6405, 6405, 4218, 5379, 4753, 4923, 4851, 2504, 2094, 306, 5095, 5828, 1457, 2096, 2086, 1975, 5400, 4753, 4672, 4932, 5420, 809, 1295, 2024, 5642, 5073, 4659, 4839, 4463, 4133, 1226, 1748, 3474, 803, 38, 5853, 6093, 4486, 4598, 314, 2510, 4488, 4731, 4812, 1106, 1883, 5776, 5825, 2348, 1041, 2243, 58, 1248, 4512, 2472, 4857, 1686, 3627, 167, 269, 1268, 3536, 5480, 5933, 5238, 5562, 3636, 725, 1364, 2093, 2112, 1113, 4609, 2420, 160, 2139, 5647, 4761, 4599, 5121, 3994, 5652, 3961, 1041, 4758, 3319, 2428, 1227, 5015, 2909, 4129, 6073, 4587, 5938, 1807, 5484, 5824, 4997, 1044, 2252, 1052, 1294, 1133, 8, 3022, 3426, 2589, 5342, 5090, 387, 4191, 6294, 1828, 2099, 3158, 4568, 5591, 307, 300, 5598, 5048, 5776, 591, 2913, 5084, 542, 1923, 381, 4601, 2663, 80, 1599, 1977, 4753, 4581, 4518, 3666, 3913, 4680, 6392, 3231, 3462, 4996, 4761, 5094, 1044, 1038, 5096, 3560, 1119, 5490, 2891, 2861, 5774, 3991, 1951, 4218, 5865, 6052, 3084, 818, 1463, 3825, 4561, 5020, 1684, 6534, 2530, 4511, 6255, 2130, 2628, 4570, 386, 1520, 1732, 5346, 4698, 1261, 632, 2576, 3477, 1774, 785, 2934, 6177, 4420, 4570, 638, 4435, 4542, 357, 1673, 6506, 3395, 4778, 876, 2260, 29, 1028, 6507, 6535, 5805, 3491, 1946, 1703, 839, 1533, 4613, 5344, 4130, 1943, 3239, 879, 4920, 4758, 5668, 4858, 1218, 4852, 5084, 638, 2087, 1945, 3645, 5832, 5859, 5859, 3675, 3648, 4137, 6405, 4218, 3885, 6316, 6074, 4598, 4573, 4671, 5076, 5562, 5095, 4373, 2186, 2012, 2031, 2112, 60, 4776, 5019, 4939, 1295, 1772, 1762, 1707, 4218, 2031, 80, 314, 1784, 3491, 4843, 312, 2337, 2672, 386, 791, 1493, 1708, 4137, 4218, 6405, 6405, 6324, 6324, 4920, 5562, 2180, 1856, 3320, 4594, 5674, 2702, 1839, 351, 4875, 5039, 5776, 5532, 1401, 669, 5091, 5003, 515, 64, 2004, 4753, 4770, 5013, 2091, 2503, 5099, 1697, 1376, 386, 737, 4479, 3264, 3066, 6071, 5089, 3430, 6535, 604, 1946, 1946, 1217, 4617, 3159, 623, 299, 3798, 4453, 5100, 4777, 2983, 4996, 4915, 2017, 1046, 1923, 1920, 1698, 1295, 1051, 2733, 4615, 5509, 2870, 6453, 6534, 1949, 1460, 222, 4677, 4533, 4832, 4407, 2454, 2726, 1268, 566, 116, 8, 6261, 2490, 3156, 2414, 5093, 5102, 2006, 2032, 1947, 3888, 5832, 3645, 3645], "prompt_text": "当然你的他对保证金有要求,有的是比如说百分之你交易值的百分之五。", "llm_prompt_speech_token": [1516, 1950, 4215, 1116, 4700, 4628, 5365, 4496, 2291, 1726, 1016, 886, 72, 4592, 1836, 2058, 2916, 6086, 4537, 1038, 4604, 5342, 2160, 2163, 2870, 1952, 29, 3471, 3476, 4718, 3751, 4686, 2310, 5588, 4831, 4777, 971, 956, 1295, 5749, 4568, 6323, 4868, 5077, 2257, 5325, 1623, 1607, 1862, 4130, 1199, 74, 1767, 4528, 4763, 5643, 4923, 6391, 1774, 2058, 1950, 1689, 1618, 1532, 5481, 5024, 2163, 712, 1463, 812, 2142, 5312, 5934, 5658, 4915, 4905, 3717, 1968, 3914, 5426, 1599, 1005, 5325, 4598, 3491, 4781, 4615, 4695, 5911, 3722, 1275, 4915, 4833, 2018, 1775, 3312, 306, 4538, 2178, 4299, 4347, 326, 1460, 1810, 3016, 6180, 4923, 6391, 4852, 5828, 2105, 2105, 2105, 728, 5098, 3551, 1951]} + device = 'cuda' + model = AutoModelForCausalLM.from_pretrained(rwkv_path, trust_remote_code=True).to(dtype=torch.bfloat16) + model.to(device) + 
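# Smoke test: read hidden_size from the backbone config, wrap the model in RWKV7LM
+    # and run one training-style forward pass over the two JSON samples defined above.
+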
configuration = model.config + print(configuration) + tokenizer = AutoTokenizer.from_pretrained(rwkv_path, trust_remote_code=True) + print(tokenizer) + tokenizer.add_special_tokens({'pad_token': '<|rwkv_tokenizer_end_of_text|>'}) + + + llm_input_size = configuration.hidden_size + llm_output_size = configuration.hidden_size + speech_token_size = 6561 + rwkv7lm = RWKV7LM(llm_input_size, llm_output_size, speech_token_size, model,None).to(dtype=torch.bfloat16) + rwkv7lm.to(device) + rwkv7lm.train() + print(rwkv7lm) + + speech_tokens = [torch.tensor(json_data["tts_speech_tokens"],dtype=torch.int32), torch.tensor(json_data_2["tts_speech_tokens"],dtype=torch.int32)] + speech_length = torch.tensor([len(json_data["tts_speech_tokens"]), len(json_data_2["tts_speech_tokens"])],dtype=torch.int32) + print(speech_length) + + speech_tokens = pad_sequence(speech_tokens, batch_first=True, padding_value=tokenizer.pad_token_id) + print(speech_tokens.shape) + + texts = [json_data["text"], json_data_2["text"]] + prompts = [json_data["prompt_text"], json_data_2["prompt_text"]] + texts_ids = [ + torch.tensor(tokenizer.encode(texts[i],add_special_tokens=False)+tokenizer.encode(prompts[i],add_special_tokens=False),dtype=torch.int32) + for i in range(len(texts)) + ] + texts_length = torch.tensor([i.shape[0] for i in texts_ids],dtype=torch.int32) + print(texts_length) + texts_ids = pad_sequence(texts_ids, batch_first=True, padding_value=tokenizer.pad_token_id) + print(texts_ids.shape) + + + + batch = {"text_token": texts_ids.to(device), "text_token_len": texts_length.to(device), "speech_token": speech_tokens.to(device), "speech_token_len": speech_length.to(device)} + output = rwkv7lm(batch) + print(output) + + print(model) + print(model.__class__.__name__) + print(f"类名: {model.__class__.__name__}") + print(f"完整类路径: {model.__class__.__module__}.{model.__class__.__qualname__}") \ No newline at end of file diff --git a/third_party/model/test/test_hf_rwkv.py b/third_party/model/test/test_hf_rwkv.py new file mode 100644 index 0000000000000000000000000000000000000000..a477a2d486b91feca909b5956518fd79dd56fba6 --- /dev/null +++ b/third_party/model/test/test_hf_rwkv.py @@ -0,0 +1,21 @@ +model_path = "/external_data/models/rwkv7-0.4B-world/" +from transformers import AutoModelForCausalLM, AutoTokenizer +import torch +device = 'cuda:0' +model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).to(device=device, dtype=torch.float16) +model.eval() + +tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) +prompt = "User : 请评价一下现阶段中美关系,包括合作和竞争。\nAssistant :" +inputs = tokenizer(prompt, return_tensors="pt").to(device) +print(inputs) +from transformers import GenerationConfig +config = GenerationConfig(max_new_tokens=256) + +ids = tokenizer.encode(prompt,add_special_tokens=False) +print(ids) + +outputs = model.generate(**inputs, + generation_config=config,) +print(tokenizer.decode(outputs[0], skip_special_tokens=True)) +# from fla.models.rwkv7 import RWKV7ForCausalLM, RWKV7Model, RWKV7Config \ No newline at end of file diff --git a/third_party/model/test/test_initialize.py b/third_party/model/test/test_initialize.py new file mode 100644 index 0000000000000000000000000000000000000000..15e85b744706bad2d83690260574e220dc10413c --- /dev/null +++ b/third_party/model/test/test_initialize.py @@ -0,0 +1,69 @@ +import time + + +def do_tts(tts_text,prompt_texts,prompt_audios,cosyvoice,prefix): + import logging + for i, (prompt_audio_file, prompt_text) in enumerate(zip(prompt_audios, 
prompt_texts)): + logging.info(f'Processing {prompt_text}') + prompt_speech_16k = load_wav(prompt_audio_file, 16000) + if prompt_text is not None: + for j, k in enumerate(cosyvoice.inference_zero_shot(tts_text,prompt_text, prompt_speech_16k, stream=False,speed=1)): + torchaudio.save('{}_{}_{}.wav'.format(prefix,i, j), k['tts_speech'], cosyvoice.sample_rate) + else: + for j, k in enumerate(cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=False,speed=1)): + torch.cuda.manual_seed_all(time.time()) + torchaudio.save('{}_{}_{}.wav'.format(prefix,i, j), k['tts_speech'], cosyvoice.sample_rate) + logging.info(f'Finished processing {tts_text},for {i}th prompt') +if __name__ == '__main__': + from cosyvoice.cli.cosyvoice import CosyVoice2 + import torch + import sys + # model_path = '/home/yueyulin/models/CosyVoice2-0.5B_RWKV_0.19B/' + # device = 'cuda:0' + print(sys.argv) + model_path = sys.argv[1] + device = sys.argv[2] if len(sys.argv) > 2 else 'cuda:0' + is_flow_only = sys.argv[3]=='True' if len(sys.argv) > 3 else False + print(f'is_flow_only: {is_flow_only}') + cosyvoice = CosyVoice2(model_path,device=device,fp16=False,load_jit=False) + + from cosyvoice.utils.file_utils import load_wav + import torchaudio + prompt_audios = [ + '/home/yueyulin/github/RWKVTTS/zero_shot_prompt.wav', + '/home/yueyulin/github/RWKVTTS/mine.wav', + '/home/yueyulin/github/RWKVTTS/new.wav', + '/home/yueyulin/github/RWKVTTS/Trump.wav', + "/home/yueyulin/github/RWKVTTS/00000309-00000300.wav" + ] + + if not is_flow_only: + prompt_texts = [ + '希望你以后做的比我还好呦。', + '少年强则中国强。', + '我随便说一句话,我喊开始录就开始录。', + 'numbers of Latino, African American, Asian American and native American voters.', + "小偷却一点也不气馁,继续在抽屉里翻找。" + ] + else: + prompt_texts = [ + None, + None, + None, + None + ] + + tts_texts = [ + '一个教授逻辑学的教授,有三个学生,而且三个学生均非常聪明!一天教授给他们出了一个题,教授在每个人脑门上贴了一张纸条并告诉他们,每个人的纸条上都写了一个正整数,且某两个数的和等于第三个!', + 'By unifying streaming and non-streaming synthesis within a single framework, CosyVoice 2 achieves human parity naturalness, minimal response latency, and virtually lossless synthesis quality in streaming mode. ' + '全球每年有超过一百三十五万人,因交通事故而死亡。', + '通过创新技术让未来出行更加安全,高效。' + ] + index = 0 + for tts_text in tts_texts: + do_tts(tts_text,prompt_texts,prompt_audios,cosyvoice,f"MIXED{index}") + index += 1 + + # do_tts('By unifying streaming and non-streaming synthesis within a single framework, CosyVoice 2 achieves human parity naturalness, minimal response latency, and virtually lossless synthesis quality in streaming mode. 
',prompt_texts,cosyvoice,"en") + + # do_tts('一个教授逻辑学的教授,有三个学生,而且三个学生均非常聪明!一天教授给他们出了一个题,教授在每个人脑门上贴了一张纸条并告诉他们,每个人的纸条上都写了一个正整数,且某两个数的和等于第三个!',prompt_texts,cosyvoice,"cn") \ No newline at end of file diff --git a/third_party/model/test/test_instructed.py b/third_party/model/test/test_instructed.py new file mode 100644 index 0000000000000000000000000000000000000000..8ecb02ba2799cdd295afe637e90f0f639e6e71c1 --- /dev/null +++ b/third_party/model/test/test_instructed.py @@ -0,0 +1,53 @@ +import time + + +def do_tts(tts_text,prompt_audios,prompt_texts,cosyvoice,prefix): + import logging + for i in range(len(prompt_audios)): + prompt_audio_file = prompt_audios[i] + prompt_speech_16k = load_wav(prompt_audio_file, 16000) + for j in range(len(prompt_texts)): + prompt_text = prompt_texts[j] + logging.info(f'Processing {prompt_text} from {prompt_audio_file}') + torch.cuda.manual_seed_all(time.time()) + if len(prompt_text) >0: + for result in cosyvoice.inference_instruct2(tts_text,prompt_text, prompt_speech_16k, stream=False,speed=1): + torchaudio.save(f"{prefix}_{i}_{j}.wav", result['tts_speech'], cosyvoice.sample_rate) + else: + for result in cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=False,speed=1): + torchaudio.save(f"{prefix}_{i}_{j}.wav", result['tts_speech'], cosyvoice.sample_rate) + logging.info(f'Finished processing {prompt_text} from {prompt_audio_file}') +if __name__ == '__main__': + from cosyvoice.cli.cosyvoice import CosyVoice2 + import torch + import sys + # model_path = '/home/yueyulin/models/CosyVoice2-0.5B_RWKV_0.19B/' + # device = 'cuda:0' + print(sys.argv) + model_path = sys.argv[1] + device = sys.argv[2] if len(sys.argv) > 2 else 'cuda:0' + cosyvoice = CosyVoice2(model_path,device=device,fp16=True,load_jit=False) + instruct = sys.argv[3] + tts_text = sys.argv[4] + from cosyvoice.utils.file_utils import load_wav + import torchaudio + prompt_audios = [ + '/home/yueyulin/github/RWKVTTS/zero_shot_prompt.wav', + '/home/yueyulin/github/RWKVTTS/mine.wav', + '/home/yueyulin/github/RWKVTTS/new.wav', + '/home/yueyulin/github/RWKVTTS/Trump.wav', + ] + + prompt_texts = [ + # '希望你以后做的比我还好呦。', + # '请用非常快速的语速说。', + '日本語で話してください。', + # '한국어로 말씀해주세요.' + ] + + # do_tts('By unifying streaming and non-streaming synthesis within a single framework, CosyVoice 2 achieves human parity naturalness, minimal response latency, and virtually lossless synthesis quality in streaming mode. 
',prompt_texts,cosyvoice,"instructed_en") + + # do_tts('[laughter]有时候,看着小孩子们的天真行为[laughter],我们总会会心一笑。',prompt_audios,prompt_texts,cosyvoice,"instructed_cn") + # do_tts(tts_text,prompt_audios,prompt_texts,cosyvoice,"instructed_cn") + do_tts(tts_text,prompt_audios,[instruct],cosyvoice,"multilingual") + \ No newline at end of file diff --git a/third_party/model/test/test_performance.py b/third_party/model/test/test_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..929a1d5492787eb94739f9b75092d56be9870708 --- /dev/null +++ b/third_party/model/test/test_performance.py @@ -0,0 +1,56 @@ +from math import cos + +from regex import F + + +if __name__ == '__main__': + from cosyvoice.cli.cosyvoice import CosyVoice2 + import torch + import sys + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + # model_path = '/home/yueyulin/models/CosyVoice2-0.5B_RWKV_0.19B/' + # device = 'cuda:0' + print(sys.argv) + model_path = sys.argv[1] + device = sys.argv[2] if len(sys.argv) > 2 else 'cuda:0' + cosyvoice = CosyVoice2(model_path,device=device,fp16=True,load_jit=False) + print('start to compile llm') + import torch_tensorrt + # cosyvoice.model.llm = torch.compile(cosyvoice.model.llm,backend='onnxrt') + print('finish to compile llm') + tts_text = "\"Ra Kuv\" is an architecture that builds various kinds of neural network models." + ref_voice = "Cocona_English" + import torchaudio + i = 0 + + # 重置 RWKV7Block 的统计信息 + from rwkvfla.models.rwkv7.modeling_rwkv7 import RWKV7Block + RWKV7Block.reset_stats() + + # 重置 LoRA 的统计信息 + from rwkvfla.layers.rwkv6 import LoRA + LoRA.reset_stats() + + # 重置 RWKV7Attention 的统计信息 + from rwkvfla.layers.rwkv7 import RWKV7Attention + RWKV7Attention.reset_stats() + for result in cosyvoice.inference_sft(tts_text, ref_voice, stream=False,speed=1): + torchaudio.save(f"performance_{i}.wav", result['tts_speech'], cosyvoice.sample_rate) + i += 1 + for i in range(100): + print(f'Processing {tts_text} from {ref_voice} {i} times') + for result in cosyvoice.inference_sft(tts_text, ref_voice, stream=False,speed=1): + continue + sort_by_all_time = True + # 打印RWKV7Block内部统计信息 + from rwkvfla.models.rwkv7.modeling_rwkv7 import RWKV7Block + RWKV7Block.print_stats(sort_by_time=sort_by_all_time) + + # 打印LoRA内部统计信息 + from rwkvfla.layers.rwkv6 import LoRA + LoRA.print_stats() + + # 打印RWKV7Attention内部统计信息 + from rwkvfla.layers.rwkv7 import RWKV7Attention + RWKV7Attention.print_stats(sort_by_time=sort_by_all_time) \ No newline at end of file diff --git a/third_party/model/test/test_speaker_adapter.py b/third_party/model/test/test_speaker_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..10f74ffcd25af98285ce16abdc3d832d6794cad0 --- /dev/null +++ b/third_party/model/test/test_speaker_adapter.py @@ -0,0 +1,43 @@ +import time + + +def do_tts(tts_text,ref_voice,prompt_texts,cosyvoice,prefix): + import logging + for i in range(len(prompt_texts)): + prompt_text = prompt_texts[i] + logging.info(f'Processing {prompt_text} from {ref_voice}') + if len(prompt_text) >0: + tts_text = f'{prompt_text}<|endofprompt|>{tts_text}' + for result in cosyvoice.inference_sft(tts_text, ref_voice, stream=False,speed=1): + torchaudio.save(f"{prefix}_{i}.wav", result['tts_speech'], cosyvoice.sample_rate) + logging.info(f'Finished processing {tts_text} {prompt_text} from {ref_voice}') +if __name__ == '__main__': + from cosyvoice.cli.cosyvoice import CosyVoice2 + import torch + import sys + # model_path = 
'/home/yueyulin/models/CosyVoice2-0.5B_RWKV_0.19B/' + # device = 'cuda:0' + print(sys.argv) + model_path = sys.argv[1] + device = sys.argv[2] if len(sys.argv) > 2 else 'cuda:0' + cosyvoice = CosyVoice2(model_path,device=device,fp16=True,load_jit=False) + print(cosyvoice.frontend.spk2info.keys()) + instruct = sys.argv[3] + tts_text = sys.argv[4] + ref_voice = sys.argv[5] + from cosyvoice.utils.file_utils import load_wav + import torchaudio + + prompt_texts = [ + # '希望你以后做的比我还好呦。', + # '请用非常快速的语速说。', + '日本語で話してください。', + # '한국어로 말씀해주세요.' + ] + + # do_tts('By unifying streaming and non-streaming synthesis within a single framework, CosyVoice 2 achieves human parity naturalness, minimal response latency, and virtually lossless synthesis quality in streaming mode. ',prompt_texts,cosyvoice,"instructed_en") + + # do_tts('[laughter]有时候,看着小孩子们的天真行为[laughter],我们总会会心一笑。',prompt_audios,prompt_texts,cosyvoice,"instructed_cn") + # do_tts(tts_text,prompt_audios,prompt_texts,cosyvoice,"instructed_cn") + do_tts(tts_text,ref_voice,[instruct],cosyvoice,"speaker_info_test") + \ No newline at end of file diff --git a/third_party/model/test/verify_speech_tokens.py b/third_party/model/test/verify_speech_tokens.py new file mode 100644 index 0000000000000000000000000000000000000000..b1de836e1165ba59943ce6397010eb9b3edd9602 --- /dev/null +++ b/third_party/model/test/verify_speech_tokens.py @@ -0,0 +1,65 @@ +from pyexpat import model +import time + + +def do_tts(tts_text,prompt_texts,cosyvoice,prefix): + import logging + for i, (prompt_audio_file, prompt_text) in enumerate(zip(prompt_audios, prompt_texts)): + logging.info(f'Processing {prompt_text}') + prompt_speech_16k = load_wav(prompt_audio_file, 16000) + with torch.no_grad(): + for j, k in enumerate(cosyvoice.inference_zero_shot(tts_text,prompt_text, prompt_speech_16k, stream=False,speed=1)): + torch.cuda.manual_seed_all(time.time()) + torchaudio.save('{}_{}_{}.wav'.format(prefix,i, j), k['tts_speech'], cosyvoice.sample_rate) + logging.info(f'Finished processing {prompt_text}') +if __name__ == '__main__': + from cosyvoice.cli.cosyvoice import CosyVoice2 + import torch + import sys + # model_path = '/home/yueyulin/models/CosyVoice2-0.5B_RWKV_0.19B/' + # device = 'cuda:0' + print(sys.argv) + model_path = sys.argv[1] + device = sys.argv[2] if len(sys.argv) > 2 else 'cuda:0' + cosyvoice = CosyVoice2(model_path,device=device,fp16=False,load_jit=False) + + from cosyvoice.utils.file_utils import load_wav + import torchaudio + #{"text": "请问你能模仿上海话的口音吗?<|endofprompt|>[laughter]太史,是中国古代官名。", "tts_speech_tokens": [3295, 2031, 1734, 1950, 2031, 1707, 1950, 1950, 1950, 1950, 2031, 1977, 1974, 3888, 5832, 2916, 2225, 37, 2916, 1728, 1734, 2031, 1950, 1950, 2112, 2139, 54, 5832, 4142, 6320, 5834, 2276, 4431, 4753, 4429, 4671, 3720, 1534, 1538, 4451, 4519, 1517, 2005, 1732, 1488, 1488, 1704, 1707, 2031, 1950, 2031, 1950, 4137, 1788, 1606, 5238, 4915, 5644, 4671, 1534, 806, 2264, 2993, 3074, 2992, 1356, 5565, 2666, 5074, 5776, 1321, 699, 2906, 5081, 5090, 1410, 651, 2186, 3641, 153, 57, 2303, 2276, 2384, 1206, 654, 2908, 2648, 5189, 2386, 6258, 1882, 377, 539, 620, 3266, 5935, 1966, 1959, 1950, 2031, 2031, 2031, 1707, 1950, 2031, 2031, 2031, 2031, 1950], "prompt_text": "", "llm_prompt_speech_token": []} + prompt_audios = [ + '/home/yueyulin/github/RWKVTTS/zero_shot_prompt.wav' + ] + + prompt_texts = [ + '请用凶猛的语气说。', + # '以机器人的角色和我交流。', + # '请用非常快速的语速说。', + # '请用愤怒的情感说一下。' + ] + prompt_audio_file = prompt_audios[0] + instruct_text = prompt_texts[0] + prompt_speech_16k = 
load_wav(prompt_audio_file, 16000) + tts_text = "聯合國會員國是聯合國大會的正式成員,在聯合國大會中擁有平等的代表權。截至2021年,聯合國一共有193個會員國。" + model_input = cosyvoice.frontend.frontend_instruct2(tts_text, instruct_text, prompt_speech_16k, cosyvoice.sample_rate) + print(model_input) + # do_tts('By unifying streaming and non-streaming synthesis within a single framework, CosyVoice 2 achieves human parity naturalness, minimal response latency, and virtually lossless synthesis quality in streaming mode. ',prompt_texts,cosyvoice,"instructed_en") + + # do_tts('一个教授逻辑学的教授,[laughter]有三个学生,而且三个学生均非常聪明![breath]一天教授给他们出了一个题,[breath]教授在每个人脑门上贴了一张纸条并告诉他们,每个人的纸条上都写了一个正整数,且某两个数的和等于第三个!',prompt_texts,cosyvoice,"instructed_original_cn") + llm_embedding = model_input['llm_embedding'] + flow_embedding = model_input['flow_embedding'] + + speech_tokens = torch.tensor([[1571, 4299, 4299, 4299, 4299, 4299, 4299, 4299, 4299, 1725, 3831, 46, 2867, 3617, 4319, 4313, 6209, 5020, 2330, 4565, 5589, 4872, 2870, 710, 5087, 303, 4754, 2906, 2777, 1644, 387, 1208, 2348, 1356, 5405, 4408, 4731, 1295, 971, 4860, 4876, 5054, 1434, 1686, 647, 35, 2378, 2760, 4956, 6077, 948, 73, 3482, 3455, 5642, 5615, 5046, 4749, 5320, 4677, 4777, 5917, 6077, 992, 1953, 3888, 3645, 1785, 2112, 1869, 2814, 5808, 4287, 5823, 4594, 4862, 702, 705, 1952, 1220, 5057, 690, 2838, 4962, 5050, 710, 1456, 1454, 56, 776, 6175, 5409, 6455, 4986, 4528, 6378, 5405, 4461, 510, 1213, 60, 2697, 2427, 2160, 5832, 5841, 5837, 2924, 2411, 4616, 6210, 6453, 2702, 3404, 488, 506, 2031, 3888, 2112, 3975, 1942, 4130, 5344, 5166, 6536, 6372, 56, 46, 3747, 4751, 5047, 6261, 114, 2598, 5352, 5119, 4463, 4543, 4536, 4735, 6534, 2547, 6157, 3890, 1703, 1712, 2028, 1701, 3972, 5919, 5916, 5835, 3645, 3969, 4215, 4299, 4218, 2049, 6018, 262, 5054, 5048, 6506, 4316, 5936, 5749, 5668, 2582, 2303, 2222, 764, 3975, 6486, 4299, 2112, 4071, 3020, 4993, 5696, 6344, 5695, 4966, 5020, 2330, 2222, 35, 1573, 3648, 3645, 2112, 3567, 5028, 2906, 2414, 3462, 5162, 4488, 2563, 867, 2928, 3655, 3890, 2296, 303, 3321, 3576, 5097, 225, 42, 2834, 1133, 5908, 6454, 6455, 3951, 5182, 4603, 2876, 4319, 3616, 5028, 5047, 2897, 2894, 2584, 300, 2567, 710, 1403, 2129, 2067, 5644, 2922, 5848, 5111, 4567, 6372, 2466, 4430, 1560, 1480, 5021, 6311, 2185, 725, 56, 779, 6255, 5401, 6455, 4743, 2262, 5648, 4435, 4704, 539, 566, 1680, 2220, 2760, 4993, 5102, 314, 1086, 3651, 5838, 3651, 1869, 1815, 5400, 4454, 2924, 4382, 73, 5652, 2243, 2233, 5851, 6016, 1476, 5087, 398, 476, 970, 3462, 2250, 2831, 5043, 6501, 2538, 5832, 5191, 4382, 2309, 6534, 2790, 2678, 3422, 101, 2157, 67, 5021, 2753, 1286, 2140, 3732, 5838, 3651, 1869, 5859, 5482, 6373, 6130, 6372, 3961, 6391, 6453, 6456, 2258, 8, 2924, 4598, 6378, 5412, 4778, 4463, 1191, 2160, 224, 4841, 3060, 6534, 2549, 2209, 6261, 6378, 6381, 4599, 5185, 1978, 1946, 1541, 1652, 1661, 2983, 6536, 6455, 3231, 5993, 5668, 3481, 5669, 5020, 5021, 566, 2086, 2112, 4299, 4299, 4299, 4218, 4299, 6486, 6486, 6486, 4299, 4299, 6486, 4299, 4299, 4299, 4218, 1806, 3828, 2711, 2879, 4319, 3989, 4993, 4940, 2330, 2300, 147, 6377, 2919, 5119, 5111, 4540, 5157, 6372, 4447, 1560, 751, 4607, 5827, 2185, 725, 56, 2936, 5445, 5482, 6454, 4743, 4527, 5648, 4516, 2517, 566, 1599, 2193, 2760, 5020, 2343, 6379, 2503, 2702, 2870, 5066, 5071, 1302, 6318, 1458, 2112, 6378, 5400, 5841, 1220, 5056, 573, 5097, 5086, 4535, 3481, 3482, 5749, 957, 2166, 2300, 251, 251, 737, 812, 2912, 6559, 5096, 2381, 4571, 2582, 2006, 2113, 1294, 5668, 4210, 3481, 35, 5348, 5044, 2778, 5757, 2860, 4946, 3404, 1946, 1541, 1622, 1679, 1679, 
5324, 2415, 1761, 4567, 4625, 2924, 737, 1466, 1464, 1701, 1302, 4944, 673, 713, 969, 5650, 2337, 4769, 3653, 3647, 6320, 4973, 3643, 5827, 4595, 4463, 2195, 8, 764, 1789, 4299, 4299, 4299, 4299, 4299, 1788, 5943, 5325, 5326, 3840, 1700, 1616, 2125, 4317, 3617, 5804, 4825, 5100, 5019, 4760, 2303, 2222, 2264, 6454, 6454, 6381, 3962, 748, 3831, 47, 2867, 3590, 4319, 3857, 5020, 5021, 2303, 2327, 3641, 3643, 713, 68, 2936, 5448, 6373, 6455, 4743, 4527, 2004, 5405, 4461, 582, 566, 960, 2220, 2697, 240, 786, 5021, 2267, 1851, 5649, 3303, 4289, 6556, 5344, 4605, 4779, 2678, 515, 5987, 2189, 5840, 2195, 5102, 6316, 6313, 3884, 4598, 4382, 737, 1493, 4299, 4299, 4299, 4299, 4299, 4299, 4299, 4299]], device=device, dtype=torch.long) + this_uuid = 'xxx' + model = cosyvoice.model + model.tts_speech_token_dict[this_uuid], model.llm_end_dict[this_uuid] = [], False + model.hift_cache_dict[this_uuid] = None + this_tts_speech = model.token2wav(token=speech_tokens, + prompt_token=torch.zeros(1, 0, dtype=torch.int32,device=device), + prompt_feat=torch.zeros(1, 0, 80,device=device), + embedding=flow_embedding, + uuid=this_uuid, + token_offset=0, + finalize=True, + speed=1.0).cpu() + torchaudio.save('{}_{}.wav'.format('from_speech_tokens', 0), this_tts_speech, cosyvoice.sample_rate) + \ No newline at end of file diff --git a/third_party/train_scripts/train_functions.py b/third_party/train_scripts/train_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..b496d4fbae2297dffe63de6cf66475ba2fb256ff --- /dev/null +++ b/third_party/train_scripts/train_functions.py @@ -0,0 +1,33 @@ +import torch +import torch.nn as nn + +def train_step(model,batch): + batch = {k: v.to(model.device) for k, v in batch.items()} + output = model(batch) + return output + +def alter_emb_and_head(model, vocab_size, audio_token_size): + old_embeddings = model.model.embeddings + if vocab_size < model.config.vocab_size: + print(f'No need to enlarge the vocabulary size: {model.config.vocab_size}') + + # 创建并初始化新的 embedding 层 + print(f'Enlarging vocabulary size from {model.config.vocab_size} to {vocab_size}') + embedding_dim = old_embeddings.weight.size(1) + current_vocab_size = old_embeddings.weight.size(0) + new_embeddings = nn.Embedding(vocab_size, embedding_dim) + with torch.no_grad(): + new_embeddings.weight[:current_vocab_size, :] = old_embeddings.weight.data + std = old_embeddings.weight.std().item() + new_embeddings.weight[current_vocab_size:, :].normal_(mean=0.0, std=std) + model.model.embeddings = new_embeddings + model.config.vocab_size = vocab_size + # old_head = model.lm_head + head_dim = model.config.hidden_size + new_head = nn.Linear(head_dim, audio_token_size+1) + with torch.no_grad(): + #init the new head with random values + new_head.weight.normal_(mean=0.0, std=0.02) + model.lm_head = new_head + print(f'Enlarging head size from {head_dim} to {audio_token_size}') + return model \ No newline at end of file diff --git a/third_party/train_scripts/train_llm.py b/third_party/train_scripts/train_llm.py new file mode 100644 index 0000000000000000000000000000000000000000..fbce1b3a63c020e9082c6e4e9f7890526e993ccc --- /dev/null +++ b/third_party/train_scripts/train_llm.py @@ -0,0 +1,513 @@ +from ast import mod +from calendar import c +import os +from turtle import up +import torch +# import torch._dynamo +# torch._dynamo.config.suppress_errors = True +from torch.utils.data import DataLoader, DistributedSampler +import deepspeed +import datasets +import wandb +from transformers import HfArgumentParser, 
AutoTokenizer,AutoModelForCausalLM +from dataclasses import dataclass, field +import logging +import json +from typing import Optional +from functools import partial +import time +import regex as re +from data.utils.llm_dataset import load_jsonl_dataset, collate_fn +from model.llm.llm import RWKV7LM +from train_scripts.train_functions import train_step,alter_emb_and_head +logger = logging.getLogger(__name__) +@dataclass +class ScriptArguments: + """Command line arguments for training script""" + data_file: str = field( + default=None, + metadata={"help": "Path to training data file (JSONL format)"} + ) + model_name: str = field( + default=None, + metadata={"help": "Path or name of pretrained model"} + ) + output_dir: str = field( + default=None, + metadata={"help": "Directory to save trained model"} + ) + deepspeed_config: Optional[str] = field( + default=None, + metadata={"help": "Path to DeepSpeed config file"} + ) + num_epochs: int = field( + default=3, + metadata={"help": "Number of training epochs"} + ) + per_device_train_batch_size: int = field( + default=1, + metadata={"help": "Training batch size per device"} + ) + learning_rate: float = field( + default=1e-5, + metadata={"help": "Learning rate"} + ) + learning_rate_final: float = field( + default=1e-6, + metadata={"help": "Final learning rate at the end of training"} + ) + weight_decay: float = field( + default=0.01, + metadata={"help": "Weight decay"} + ) + warmup_steps: int = field( + default=100, + metadata={"help": "Number of warmup steps"} + ) + max_length: int = field( + default=2048, + metadata={"help": "Maximum length of input sequence"} + ) + logging_steps: int = field( + default=10, + metadata={"help": "Number of steps between logging"} + ) + save_steps: int = field( + default=500, + metadata={"help": "Number of steps between saving checkpoints"} + ) + local_rank: int = field( + default=-1, + metadata={"help": "Local rank for distributed training"} + ) + seed: int = field( + default=42, + metadata={"help": "Random seed"} + ) + wandb_project: str = field( + default="grpo-training", + metadata={"help": "Name of W&B project"} + ) + wandb_run_name: str = field( + default=None, + metadata={"help": "Name of W&B run"} + ) + gradient_checkpointing: bool = field( + default=False, + metadata={"help": "Use gradient checkpointing"} + ) + + chunk_size : int = field( + default=1024, + metadata={"help": "chunk size"} + ) + + batch_chunk_size: int = field( + default=2, + metadata={"help": "batch chunk size"} + ) + + ds_stage: int = field( + default=3, + metadata={"help": "DeepSpeed stage"} + ) + + ds_param_offload : bool = field( + default=True, + metadata={"help": "DeepSpeed parameter offload"} + ) + + ds_optimizer_offload : bool = field( + default=True, + metadata={"help": "DeepSpeed optimizer offload"} + ) + speech_token_size: int = field( + default=6561, + metadata={"help": "speech token size"} + ) + + drop_out: float = field( + default=0.02, + metadata={"help": "drop out"} + ) + + drop_prompt_ratio : float = field( + default=0.5, + metadata={"help": "drop prompt ratio"} + ) + + ckpt_file: Optional[str] = field( + default=None, + metadata={"help": "Path to model checkpoint file"} + ) + +def setup_logging(local_rank): + """Configure logging""" + if local_rank <= 0: + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO if 'LOG_LEVEL' not in os.environ else os.environ['LOG_LEVEL'], + ) + +def configure_optimizer(model, args): + lr_1x = set() + for n, 
p in model.named_parameters(): + if not p.requires_grad: + continue + lr_1x.add(n) + + lr_1x = sorted(list(lr_1x)) + param_dict = {n: p for n, p in model.named_parameters()} + + optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] + + if args.ds_optimizer_offload: + from deepspeed.ops.adam import DeepSpeedCPUAdam + optimizer = DeepSpeedCPUAdam(optim_groups, lr=args.learning_rate, betas=(0.9, 0.95), eps=1e-18, bias_correction=True, adamw_mode=True, amsgrad=False,weight_decay=args.weight_decay) + else: + from deepspeed.ops.adam import FusedAdam + optimizer = FusedAdam(optim_groups, lr=args.learning_rate, betas=(0.9, 0.95), eps=1e-18, bias_correction=True, adam_w_mode=True, amsgrad=False, weight_decay=args.weight_decay) + + return optimizer + +def save_checkpoint(model_engine, output_dir, epoch, step,logger): + """Save model checkpoint""" + if os.path.exists(output_dir): + if model_engine.local_rank == 0: + checkpoints = os.listdir(output_dir) + #only list the directories + checkpoints = [f for f in checkpoints if os.path.isdir(os.path.join(output_dir, f))] + #sort by creation time + checkpoints.sort(key=lambda x: os.path.getctime(os.path.join(output_dir, x))) + if len(checkpoints) > 2: + print(f'deleting older checkpoints {checkpoints[0]}') + import shutil + shutil.rmtree(os.path.join(output_dir, checkpoints[0])) + output_dir = f"{output_dir}/epoch_{epoch}_step_{step}" + print(f'saving checkpoint to {output_dir}') + if model_engine.local_rank == 0 and not os.path.exists(output_dir): + os.makedirs(output_dir) + + model_engine.save_checkpoint(output_dir) +def get_lr_scheduler(optimizer, total_steps, warmup_steps, learning_rate, learning_rate_final): + """Create a linear learning rate scheduler that goes from learning_rate to learning_rate_final""" + from transformers import get_linear_schedule_with_warmup + + def lr_lambda(current_step): + if current_step < warmup_steps: + # 在预热阶段,从0线性增加到learning_rate + return float(current_step) / float(max(1, warmup_steps)) + else: + # 预热后,从learning_rate线性减少到learning_rate_final + progress = float(current_step - warmup_steps) / float(max(1, total_steps - warmup_steps)) + return max(learning_rate_final / learning_rate, 1.0 - progress * (1.0 - learning_rate_final / learning_rate)) + + return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda) +def main(): + # Parse arguments + parser = HfArgumentParser(ScriptArguments) + args = parser.parse_args_into_dataclasses()[0] + + # Setup environment variables + local_rank = int(os.getenv('LOCAL_RANK', '0')) + world_size = int(os.getenv('WORLD_SIZE', '1')) + is_main_process = local_rank == 0 + device = torch.device(f'cuda:{local_rank}') + + # Setup logging + setup_logging(local_rank) + logger = logging.getLogger(__name__) + + if is_main_process: + logger.info(f"Arguments: {args}") + + # Set random seed + torch.manual_seed(args.seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(args.seed) + + # Initialize tokenizer + if is_main_process: + logger.info(f"Loading tokenizer from {args.model_name}") + tokenizer = AutoTokenizer.from_pretrained(args.model_name,trust_remote_code=True) + # special_tokens = { + # 'pad_token': '<|rwkv_tokenizer_end_of_text|>', + # 'additional_special_tokens': [ + # '<|endofprompt|>', + # '[breath]', '', '', '[noise]', + # '[laughter]', '[cough]', '[clucking]', '[accent]', + # '[quick_breath]', + # "", "", + # "[hissing]", "[sigh]", "[vocalized-noise]", + # "[lipsmack]", "[mn]" + # ] + # } + special_tokens = { + 'pad_token': 
'<|rwkv_tokenizer_end_of_text|>', + 'additional_special_tokens': [ + '<|endofprompt|>', + '[breath]', '', '', '[noise]', + '[laughter]', '[cough]', '[clucking]', '[accent]', + '[quick_breath]', + "", "", + "[hissing]", "[sigh]", "[vocalized-noise]", + "[lipsmack]", "[mn]" + ] + } + tokenizer.add_special_tokens(special_tokens) + vocab_size = tokenizer.vocab_size + # Load dataset + if is_main_process: + logger.info(f"Loading dataset from {args.data_file}") + dataset = load_jsonl_dataset(args.data_file,tokenizer) + + # Setup data loading + if is_main_process: + logger.info(f"Creating DataLoader with batch size {args.per_device_train_batch_size}, world size {world_size}") + sampler = DistributedSampler( + dataset, + num_replicas=world_size, + rank=local_rank, + shuffle=True, + seed=args.seed + ) + + data_collator = partial(collate_fn,tokenizer=tokenizer,max_length=args.max_length,pad_to_max_length=False,drop_prompt_audio_rate=0.5) + dataloader = DataLoader( + dataset, + batch_size=args.per_device_train_batch_size, + sampler=sampler, + num_workers=4, + pin_memory=True, + drop_last=True, + collate_fn=data_collator + ) + + # Load DeepSpeed config + if args.deepspeed_config: + if is_main_process: + logger.info(f"Loading DeepSpeed config from {args.deepspeed_config}") + with open(args.deepspeed_config, 'r') as f: + ds_config = json.load(f) + else: + # Default DeepSpeed config is using ZeRO-3 with CPU offload + if is_main_process: + logger.info("Using default DeepSpeed config") + train_batch_size = args.per_device_train_batch_size * world_size* 1 + ds_config = { + "distributed_backend": "nccl", + "train_batch_size": train_batch_size, + "bf16": { + "enabled": True + }, + "zero_optimization": { + "stage": args.ds_stage, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_prefetch_bucket_size": 5e6, + "memory_efficient_linear": True, + "stage3_param_persistence_threshold": 1e4, + "offload_param": { + "device": "cpu", + "pin_memory": True, + "buffer_count": 4, + "buffer_size": 1e8 + }, + "offload_optimizer": { + "device": "cpu", + "pin_memory": True, + "buffer_count": 4 + }, + "allgather_partitions": True, + "reduce_scatter": True, + "reduce_bucket_size": 5e6, + "overlap_comm": True, + "contiguous_gradients": True + }, + "zero_force_ds_cpu_initialization": True, + "gradient_checkpointing": args.gradient_checkpointing, + "dump_state": True + } + + #Init model with deepspeed + if is_main_process: + logger.info(f"Initializing model with DeepSpeed config") + model = AutoModelForCausalLM.from_pretrained(args.model_name, torch_dtype=torch.bfloat16,trust_remote_code=True) + model = alter_emb_and_head(model,vocab_size,args.speech_token_size) + if args.gradient_checkpointing: + model.gradient_checkpointing_enable() + model.train() + llm_input_size = model.config.hidden_size + llm_output_size = model.config.hidden_size + model = RWKV7LM(llm_input_size,llm_output_size,args.speech_token_size,model,None,drop_ratio=args.drop_out) + if args.ckpt_file is not None: + if is_main_process: + logger.info(f"Loading checkpoint from {args.ckpt_file}") + info = model.load_state_dict(torch.load(args.ckpt_file)) + if is_main_process: + logger.info(f"Loaded checkpoint info: {info}") + model.train() + if is_main_process: + logger.info(f'Enable gradient checkpointing: {args.gradient_checkpointing}') + for n,p in model.named_parameters(): + p.requires_grad = True + if is_main_process: + for n,p in model.named_parameters(): + print(f'{n} requires grad: {p.requires_grad}') + logger.info(f'start 
configuring optimizer') + optimizer = configure_optimizer(model, args) + # Initialize DeepSpeed for main model + model_ds_config = ds_config.copy() + if not args.ds_param_offload: + del model_ds_config["zero_optimization"]["offload_param"] + if not args.ds_optimizer_offload: + del model_ds_config["zero_optimization"]["offload_optimizer"] + + # 在初始化DeepSpeed之前计算总步数 + total_steps = len(dataloader) * args.num_epochs + + # 创建自定义的学习率调度器 + lr_scheduler = get_lr_scheduler( + optimizer, + total_steps, + args.warmup_steps, + args.learning_rate, + args.learning_rate_final + ) + model_engine, optimizer, _, scheduler = deepspeed.initialize( + model=model, + config=model_ds_config, + model_parameters=model.parameters(), + optimizer=optimizer, + lr_scheduler=lr_scheduler + ) + if is_main_process: + logger.info("Model initialized") + del model + if is_main_process: + from tqdm import tqdm + pbar = tqdm(total=len(dataloader)) + wandb.init( + project=args.wandb_project, + name=args.wandb_run_name, + config=vars(args) + ) + #delete the output_dir if it exists + if os.path.exists(args.output_dir) and model_engine.local_rank == 0: + import shutil + shutil.rmtree(args.output_dir) + total_loss = 0.0 + total_steps = 0 + total_acc = 0.0 + all_tokens = 0 + for epoch in range(args.num_epochs): + if is_main_process: + update_time = time.time() + logger.info(f"Epoch {epoch} starts training") + # 使用时间戳生成随机种子 + time_seed = int(time.time() * 1000) & 0xffffffff # 获取毫秒级时间戳并转换为32位整数 + sampler.set_epoch(time_seed) # 使用时间戳作为种子 + + for batch_idx,batch in enumerate(dataloader): + if is_main_process: + speech_token_shape = batch['speech_token'].shape + text_token_shape = batch['text_token'].shape + logger.debug(f'speech_token_shape: {speech_token_shape} text_token_shape: {text_token_shape} at batch_idx: {batch_idx}') + skip = batch['skip'] + if skip: + all_length = batch['text_token'].shape[1] + batch['speech_token'].shape[1] + if all_length > args.max_length: + #truncate the sppech_token first + truncated_length = args.max_length - batch['speech_token'].shape[1] + speech_token = batch['speech_token'] + batch['speech_token'] = speech_token[:,:truncated_length] + batch.pop('skip') + output = train_step(model_engine,batch) + loss = output['loss'] + acc = output['acc'] + if is_main_process: + logger.debug(f'loss: {loss} acc: {acc}') + + # 首先检测 NaN + is_nan_loss = torch.isnan(loss) or torch.isinf(loss) + # 确保所有进程获得相同的 is_nan_loss 值 + is_nan_loss_tensor = torch.tensor([1.0 if is_nan_loss else 0.0], device=model_engine.device) + # 所有进程同步这个张量以获取一致决策 + torch.distributed.all_reduce(is_nan_loss_tensor, op=torch.distributed.ReduceOp.MAX) + is_nan_loss = bool(is_nan_loss_tensor.item()) + + if is_nan_loss: + # 使用一个安全的替代 loss 进行 backward + # 这个 loss 不会影响模型(乘以0),但会确保所有节点都执行 backward + logger.info(f"NaN loss detected at batch {batch_idx}, using safe zero loss instead") + logger.info(f'batch data is {batch}') + safe_loss = loss * 0.0 + if is_main_process: + logger.warning(f"NaN loss detected at batch {batch_idx}, using safe zero loss instead") + wandb.log({ + "nan_detected": 1, + "epoch": epoch, + "step": total_steps + }) + + # 所有节点都执行 backward,但使用的是零梯度 + model_engine.backward(safe_loss) + model_engine.step() # 这步实际上不会改变参数,因为梯度是零 + else: + # 正常情况,使用实际 loss + model_engine.backward(loss) + model_engine.step() + + if batch_idx % args.save_steps == 0 and batch_idx > 0: + if args.ds_stage == 3 or args.ds_stage == 2: + save_checkpoint(model_engine, args.output_dir, epoch, batch_idx,logger) + # 累计统计 + if is_main_process: + elapsed_time = 
time.time()-update_time + total_loss += loss.item() + total_acc += acc.item() + total_steps += 1 + + # 计算平均值 + avg_loss = total_loss / total_steps + avg_acc = total_acc / total_steps + tokens = (batch['speech_token'].shape[1]+batch['text_token'].shape[1])*args.per_device_train_batch_size*world_size + all_tokens += tokens + kts = tokens / elapsed_time / 1e3 + # 记录到wandb + current_lr = optimizer.param_groups[0]['lr'] + wandb.log({ + "loss": loss.item(), + "avg_loss": avg_loss, + "epoch": epoch, + "step": total_steps, + "acc": acc.item(), + "avg_acc": avg_acc, + "KT/s": kts, + "Gtokens": all_tokens/1e9, + "learning_rate": current_lr + }) + + pbar.update(1) + pbar.set_postfix({ + 'loss': loss.item(), + 'avg_loss': avg_loss, + 'acc': acc.item(), + 'avg_acc': avg_acc, + 'lr': current_lr + }) + #save checkpoint at the end of each epoch + # if (args.ds_stage != 3 and is_main_process) or (args.ds_stage == 3): + if args.ds_stage == 3 or args.ds_stage == 2: + epoch_checkpoint_dir = f"{args.output_dir}/epoch_{epoch}" + if not os.path.exists(epoch_checkpoint_dir): + os.makedirs(epoch_checkpoint_dir) + print(f'saving checkpoint to {epoch_checkpoint_dir}') + model_engine.save_checkpoint(epoch_checkpoint_dir) + # 训练结束后关闭wandb + if is_main_process: + wandb.finish() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/third_party/utils/convert_from_fp32_to_bf16.py b/third_party/utils/convert_from_fp32_to_bf16.py new file mode 100644 index 0000000000000000000000000000000000000000..26b1d6eb8bf697c7c93a787dbe1dbc413e74c36c --- /dev/null +++ b/third_party/utils/convert_from_fp32_to_bf16.py @@ -0,0 +1,16 @@ +import torch +import sys +original_pt_file = sys.argv[1] +output_pt_file = sys.argv[2] +print(f'Converting {original_pt_file} to {output_pt_file}') +#load the orginal model and convert all fp32 parameters to bf16 +print(f'Loading {original_pt_file}') +model = torch.load(original_pt_file) +new_states = {} +for k,v in model.items(): + if v.dtype == torch.float32: + v = v.bfloat16() + new_states[k] = v +print(f'Saving {output_pt_file}') +torch.save(new_states,output_pt_file) +print(f'Finished converting {original_pt_file} to {output_pt_file}') \ No newline at end of file diff --git a/third_party/utils/utilities.py b/third_party/utils/utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..cb28460c1f8209e6f15ed8ddb8e53f01a8592a7d --- /dev/null +++ b/third_party/utils/utilities.py @@ -0,0 +1,18 @@ + +from transformers import AutoTokenizer +def get_tokenizer(model_dir): + tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True) + special_tokens = { + 'pad_token': '<|rwkv_tokenizer_end_of_text|>', + 'additional_special_tokens': [ + '<|endofprompt|>', + '[breath]', '', '', '[noise]', + '[laughter]', '[cough]', '[clucking]', '[accent]', + '[quick_breath]', + "", "", + "[hissing]", "[sigh]", "[vocalized-noise]", + "[lipsmack]", "[mn]" + ] + } + tokenizer.add_special_tokens(special_tokens) + return tokenizer \ No newline at end of file diff --git a/third_party/zero_2_0.wav b/third_party/zero_2_0.wav new file mode 100644 index 0000000000000000000000000000000000000000..c26f4b8ac8b7d7fb22163b728f1c65ed6f40ad33 --- /dev/null +++ b/third_party/zero_2_0.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bed32090505be5de1a096716c1dd67b04668837bf22a872f36a005eaa9e819f0 +size 115278 diff --git a/train.sh b/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..27b5c18e72fb70d4df54f660b29f2a5734a7fdf5 --- 
/dev/null +++ b/train.sh @@ -0,0 +1 @@ +export PYTHONPATH=/home/yueyulin/github/CosyVoice/:/home/yueyulin/github/RWKVTTS/ \ No newline at end of file diff --git a/zero_0_0.wav b/zero_0_0.wav new file mode 100644 index 0000000000000000000000000000000000000000..f1b5e623d2ebbea1af5a54188f318009834b62b1 --- /dev/null +++ b/zero_0_0.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e01dd99e7f4da9f4242aff4765c6a2b85be6145dd6be5bb1448e912fc9b0e2f3 +size 1202000 diff --git a/zero_1_0.wav b/zero_1_0.wav new file mode 100644 index 0000000000000000000000000000000000000000..88b8e5f73bb64d01c7757cc97d71c7f68db724ff --- /dev/null +++ b/zero_1_0.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a61c6697e627f6dadc4e4874bc1f4085f7dc3cbdd247625a5f3f3f84bbf1bb0 +size 1401680 diff --git a/zero_2_0.wav b/zero_2_0.wav new file mode 100644 index 0000000000000000000000000000000000000000..84e83526bb24db5b660ae271b55941bd43c8c8b1 --- /dev/null +++ b/zero_2_0.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dd7552b3e5ee62f3855d286fcbc6a68883594b8b4929072323f1176d8d8641d +size 1213520 diff --git a/zero_3_0.wav b/zero_3_0.wav new file mode 100644 index 0000000000000000000000000000000000000000..347e2abd2d6355c0288a58b419166971e5ecc5f9 --- /dev/null +++ b/zero_3_0.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41b2fa30cbc8cadadf552770f8bb63ac53a384d68d4af9ba7ae138584a934bb1 +size 1409360 diff --git a/zero_shot_0.wav b/zero_shot_0.wav new file mode 100644 index 0000000000000000000000000000000000000000..dfa7c7a12a3b9307dc3118093f5e40d4c547c896 --- /dev/null +++ b/zero_shot_0.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd7db61388b6e29fcd5baeca4b4f46450df749d836fafb3ecdc409e4718cfc13 +size 1148240 diff --git a/zero_shot_prompt.wav b/zero_shot_prompt.wav new file mode 100644 index 0000000000000000000000000000000000000000..e8746429bce4bd98c864bd0e166e64f3600ebd58 --- /dev/null +++ b/zero_shot_prompt.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd199eb7109fd6ce9943cb297e3cf350c1073af014063dfadbdc100230526243 +size 111496 diff --git a/zero_shot_prompt.webm b/zero_shot_prompt.webm new file mode 100644 index 0000000000000000000000000000000000000000..f728e3f231f6fcb19adace9577559ac1ef22e3d2 Binary files /dev/null and b/zero_shot_prompt.webm differ