Upload folder using huggingface_hub
Files changed:
- config.json (+5 -23)
- pytorch_model.bin.bin (+3 -0)
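The commit message above is the default one emitted by the `huggingface_hub` Python client when a folder is pushed in a single commit. A minimal sketch of the kind of call that produces such a commit; the `folder_path` and `repo_id` below are placeholders, not values taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Upload every file in the local folder as a single commit.
# "Upload folder using huggingface_hub" is the default commit message.
api.upload_folder(
    folder_path="./qwen-14b-checkpoint",      # placeholder local path
    repo_id="your-username/your-model-repo",  # placeholder repo id
    repo_type="model",
)
```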
config.json (CHANGED)

@@ -1,16 +1,16 @@
 {
-  "_name_or_path": "Qwen-14B",
+  "_name_or_path": "Qwen/Qwen-14B",
   "architectures": [
     "QWenLMHeadModel"
   ],
   "attn_dropout_prob": 0.0,
   "auto_map": {
-    "AutoConfig": "configuration_qwen.QWenConfig",
-    "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
+    "AutoConfig": "Qwen/Qwen-14B--configuration_qwen.QWenConfig",
+    "AutoModelForCausalLM": "Qwen/Qwen-14B--modeling_qwen.QWenLMHeadModel"
   },
-  "bf16": false,
+  "bf16": true,
   "emb_dropout_prob": 0.0,
-  "fp16": true,
+  "fp16": false,
   "fp32": false,
   "hidden_size": 5120,
   "initializer_range": 0.02,
@@ -23,24 +23,6 @@
   "num_attention_heads": 40,
   "num_hidden_layers": 40,
   "onnx_safe": null,
-  "quantization_config": {
-    "batch_size": 1,
-    "bits": 8,
-    "block_name_to_quantize": null,
-    "damp_percent": 0.01,
-    "dataset": null,
-    "desc_act": false,
-    "disable_exllama": false,
-    "group_size": 128,
-    "model_seqlen": null,
-    "module_name_preceding_first_block": null,
-    "pad_token_id": null,
-    "quant_method": "gptq",
-    "sym": true,
-    "tokenizer": null,
-    "true_sequential": true,
-    "use_cuda_fp16": false
-  },
   "rotary_emb_base": 10000,
   "rotary_pct": 1.0,
   "scale_attn_weights": true,
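The removed `quantization_config` block is the serialized form of an 8-bit GPTQ setup; with it gone, `transformers` no longer treats the checkpoint as quantized at load time. For reference, a sketch that rebuilds the removed block with `transformers.GPTQConfig`, values copied from the removed lines above (this mirrors the old config, it is not part of the commit):

```python
from transformers import GPTQConfig

# Rebuild of the removed 8-bit GPTQ block, values copied from the diff above.
gptq_config = GPTQConfig(
    bits=8,
    group_size=128,
    damp_percent=0.01,
    desc_act=False,
    sym=True,
    true_sequential=True,
    use_cuda_fp16=False,
    batch_size=1,
)
print(gptq_config.to_dict())  # matches the keys of the removed JSON block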
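The new `auto_map` entries use the `repo--module.Class` form, which tells `transformers` to fetch the custom `QWen*` classes from the `Qwen/Qwen-14B` repository rather than from this repo, and `"bf16": true` marks the weights as bfloat16. A minimal loading sketch under those assumptions; the `repo_id` is a placeholder for the repo this commit targets:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/your-model-repo"  # placeholder repo id

# trust_remote_code is required because QWenLMHeadModel is custom code;
# the auto_map above resolves it from the Qwen/Qwen-14B repository.
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # matches "bf16": true in the new config
)
```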
pytorch_model.bin.bin (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd3f1e669764bd161c6eeebc0cc70659173e4bb7c011474efcd9f2d47d1c880d
+size 16029839536
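The added file is a Git LFS pointer: the repository itself stores only this three-line stub, while the ~16 GB payload lives in LFS storage, addressed by its SHA-256. A small stdlib-only sketch that verifies a downloaded copy against the pointer; the local path is a placeholder:

```python
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so the ~16 GB payload never sits in memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Values copied from the LFS pointer above.
EXPECTED_OID = "cd3f1e669764bd161c6eeebc0cc70659173e4bb7c011474efcd9f2d47d1c880d"
EXPECTED_SIZE = 16029839536

path = "pytorch_model.bin.bin"  # placeholder: path to the downloaded weights
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert sha256_of(path) == EXPECTED_OID, "sha256 mismatch"
print("LFS pointer verified")
```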