```json
{
  "_name_or_path": "checkpoints/pretrain-llava-7b-640-eva02det-qformer-q256l6dim256-dp/final",
  "architectures": [
    "LlavaLlamaForCausalLM"
  ],
  "bos_token_id": 1,
  "eos_token_id": 2,
  "freeze_mm_mlp_adapter": false,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "image_aspect_ratio": "pad",
  "image_grid_pinpoints": null,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "max_position_embeddings": 4096,
  "mm_hidden_size": 1024,
  "mm_projector_lr": null,
  "mm_projector_type": "qformer-petr",
  "mm_use_im_patch_token": false,
  "mm_use_im_start_end": false,
  "mm_vision_select_feature": "patch",
  "mm_vision_select_layer": -1,
  "mm_vision_tower": "eva02-l-16",
  "model_type": "llava",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 32,
  "pad_token_id": 0,
  "pretraining_tp": 1,
  "qformer_embed_dim": 256,
  "qformer_query_number": 256,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "subpatch_merging_method": "flatten",
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.33.3",
  "tune_mm_mlp_adapter": false,
  "use_cache": true,
  "use_mm_proj": true,
  "use_tile_input": false,
  "vision_tower_enable_window_attn": true,
  "vision_tower_input_size": 640,
  "vision_tower_pretrained_from": "/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/grounding/shimin/checkpoints/eva_vit/eva-02/det/eva02_L_coco_det_sys_o365.pth",
  "vocab_size": 32000
}
```
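
As a minimal sketch of inspecting this file, the snippet below reads a few of the multimodal fields with Python's standard `json` module; the local file name `config.json` is an assumption about where the config above is saved.

```python
import json

# Assumption: the config above is stored locally as config.json
with open("config.json") as f:
    cfg = json.load(f)

# A few of the multimodal settings defined in the config
print(cfg["mm_projector_type"])        # qformer-petr
print(cfg["qformer_query_number"])     # 256
print(cfg["vision_tower_input_size"])  # 640
```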