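# XTuner / MMEngine configuration (expanded dump) for fine-tuning
# InternVL2_5-2B on a local layout-OCR dataset: LoRA adapters on a frozen,
# quantized LLM, trained with DeepSpeed ZeRO-2 in bf16. The absolute paths
# below are machine-specific.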
accumulative_counts = 2
batch_size = 1
betas = (
    0.9,
    0.999,
)
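# batch_size and accumulative_counts together give the per-GPU effective batch
# size per optimizer step: 1 x 2 = 2 samples (times the number of GPUs for the
# global batch size).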
custom_hooks = [
    dict(
        tokenizer=dict(
            pretrained_model_name_or_path='/data/wangqun/models/InternVL2_5-2B',
            trust_remote_code=True,
            type='transformers.AutoTokenizer.from_pretrained'),
        type='xtuner.engine.hooks.DatasetInfoHook'),
]
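# custom_hooks: DatasetInfoHook decodes and logs a few training samples with
# the tokenizer above at startup, so the prompt template and tokenization can
# be sanity-checked before training.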
data_path = '/home/wangqun/data/layout_ocr_multi.json'
dataloader_num_workers = 4
default_hooks = dict(
    checkpoint=dict(
        by_epoch=False,
        interval=1000,
        max_keep_ckpts=-1,
        save_optimizer=False,
        type='mmengine.hooks.CheckpointHook'),
    logger=dict(
        interval=10,
        log_metric_by_epoch=False,
        type='mmengine.hooks.LoggerHook'),
    param_scheduler=dict(type='mmengine.hooks.ParamSchedulerHook'),
    sampler_seed=dict(type='mmengine.hooks.DistSamplerSeedHook'),
    timer=dict(type='mmengine.hooks.IterTimerHook'))
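# default_hooks: iteration-based checkpointing every 1000 iterations without
# optimizer state, keeping every checkpoint (max_keep_ckpts=-1), and logging
# every 10 iterations.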
env_cfg = dict(
    cudnn_benchmark=False,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
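# env_cfg: NCCL backend for distributed training; worker processes are started
# with 'fork' and OpenCV multithreading is disabled in them.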
image_folder = '/'
launcher = 'none'
llava_dataset = dict(
    data_paths='/home/wangqun/data/layout_ocr_multi.json',
    image_folders='/',
    max_length=8192,
    model_path='/data/wangqun/models/InternVL2_5-2B',
    template='xtuner.utils.PROMPT_TEMPLATE.internlm2_chat',
    type='xtuner.dataset.InternVL_V1_5_Dataset')
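# llava_dataset: with image_folders='/', the image paths stored in
# layout_ocr_multi.json are effectively treated as absolute paths; sequences
# are limited to max_length=8192 tokens. The same dict is repeated inside
# train_dataloader below.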
load_from = None
log_level = 'DEBUG'
log_processor = dict(by_epoch=False)
lr = 2e-05
max_epochs = 4
max_length = 8192
max_norm = 1
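# The scalars above (lr, max_epochs, max_length, max_norm) are the top-level
# knobs; the expanded blocks below (optim_wrapper, train_cfg, strategy,
# datasets) repeat their values.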
model = dict(
    freeze_llm=True,
    freeze_visual_encoder=True,
    llm_lora=dict(
        lora_alpha=256,
        lora_dropout=0.05,
        r=128,
        target_modules=None,
        task_type='CAUSAL_LM',
        type='peft.LoraConfig'),
    model_path='/data/wangqun/models/InternVL2_5-2B',
    quantization_llm=True,
    quantization_vit=False,
    type='xtuner.model.InternVL_V1_5')
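# model: QLoRA-style setup. The base LLM is frozen and loaded quantized
# (quantization_llm=True) and the vision encoder is frozen in full precision;
# only the LoRA adapters train, with r=128 and lora_alpha=256 (scaling
# alpha/r = 2). target_modules=None leaves the choice of target layers to the
# framework defaults.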
optim_type = 'torch.optim.AdamW'
optim_wrapper = dict(
    optimizer=dict(
        betas=(
            0.9,
            0.999,
        ),
        lr=2e-05,
        type='torch.optim.AdamW',
        weight_decay=0.05),
    type='DeepSpeedOptimWrapper')
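# optim_wrapper: AdamW at lr=2e-05 with weight_decay=0.05, wrapped by
# DeepSpeedOptimWrapper so DeepSpeed drives gradient accumulation, clipping,
# and the optimizer step.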
param_scheduler = [
    dict(
        begin=0,
        by_epoch=True,
        convert_to_iter_based=True,
        end=0.12,
        start_factor=1e-05,
        type='mmengine.optim.LinearLR'),
    dict(
        begin=0.12,
        by_epoch=True,
        convert_to_iter_based=True,
        end=4,
        eta_min=0.0,
        type='mmengine.optim.CosineAnnealingLR'),
]
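# param_scheduler: linear warmup then cosine decay, defined in epochs and
# converted to iterations. Warmup runs for warmup_ratio * max_epochs
# = 0.03 * 4 = 0.12 epochs from lr * 1e-05 up to lr; cosine annealing then
# decays to eta_min=0.0 by epoch 4.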
path = '/data/wangqun/models/InternVL2_5-2B'
prompt_template = 'xtuner.utils.PROMPT_TEMPLATE.internlm2_chat'
randomness = dict(deterministic=False, seed=None)
resume = False
runner_type = 'FlexibleRunner'
save_steps = 1000
save_total_limit = -1
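# save_steps / save_total_limit mirror the CheckpointHook interval and
# max_keep_ckpts settings above.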
strategy = dict(
    config=dict(
        bf16=dict(enabled=True),
        fp16=dict(enabled=False, initial_scale_power=16),
        gradient_accumulation_steps='auto',
        gradient_clipping='auto',
        train_micro_batch_size_per_gpu='auto',
        zero_allow_untested_optimizer=True,
        zero_force_ds_cpu_optimizer=False,
        zero_optimization=dict(overlap_comm=True, stage=2)),
    exclude_frozen_parameters=True,
    gradient_accumulation_steps=2,
    gradient_clipping=1,
    sequence_parallel_size=1,
    train_micro_batch_size_per_gpu=1,
    type='xtuner.engine.DeepSpeedStrategy')
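# strategy: DeepSpeed ZeRO stage 2 with bf16 and overlapped communication. The
# 'auto' entries in the inner config are presumably resolved from the explicit
# values below them (accumulation 2, clipping 1, micro-batch 1).
# exclude_frozen_parameters=True keeps the frozen base weights out of saved
# checkpoints, so only the LoRA parameters are written.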
tokenizer = dict(
    pretrained_model_name_or_path='/data/wangqun/models/InternVL2_5-2B',
    trust_remote_code=True,
    type='transformers.AutoTokenizer.from_pretrained')
train_cfg = dict(max_epochs=4, type='xtuner.engine.runner.TrainLoop')
train_dataloader = dict(
    batch_size=1,
    collate_fn=dict(type='xtuner.dataset.collate_fns.default_collate_fn'),
    dataset=dict(
        data_paths='/home/wangqun/data/layout_ocr_multi.json',
        image_folders='/',
        max_length=8192,
        model_path='/data/wangqun/models/InternVL2_5-2B',
        template='xtuner.utils.PROMPT_TEMPLATE.internlm2_chat',
        type='xtuner.dataset.InternVL_V1_5_Dataset'),
    num_workers=4,
    sampler=dict(
        length_property='modality_length',
        per_device_batch_size=2,
        type='xtuner.dataset.samplers.LengthGroupedSampler'))
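# train_dataloader: LengthGroupedSampler groups samples of similar
# 'modality_length' to reduce padding; its per_device_batch_size of 2 matches
# batch_size * accumulative_counts (1 * 2).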
visualizer = dict(
    type='mmengine.visualization.Visualizer',
    vis_backends=[
        dict(type='mmengine.visualization.TensorboardVisBackend'),
    ])
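# visualizer: training curves go to TensorBoard event files under work_dir.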
warmup_ratio = 0.03
weight_decay = 0.05
work_dir = '/home/wangqun/work_dirs/internvl_ft_run_14_filter'