{ "bits": 4, "group_size": 128, "sym": true, "data_type": "int", "seqlen": 512, "batch_size": 1, "scale_dtype": null, "gradient_accumulate_steps": 8, "dataset": "liuhaotian/llava_conv_58k", "autoround_version": "0.9.0", "block_name_to_quantize": "model.language_model.layers", "quant_method": "auto-round", "packing_format": "auto_round:auto_gptq" }