{
  "quantization_method": "hybrid_int8_fp16",
  "linear_layers": "0/0 (INT8)",
  "embedding_layers": "1/1 (FP16)",
  "total_quantized": "1/1",
  "original_model": "luca-deandrea/MNLP_M3_mcqa_model",
  "quantization_timestamp": "2025-06-10 22:10:59",
  "pytorch_version": "2.6.0+cu118",
  "estimated_compression_ratio": "1.3x",
  "estimated_size_mb": 1704.5302734375,
  "original_size_mb": 2272.70703125,
  "formats_included": [
    "pytorch_bin_only"
  ],
  "lighteval_compatible": true,
  "notes": "Linear layers: INT8 quantization, Embedding layers: FP16 conversion"
}
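
The metadata above describes a hybrid scheme: nn.Linear weights are dynamically quantized to INT8 while nn.Embedding weights are cast to FP16. Below is a minimal sketch of how such a scheme and its size fields could be produced in PyTorch, assuming a standard nn.Module; the helper names (hybrid_quantize, estimate_size_mb) and the toy model are illustrative, not taken from the original pipeline.

import torch
import torch.nn as nn

def hybrid_quantize(model: nn.Module) -> nn.Module:
    # INT8: dynamic quantization swaps nn.Linear modules for
    # quantized equivalents.
    model = torch.quantization.quantize_dynamic(
        model, {nn.Linear}, dtype=torch.qint8
    )
    # FP16: embeddings are not covered by dynamic quantization,
    # so cast their weights to half precision instead.
    for module in model.modules():
        if isinstance(module, nn.Embedding):
            module.half()
    return model

def estimate_size_mb(model: nn.Module) -> float:
    # Rough size from parameters and buffers. Dynamically quantized
    # linears keep packed weights outside parameters(), so this is
    # only accurate before quantization.
    total = sum(p.numel() * p.element_size() for p in model.parameters())
    total += sum(b.numel() * b.element_size() for b in model.buffers())
    return total / (1024 ** 2)

if __name__ == "__main__":
    # Toy stand-in; the real checkpoint is
    # luca-deandrea/MNLP_M3_mcqa_model and would be loaded separately.
    toy = nn.Sequential(nn.Embedding(1000, 64), nn.Linear(64, 2))
    print(f"original size: {estimate_size_mb(toy):.3f} MB")
    quantized = hybrid_quantize(toy)
    print(quantized)

As a sanity check, the reported 1.3x compression ratio matches the listed sizes: 2272.70703125 / 1704.5302734375 ≈ 1.33.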