diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py
index 4435644c..512d6449 100644
--- a/vllm/model_executor/layers/quantization/fp8.py
+++ b/vllm/model_executor/layers/quantization/fp8.py
@@ -116,7 +116,9 @@ class Fp8Config(QuantizationConfig):
         from vllm.attention.layer import Attention  # Avoid circular import

         if isinstance(layer, LinearBase):
-            if is_layer_skipped(prefix, self.ignored_layers):
+            if is_layer_skipped(prefix=prefix,
+                                ignored_layers=self.ignored_layers,
+                                fused_mapping=self.packed_modules_mapping):
                 return UnquantizedLinearMethod()
             return Fp8LinearMethod(self)
         elif isinstance(layer, FusedMoE):
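
Context (not part of the diff): the change passes the config's packed_modules_mapping into is_layer_skipped so that a fused module prefix (e.g. "qkv_proj") can be matched against an ignore list that names the original unfused shards. The sketch below is a minimal standalone illustration of that idea; the helper name is_layer_skipped_sketch and its exact semantics are assumptions for this example only, not vLLM's actual is_layer_skipped implementation.

    from typing import Mapping, Sequence

    # Hypothetical sketch of a fused-mapping-aware skip check; not vLLM's
    # real is_layer_skipped, which may differ in detail.
    def is_layer_skipped_sketch(prefix: str,
                                ignored_layers: Sequence[str],
                                fused_mapping: Mapping[str, Sequence[str]]) -> bool:
        # Split "model.layers.0.self_attn.qkv_proj" into the parent path
        # and the final module name.
        parent, _, module_name = prefix.rpartition(".")

        if module_name in fused_mapping:
            # A fused module (e.g. "qkv_proj") replaces several original
            # shards (e.g. "q_proj", "k_proj", "v_proj"). Checkpoints list
            # the unfused names in their ignore list, so expand the fused
            # prefix back into shard prefixes before comparing.
            shard_prefixes = [
                f"{parent}.{shard}" if parent else shard
                for shard in fused_mapping[module_name]
            ]
            skipped = [p in ignored_layers for p in shard_prefixes]
            # All shards of one fused module must agree; a partial match
            # would leave the fused layer half-quantized.
            if any(skipped) and not all(skipped):
                raise ValueError(
                    f"Shards of fused module {prefix} are inconsistently "
                    "listed in ignored_layers")
            return all(skipped)

        return prefix in ignored_layers


    # Example: the checkpoint ignores the unfused q/k/v projections, and the
    # fused "qkv_proj" prefix is resolved through the mapping.
    assert is_layer_skipped_sketch(
        "model.layers.0.self_attn.qkv_proj",
        ignored_layers=[
            "model.layers.0.self_attn.q_proj",
            "model.layers.0.self_attn.k_proj",
            "model.layers.0.self_attn.v_proj",
        ],
        fused_mapping={"qkv_proj": ["q_proj", "k_proj", "v_proj"]},
    )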