# Copyright 2026 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
from inspect import signature

from .test_configuration_common import ConfigTester
from .test_modeling_common import (
    GenerationTesterMixin,
    ModelTesterMixin,
    floats_tensor,
    ids_tensor,
    is_torch_available,
    require_torch,
    torch_device,
)
from .test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch


class VLMModelTester:
    """Shared tester for Vision-Language Models.

    Subclasses set the model/config classes below and may override the
    image-handling hooks (`create_pixel_values`, `create_attention_mask`,
    `place_image_tokens`, `get_additional_inputs`) for model-specific behavior.
    """

    # If the model follows the standard naming conventions, only `base_model_class` needs to be set (the others are
    # inferred from available public classes).
    base_model_class = None
    config_class = None
    text_config_class = None
    vision_config_class = None
    conditional_generation_class = None
    sequence_classification_class = None
    # These attributes are required after the initialization phase of the tester.
    _required_attributes = ("base_model_class", "config_class", "conditional_generation_class")
    # Arguments that should be passed to the config class even if not in its signature
    forced_config_args = ["pad_token_id"]

    @property
    def all_model_classes(self):
        # Models that set `all_model_classes` in their `XXXModelTest` class must have a new class that doesn't fit
        # any of the common classes.
        return [
            model_class
            for model_class in (
                self.base_model_class,
                self.conditional_generation_class,
                self.sequence_classification_class,
            )
            if model_class is not None
        ]

    @property
    def pipeline_model_mapping(self):
        # Default pipeline mapping derived from the classes the subclass declared.
        mapping = {
            "feature-extraction": self.base_model_class,
            "image-text-to-text": self.conditional_generation_class,
        }
        return mapping

    def __init__(self, parent, **kwargs):
        self.parent = parent
        # Standard defaults
        kwargs.setdefault("batch_size", 3)
        kwargs.setdefault("is_training", True)
        kwargs.setdefault("use_input_mask", True)
        kwargs.setdefault("use_token_type_ids", False)
        kwargs.setdefault("use_labels", True)
        kwargs.setdefault("vocab_size", 99)
        kwargs.setdefault("hidden_size", 32)
        kwargs.setdefault("num_hidden_layers", 2)
        kwargs.setdefault("num_attention_heads", 2)
        kwargs.setdefault("num_key_value_heads", 2)
        kwargs.setdefault("intermediate_size", 32)  # Keep this divisible by 8 for fp16/bf16/fp32 16-bytes alignment
        kwargs.setdefault("hidden_act", "gelu")
        kwargs.setdefault("hidden_dropout_prob", 0.1)
        kwargs.setdefault("attention_probs_dropout_prob", 0.1)
        kwargs.setdefault("max_position_embeddings", 512)
        kwargs.setdefault("type_vocab_size", 16)
        kwargs.setdefault("type_sequence_label_size", 2)
        kwargs.setdefault("initializer_range", 0.02)
        kwargs.setdefault("num_labels", 3)
        kwargs.setdefault("num_choices", 4)
        kwargs.setdefault("pad_token_id", 0)
        kwargs.setdefault("bos_token_id", 1)
        kwargs.setdefault("eos_token_id", 2)
        kwargs.setdefault("image_token_id", 3)
        kwargs.setdefault("is_decoder", False)
        kwargs.setdefault("scope", None)
        kwargs.setdefault("expert_interval", 1)
        kwargs.setdefault("moe_layer_start_index", 0)
        kwargs.setdefault("moe_intermediate_size", 12)
        kwargs.setdefault("shared_expert_intermediate_size", 36)
        kwargs.setdefault("shared_expert_gate", True)
        kwargs.setdefault("moe_num_shared_experts", 2)
        kwargs.setdefault("num_experts_per_tok", 2)
        kwargs.setdefault("num_experts", 8)
        kwargs.setdefault("mamba_n_groups", 1)
        kwargs.setdefault("mamba_n_heads", 16)
        kwargs.setdefault("mamba_d_state", 16)
        kwargs.setdefault("mamba_d_conv", 4)
        kwargs.setdefault("mamba_expand", 2)
        kwargs.setdefault("mamba_chunk_size", 16)
        kwargs.setdefault("image_size", 8)
        kwargs.setdefault("patch_size", 4)
        kwargs.setdefault("num_channels", 3)
        kwargs.setdefault("projection_dim", 32)
        kwargs.setdefault("projector_hidden_act", "gelu")
        kwargs.setdefault("ignore_index", -100)
        kwargs.setdefault("vision_feature_select_strategy", "default")
        kwargs.setdefault("vision_feature_layer", -1)
        kwargs.setdefault("tie_word_embeddings", False)
        # Computed defaults (can still be overridden in derived classes)
        kwargs.setdefault("head_dim", kwargs["hidden_size"] // kwargs["num_attention_heads"])
        kwargs.setdefault("num_image_tokens", (kwargs["image_size"] // kwargs["patch_size"]) ** 2)
        kwargs.setdefault("seq_length", 7 + kwargs["num_image_tokens"])

        # Set all kwargs as instance attributes
        for key, value in kwargs.items():
            setattr(self, key, value)

        # The text/vision config classes are used unconditionally by `get_text_config`/`get_vision_config`,
        # so they are validated here in addition to `_required_attributes`.
        for required_attribute in self._required_attributes + ("text_config_class", "vision_config_class"):
            if getattr(self, required_attribute) is None:
                raise ValueError(
                    f"You have inherited from VLMModelTester but did not set the {required_attribute} attribute."
                )

    # Because VLMs have some different standards in how they handle image tokens, we need a few methods
    # that can be overridden if required:
    def create_pixel_values(self):
        # Override to 5D for patch-based models
        return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], scale=1.0)

    def create_attention_mask(self, input_ids):
        # Override for bidirectional attention models like Gemma3
        return torch.tril(torch.ones_like(input_ids).to(torch_device))

    def place_image_tokens(self, input_ids, config):
        # Override if the image tokens shouldn't be placed at the start of the test sequence
        image_token_id = getattr(config, "image_token_id", self.image_token_id)
        # Clear any accidental image tokens first
        input_ids = input_ids.clone()
        input_ids[input_ids == image_token_id] = self.bos_token_id
        # Place image tokens at the start
        input_ids[:, : self.num_image_tokens] = image_token_id
        return input_ids

    def get_additional_inputs(self, config, input_ids, pixel_values):
        # Override for model-specific inputs like LlavaNext's image_sizes
        return {}

    # End of overridable methods

    def prepare_config_and_inputs_for_common(self):
        """Build a (config, inputs_dict) pair with image tokens placed deterministically."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        pixel_values = self.create_pixel_values()
        config = self.get_config()

        special_tokens = [self.pad_token_id, self.bos_token_id, self.eos_token_id, self.image_token_id]
        for i in range(self.vocab_size):
            if i not in special_tokens:
                # The smallest token ID that is not a special token
                safe_token_id = i
                break
        else:
            raise ValueError("vocab_size is too small and there is no token ID that is not a special token!")

        # Avoid flaky tests, clear any special tokens in ids_tensor
        # image_token_id is handled separately by place_image_tokens()
        input_ids[input_ids == self.pad_token_id] = safe_token_id
        input_ids[input_ids == self.eos_token_id] = safe_token_id
        input_ids = self.place_image_tokens(input_ids, config)

        # Create attention mask with final input_ids (after image tokens are placed)
        # This is important for models that use padding masks based on token values
        input_mask = None
        if self.use_input_mask:
            input_mask = self.create_attention_mask(input_ids)

        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask, "pixel_values": pixel_values}
        additional_inputs = self.get_additional_inputs(config, input_ids, pixel_values)
        inputs_dict.update(additional_inputs)
        return config, inputs_dict

    @property
    def config_args(self):
        return list(signature(self.config_class.__init__).parameters.keys())

    @property
    def text_config_args(self):
        args = list(signature(self.text_config_class.__init__).parameters.keys())
        for token_arg in ["pad_token_id", "bos_token_id", "eos_token_id"]:  # Not always explicitly in the sig
            if token_arg not in args:
                args.append(token_arg)
        return args

    @property
    def vision_config_args(self):
        return list(signature(self.vision_config_class.__init__).parameters.keys())

    def get_config(self):
        kwargs = {}
        attribute_map = getattr(self.config_class, "attribute_map", {})
        model_name_to_common_name = {v: k for k, v in attribute_map.items()}
        for k in self.config_args + self.forced_config_args:
            if hasattr(self, k) and k != "self":
                kwargs[k] = getattr(self, k)
            elif k in model_name_to_common_name and hasattr(self, model_name_to_common_name[k]):
                kwargs[k] = getattr(self, model_name_to_common_name[k])
        kwargs["text_config"] = self.get_text_config()
        kwargs["vision_config"] = self.get_vision_config()
        return self.config_class(**kwargs)

    def get_text_config(self):
        kwargs = {}
        attribute_map = getattr(self.text_config_class, "attribute_map", {})
        model_name_to_common_name = {v: k for k, v in attribute_map.items()}
        for k in self.text_config_args:
            if hasattr(self, k) and k != "self":
                kwargs[k] = getattr(self, k)
            elif k in model_name_to_common_name and hasattr(self, model_name_to_common_name[k]):
                kwargs[k] = getattr(self, model_name_to_common_name[k])
        return self.text_config_class(**kwargs)

    def get_vision_config(self):
        kwargs = {}
        attribute_map = getattr(self.vision_config_class, "attribute_map", {})
        model_name_to_common_name = {v: k for k, v in attribute_map.items()}
        for k in self.vision_config_args:
            if hasattr(self, k) and k != "self":
                kwargs[k] = getattr(self, k)
            elif k in model_name_to_common_name and hasattr(self, model_name_to_common_name[k]):
                kwargs[k] = getattr(self, model_name_to_common_name[k])
        return self.vision_config_class(**kwargs)

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = self.base_model_class(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))


@require_torch
class VLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin):
    """
    Base test class for Vision-Language Models.

    Subclasses should set:
    - `model_tester_class`: The tester class (subclass of VLMModelTester)

    Optional:
    - `all_model_classes`: Override if not using default from model_tester
    - `pipeline_model_mapping`: Override if not using default from model_tester
    """

    model_tester_class = None
    all_model_classes = None
    pipeline_model_mapping = None
    # VLMs are always composite
    _is_composite = True

    def setUp(self):
        if self.model_tester_class is None:
            raise ValueError("You have inherited from VLMModelTest but did not set the model_tester_class attribute.")
        self.model_tester = self.model_tester_class(self)
        self.config_tester = ConfigTester(self, config_class=self.model_tester.config_class, has_text_modality=False)
        if self.pipeline_model_mapping is None:
            if self.all_model_classes is not None:
                raise ValueError(
                    "Tests that inherit from `VLMModelTest` and set `all_model_classes` must manually set "
                    "`pipeline_model_mapping`."
                )
            else:
                self.pipeline_model_mapping = self.model_tester.pipeline_model_mapping
        if self.all_model_classes is None:
            self.all_model_classes = self.model_tester.all_model_classes

    def test_config(self):
        """Test config common functionality."""
        self.config_tester.run_common_tests()

    def test_mismatching_num_image_tokens(self):
        """
        Tests that VLMs throw an error with explicit message saying what is wrong
        when number of images don't match number of image tokens in the text.
        Also we need to test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            curr_input_dict = copy.deepcopy(input_dict)
            _ = model(**curr_input_dict)  # successful forward with no modifications

            # Test 1: remove one image but leave the image token in text
            curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
            if "image_sizes" in curr_input_dict:
                curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:, ...]
            with self.assertRaises(ValueError):
                _ = model(**curr_input_dict)

            # Test 2: simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            # First, take just the first item from each tensor (skip None entries, e.g. a disabled attention mask,
            # which are not sliceable and are equivalent to omitting the argument)
            curr_input_dict = {key: val[:1] for key, val in curr_input_dict.items() if val is not None}

            # Double the batch size for all batch-dimension tensors except pixel_values
            # This simulates having 2 prompts (each with image tokens) but only 1 image
            batch_tensors_to_double = ["input_ids", "attention_mask", "token_type_ids"]
            for key in batch_tensors_to_double:
                if key in curr_input_dict and curr_input_dict[key] is not None:
                    curr_input_dict[key] = torch.cat([curr_input_dict[key], curr_input_dict[key]], dim=0)

            # one image and two image tokens raise an error
            with self.assertRaises(ValueError):
                _ = model(**curr_input_dict)

            # Test 3: two images and two image tokens don't raise an error
            curr_input_dict["pixel_values"] = torch.cat(
                [curr_input_dict["pixel_values"], curr_input_dict["pixel_values"]], dim=0
            )
            if "image_sizes" in curr_input_dict:
                curr_input_dict["image_sizes"] = torch.cat(
                    [curr_input_dict["image_sizes"], curr_input_dict["image_sizes"]], dim=0
                )
            _ = model(**curr_input_dict)

    @unittest.skip(
        "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. "
        "Can be tested as part of LLM test"
    )
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass