fix compatibility issue of older version llava · dadwadw233/lmms-eval@093da38
```diff
@@ -30,12 +30,6 @@
 except ImportError:
     eval_logger.error("LLaVA is not installed. Please install LLaVA to use this model.")
 
-from transformers.integrations.deepspeed import (
-    is_deepspeed_zero3_enabled,
-    set_hf_deepspeed_config,
-    unset_hf_deepspeed_config,
-)
-
 if torch.__version__ > "2.1.2":
     best_fit_attn_implementation = "sdpa"
 else:
```
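The unchanged context lines above pick the attention backend by comparing `torch.__version__` as a raw string. Below is a minimal sketch of the same selection, assuming the `else` branch (which is outside this hunk) falls back to `"eager"`, and using `packaging.version` so that versions like `"2.1.10"` compare numerically rather than lexicographically:

```python
# Sketch only: mirrors the version gate shown in the context lines above.
# Assumption: the else-branch (not shown in this hunk) falls back to "eager".
import torch
from packaging import version


def pick_attn_implementation() -> str:
    # packaging.version compares "2.1.10" > "2.1.2" numerically,
    # whereas a raw string comparison does not.
    if version.parse(torch.__version__) > version.parse("2.1.2"):
        return "sdpa"  # PyTorch scaled-dot-product attention backend
    return "eager"     # assumed fallback for older torch builds


best_fit_attn_implementation = pick_attn_implementation()
```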
```diff
@@ -94,8 +88,9 @@ def __init__(
             # Try to load the model with the multimodal argument
             self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, model_name, device_map=self.device_map, **llava_model_args)
         except TypeError:
-            # for older versions of LLaVA that don't have multimodal argument
+            # for older versions of LLaVA that don't have multimodal and attn_implementation arguments
             llava_model_args.pop("multimodal", None)
+            llava_model_args.pop("attn_implementation", None)
             self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, model_name, device_map=self.device_map, **llava_model_args)
 
         self._config = self._model.config
```
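The fix itself is a keyword-compatibility fallback: call `load_pretrained_model` with the newest keyword arguments first, and on `TypeError` drop the ones an older LLaVA build does not accept, then retry. Below is a self-contained sketch of that pattern; `call_with_kwarg_fallback` is a hypothetical helper and not part of lmms-eval, only the control flow mirrors the patch:

```python
# Sketch only: retry-without-unsupported-kwargs pattern used in the patch above.
# `call_with_kwarg_fallback` is a hypothetical helper, not part of lmms-eval.
from typing import Any, Callable


def call_with_kwarg_fallback(fn: Callable[..., Any], *args: Any,
                             optional_kwargs: tuple = ("multimodal", "attn_implementation"),
                             **kwargs: Any) -> Any:
    """Call fn with every kwarg; if the signature rejects one, drop the
    newer-API extras listed in optional_kwargs and retry once."""
    try:
        return fn(*args, **kwargs)
    except TypeError:
        # Older LLaVA builders raise TypeError("... unexpected keyword argument ...").
        for name in optional_kwargs:
            kwargs.pop(name, None)
        return fn(*args, **kwargs)


# Usage, by analogy with the patched __init__ (names as they appear in the diff):
# tokenizer, model, image_processor, max_length = call_with_kwarg_fallback(
#     load_pretrained_model, pretrained, None, model_name,
#     device_map=device_map, **llava_model_args)
```

A stricter variant would inspect the exception message for "unexpected keyword argument" before retrying, so that a `TypeError` raised inside the loader itself is not silently masked.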