feat: Add tie_weights parameter to Llava model initialization · EvolvingLMMs-Lab/lmms-eval@672d7e5 (original) (raw)
`@@ -58,6 +58,7 @@ def __init__(
`
58
58
`device_map="cuda:0",
`
59
59
`conv_template="vicuna_v1",
`
60
60
`use_cache=True,
`
``
61
`+
tie_weights: bool = True,
`
61
62
`truncate_context=False, # whether to truncate the context in generation, set it False for LLaVA-1.6
`
62
63
`customized_config=None, # ends in json
`
63
64
`**kwargs,
`
`@@ -97,6 +98,8 @@ def __init__(
`
97
98
`self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, model_name, device_map=self.device_map, **llava_model_args)
`
98
99
`self._config = self._model.config
`
99
100
`self.model.eval()
`
``
101
`+
if tie_weights:
`
``
102
`+
self.model.tie_weights()
`
100
103
``
101
104
`self.truncation = truncation
`
102
105
`self.batch_size_per_gpu = int(batch_size)
`