Set printing info for llava_hf to debug level · dadwadw233/lmms-eval@4e9b71d

@@ -209,8 +209,8 @@ def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
             labels[: len(contxt_id)] = -100

             if self.accelerator.is_main_process and doc_id % 100 == 0:
-                eval_logger.info(f"Prompt for doc ID {doc_id}:\n\n{formatted_contexts[0]}\n")
-                eval_logger.info(f"Prompt and continuation for doc ID {doc_id}:\n\n{formatted_continuation[0]}\n")
+                eval_logger.debug(f"Prompt for doc ID {doc_id}:\n\n{formatted_contexts[0]}\n")
+                eval_logger.debug(f"Prompt and continuation for doc ID {doc_id}:\n\n{formatted_continuation[0]}\n")

             with torch.inference_mode():
                 outputs = self.model(**model_inputs, labels=labels)
@@ -293,7 +293,7 @@ def _collate(x):
             text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

             if self.accelerator.is_main_process and doc_id[0] % 100 == 0:
-                eval_logger.info(f"Prompt for doc ID {doc_id[0]}:\n\n{text}\n")
+                eval_logger.debug(f"Prompt for doc ID {doc_id[0]}:\n\n{text}\n")

             inputs = self._image_processor(images=visuals, text=text, return_tensors="pt").to(self._device, self.model.dtype)

@@ -329,7 +329,7 @@ def _collate(x):
             text_outputs = text_outputs.split("ASSISTANT:")[-1].strip()

             if self.accelerator.is_main_process and doc_id[0] % 100 == 0:
-                eval_logger.info(f"Generated text for doc ID {doc_id[0]}:\n\n{text_outputs}\n")
+                eval_logger.debug(f"Generated text for doc ID {doc_id[0]}:\n\n{text_outputs}\n")

             res.append(text_outputs)
             self.cache_hook.add_partial("generate_until", (context, gen_kwargs), text_outputs)