Add EasyAnimateV5.1 text-to-video, image-to-video, control-to-video generation model by bubbliiiing · Pull Request #10626 · huggingface/diffusers
Here is the code to reproduce the error (the same pipeline works fine with pipeline.enable_model_cpu_offload(); the failure occurs with pipeline.enable_sequential_cpu_offload()):
import torch
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, EasyAnimateTransformer3DModel, EasyAnimatePipeline
from diffusers.utils import export_to_video
from transformers import AutoModel
dtype = torch.bfloat16
quant_config = DiffusersBitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=dtype)
text_encoder_4bit = AutoModel.from_pretrained(
    "alibaba-pai/EasyAnimateV5.1-12b-zh",
    subfolder="text_encoder",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
transformer_4bit = EasyAnimateTransformer3DModel.from_pretrained(
    "alibaba-pai/EasyAnimateV5.1-12b-zh",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=dtype,
)
pipeline = EasyAnimatePipeline.from_pretrained(
    "alibaba-pai/EasyAnimateV5.1-12b-zh",
    text_encoder=text_encoder_4bit,
    transformer=transformer_4bit,
    torch_dtype=dtype,
)
pipeline.enable_sequential_cpu_offload()
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()
prompt = "A cat walks on the grass, realistic style."
negative_prompt = "bad detailed"
video = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    num_frames=81,
    num_inference_steps=40,
    width=512,
    height=320
).frames[0]
export_to_video(video, "cat2.mp4", fps=8)
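For comparison, the only difference in the configuration that runs without the error is the offload strategy. Below is a minimal sketch of that working variant; it assumes the text_encoder_4bit, transformer_4bit, and dtype objects defined above and keeps the same generation call.

pipeline = EasyAnimatePipeline.from_pretrained(
    "alibaba-pai/EasyAnimateV5.1-12b-zh",
    text_encoder=text_encoder_4bit,
    transformer=transformer_4bit,
    torch_dtype=dtype,
)
# Model-level CPU offload instead of sequential offload; as noted above,
# this configuration runs fine with the 4-bit quantized components.
pipeline.enable_model_cpu_offload()
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()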
and the logs:
Log 1: #10626 (comment)
Log 2:
(venv) C:\aiOWN\diffuser_webui>python easyanimate_bnb.py
`low_cpu_mem_usage` was None, now default to True since model is quantized.
Downloading shards: 100%|███████████████████████████████████████████████████| 5/5 [00:00<?, ?it/s]
`Qwen2VLRotaryEmbedding` can now be fully parameterized by passing the model config through the `config` argument. All other arguments will be removed in v4.46
Loading checkpoint shards: 100%|████████████████████████████████████| 5/5 [00:34<00:00, 6.83s/it]
The config attributes {'add_ref_latent_in_control_model': True, 'clip_channels': None, 'enable_clip_in_inpaint': False, 'ref_channels': None, 'swa_layers': None} were passed to EasyAnimateTransformer3DModel, but are not expected and will be ignored. Please verify your config.json configuration file.
Expected types for text_encoder: ['Qwen2VLForConditionalGeneration', 'BertModel'], got Qwen2VLModel.
Loading pipeline components...: 0%| | 0/5 [00:00<?, ?it/s]The config attributes {'force_upcast': True, 'mid_block_use_attention': False, 'sample_size': 256, 'slice_mag_vae': False, 'slice_compression_vae': False, 'cache_compression_vae': False, 'cache_mag_vae': True, 'use_tiling': False, 'norm_type': None, 'use_tiling_encoder': False, 'use_tiling_decoder': False, 'mid_block_attention_type': 'spatial'} were passed to AutoencoderKLMagvit, but are not expected and will be ignored. Please verify your config.json configuration file.
Loading pipeline components...: 100%|███████████████████████████████| 5/5 [00:01<00:00, 4.86it/s]
Traceback (most recent call last):
  File "C:\aiOWN\diffuser_webui\easyanimate_bnb.py", line 30, in <module>
    video = pipeline(
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
    return func(*args, **kwargs)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\diffusers\pipelines\easyanimate\pipeline_easyanimate.py", line 803, in __call__
    ) = self.encode_prompt(
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\diffusers\pipelines\easyanimate\pipeline_easyanimate.py", line 383, in encode_prompt
    prompt_embeds = text_encoder(
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\accelerate\hooks.py", line 176, in new_forward
    output = module._old_forward(*args, **kwargs)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\transformers\models\qwen2_vl\modeling_qwen2_vl.py", line 1082, in forward
    inputs_embeds = self.embed_tokens(input_ids)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\accelerate\hooks.py", line 171, in new_forward
    args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\accelerate\hooks.py", line 370, in pre_forward
    return send_to_device(args, self.execution_device), send_to_device(
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\accelerate\utils\operations.py", line 174, in send_to_device
    return honor_type(
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\accelerate\utils\operations.py", line 81, in honor_type
    return type(obj)(generator)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\accelerate\utils\operations.py", line 175, in <genexpr>
    tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
  File "C:\aiOWN\diffuser_webui\venv\lib\site-packages\accelerate\utils\operations.py", line 155, in send_to_device
    return tensor.to(device, non_blocking=non_blocking)
NotImplementedError: Cannot copy out of meta tensor; no data!
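The traceback ends in accelerate's pre_forward hook for the text encoder's embed_tokens: send_to_device raises because a tensor it touches is still on the meta device and has no data. A small diagnostic sketch (assuming the pipeline object from the script above, run right after enable_sequential_cpu_offload()) that lists which text-encoder parameters are left on meta is:

# Diagnostic only, not part of the reproduction script: list text-encoder
# parameters that are still on the meta device (i.e. have no materialized data).
meta_params = [
    name
    for name, param in pipeline.text_encoder.named_parameters()
    if param.device.type == "meta"
]
print(f"{len(meta_params)} text_encoder parameters on meta device")
for name in meta_params[:10]:  # print the first few offenders
    print(" ", name)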