got prompt
W0000 00:00:1735391076.390700 15264 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
Some weights of VitMatteForImageMatting were not initialized from the model checkpoint at L:\ComfyUI_windows_portable\ComfyUI\models\vitmatte and are newly initialized: ['backbone.embeddings.position_embeddings', 'backbone.embeddings.projection.bias', 'backbone.embeddings.projection.weight', 'backbone.encoder.layer.0.attention.proj.bias', 'backbone.encoder.layer.0.attention.proj.weight', 'backbone.encoder.layer.0.attention.qkv.bias', 'backbone.encoder.layer.0.attention.qkv.weight', 'backbone.encoder.layer.0.mlp.fc1.bias', 'backbone.encoder.layer.0.mlp.fc1.weight', 'backbone.encoder.layer.0.mlp.fc2.bias', 'backbone.encoder.layer.0.mlp.fc2.weight', 'backbone.encoder.layer.0.norm1.bias', 'backbone.encoder.layer.0.norm1.weight', 'backbone.encoder.layer.0.norm2.bias', 'backbone.encoder.layer.0.norm2.weight', 'backbone.encoder.layer.1.attention.proj.bias', 'backbone.encoder.layer.1.attention.proj.weight', 'backbone.encoder.layer.1.attention.qkv.bias', 'backbone.encoder.layer.1.attention.qkv.weight', 'backbone.encoder.layer.1.mlp.fc1.bias', 'backbone.encoder.layer.1.mlp.fc1.weight', 'backbone.encoder.layer.1.mlp.fc2.bias', 'backbone.encoder.layer.1.mlp.fc2.weight', 'backbone.encoder.layer.1.norm1.bias', 'backbone.encoder.layer.1.norm1.weight', 'backbone.encoder.layer.1.norm2.bias', 'backbone.encoder.layer.1.norm2.weight', 'backbone.encoder.layer.10.attention.proj.bias', 'backbone.encoder.layer.10.attention.proj.weight', 'backbone.encoder.layer.10.attention.qkv.bias', 'backbone.encoder.layer.10.attention.qkv.weight', 'backbone.encoder.layer.10.mlp.fc1.bias', 'backbone.encoder.layer.10.mlp.fc1.weight', 'backbone.encoder.layer.10.mlp.fc2.bias', 'backbone.encoder.layer.10.mlp.fc2.weight', 'backbone.encoder.layer.10.norm1.bias', 'backbone.encoder.layer.10.norm1.weight', 'backbone.encoder.layer.10.norm2.bias', 'backbone.encoder.layer.10.norm2.weight', 'backbone.encoder.layer.11.attention.proj.bias', 'backbone.encoder.layer.11.attention.proj.weight', 'backbone.encoder.layer.11.attention.qkv.bias', 'backbone.encoder.layer.11.attention.qkv.weight', 'backbone.encoder.layer.11.mlp.fc1.bias', 'backbone.encoder.layer.11.mlp.fc1.weight', 'backbone.encoder.layer.11.mlp.fc2.bias', 'backbone.encoder.layer.11.mlp.fc2.weight', 'backbone.encoder.layer.11.norm1.bias', 'backbone.encoder.layer.11.norm1.weight', 'backbone.encoder.layer.11.norm2.bias', 'backbone.encoder.layer.11.norm2.weight', 'backbone.encoder.layer.2.attention.proj.bias', 'backbone.encoder.layer.2.attention.proj.weight', 'backbone.encoder.layer.2.attention.qkv.bias', 'backbone.encoder.layer.2.attention.qkv.weight', 'backbone.encoder.layer.2.mlp.fc1.bias', 'backbone.encoder.layer.2.mlp.fc1.weight', 'backbone.encoder.layer.2.mlp.fc2.bias', 'backbone.encoder.layer.2.mlp.fc2.weight', 'backbone.encoder.layer.2.norm1.bias', 'backbone.encoder.layer.2.norm1.weight', 'backbone.encoder.layer.2.norm2.bias', 'backbone.encoder.layer.2.norm2.weight', 'backbone.encoder.layer.3.attention.proj.bias', 'backbone.encoder.layer.3.attention.proj.weight', 'backbone.encoder.layer.3.attention.qkv.bias', 'backbone.encoder.layer.3.attention.qkv.weight', 'backbone.encoder.layer.3.mlp.fc1.bias', 'backbone.encoder.layer.3.mlp.fc1.weight', 'backbone.encoder.layer.3.mlp.fc2.bias', 'backbone.encoder.layer.3.mlp.fc2.weight', 'backbone.encoder.layer.3.norm1.bias', 'backbone.encoder.layer.3.norm1.weight', 'backbone.encoder.layer.3.norm2.bias', 'backbone.encoder.layer.3.norm2.weight', 'backbone.encoder.layer.4.attention.proj.bias', 'backbone.encoder.layer.4.attention.proj.weight', 
'backbone.encoder.layer.4.attention.qkv.bias', 'backbone.encoder.layer.4.attention.qkv.weight', 'backbone.encoder.layer.4.mlp.fc1.bias', 'backbone.encoder.layer.4.mlp.fc1.weight', 'backbone.encoder.layer.4.mlp.fc2.bias', 'backbone.encoder.layer.4.mlp.fc2.weight', 'backbone.encoder.layer.4.norm1.bias', 'backbone.encoder.layer.4.norm1.weight', 'backbone.encoder.layer.4.norm2.bias', 'backbone.encoder.layer.4.norm2.weight', 'backbone.encoder.layer.5.attention.proj.bias', 'backbone.encoder.layer.5.attention.proj.weight', 'backbone.encoder.layer.5.attention.qkv.bias', 'backbone.encoder.layer.5.attention.qkv.weight', 'backbone.encoder.layer.5.mlp.fc1.bias', 'backbone.encoder.layer.5.mlp.fc1.weight', 'backbone.encoder.layer.5.mlp.fc2.bias', 'backbone.encoder.layer.5.mlp.fc2.weight', 'backbone.encoder.layer.5.norm1.bias', 'backbone.encoder.layer.5.norm1.weight', 'backbone.encoder.layer.5.norm2.bias', 'backbone.encoder.layer.5.norm2.weight', 'backbone.encoder.layer.6.attention.proj.bias', 'backbone.encoder.layer.6.attention.proj.weight', 'backbone.encoder.layer.6.attention.qkv.bias', 'backbone.encoder.layer.6.attention.qkv.weight', 'backbone.encoder.layer.6.mlp.fc1.bias', 'backbone.encoder.layer.6.mlp.fc1.weight', 'backbone.encoder.layer.6.mlp.fc2.bias', 'backbone.encoder.layer.6.mlp.fc2.weight', 'backbone.encoder.layer.6.norm1.bias', 'backbone.encoder.layer.6.norm1.weight', 'backbone.encoder.layer.6.norm2.bias', 'backbone.encoder.layer.6.norm2.weight', 'backbone.encoder.layer.7.attention.proj.bias', 'backbone.encoder.layer.7.attention.proj.weight', 'backbone.encoder.layer.7.attention.qkv.bias', 'backbone.encoder.layer.7.attention.qkv.weight', 'backbone.encoder.layer.7.mlp.fc1.bias', 'backbone.encoder.layer.7.mlp.fc1.weight', 'backbone.encoder.layer.7.mlp.fc2.bias', 'backbone.encoder.layer.7.mlp.fc2.weight', 'backbone.encoder.layer.7.norm1.bias', 'backbone.encoder.layer.7.norm1.weight', 'backbone.encoder.layer.7.norm2.bias', 'backbone.encoder.layer.7.norm2.weight', 'backbone.encoder.layer.8.attention.proj.bias', 'backbone.encoder.layer.8.attention.proj.weight', 'backbone.encoder.layer.8.attention.qkv.bias', 'backbone.encoder.layer.8.attention.qkv.weight', 'backbone.encoder.layer.8.mlp.fc1.bias', 'backbone.encoder.layer.8.mlp.fc1.weight', 'backbone.encoder.layer.8.mlp.fc2.bias', 'backbone.encoder.layer.8.mlp.fc2.weight', 'backbone.encoder.layer.8.norm1.bias', 'backbone.encoder.layer.8.norm1.weight', 'backbone.encoder.layer.8.norm2.bias', 'backbone.encoder.layer.8.norm2.weight', 'backbone.encoder.layer.9.attention.proj.bias', 'backbone.encoder.layer.9.attention.proj.weight', 'backbone.encoder.layer.9.attention.qkv.bias', 'backbone.encoder.layer.9.attention.qkv.weight', 'backbone.encoder.layer.9.mlp.fc1.bias', 'backbone.encoder.layer.9.mlp.fc1.weight', 'backbone.encoder.layer.9.mlp.fc2.bias', 'backbone.encoder.layer.9.mlp.fc2.weight', 'backbone.encoder.layer.9.norm1.bias', 'backbone.encoder.layer.9.norm1.weight', 'backbone.encoder.layer.9.norm2.bias', 'backbone.encoder.layer.9.norm2.weight', 'decoder.convstream.convs.0.batch_norm.bias', 'decoder.convstream.convs.0.batch_norm.num_batches_tracked', 'decoder.convstream.convs.0.batch_norm.running_mean', 'decoder.convstream.convs.0.batch_norm.running_var', 'decoder.convstream.convs.0.batch_norm.weight', 'decoder.convstream.convs.0.conv.weight', 'decoder.convstream.convs.1.batch_norm.bias', 'decoder.convstream.convs.1.batch_norm.num_batches_tracked', 'decoder.convstream.convs.1.batch_norm.running_mean', 'decoder.convstream.convs.1.batch_norm.running_var', 
'decoder.convstream.convs.1.batch_norm.weight', 'decoder.convstream.convs.1.conv.weight', 'decoder.convstream.convs.2.batch_norm.bias', 'decoder.convstream.convs.2.batch_norm.num_batches_tracked', 'decoder.convstream.convs.2.batch_norm.running_mean', 'decoder.convstream.convs.2.batch_norm.running_var', 'decoder.convstream.convs.2.batch_norm.weight', 'decoder.convstream.convs.2.conv.weight', 'decoder.fusion_blocks.0.conv.batch_norm.bias', 'decoder.fusion_blocks.0.conv.batch_norm.num_batches_tracked', 'decoder.fusion_blocks.0.conv.batch_norm.running_mean', 'decoder.fusion_blocks.0.conv.batch_norm.running_var', 'decoder.fusion_blocks.0.conv.batch_norm.weight', 'decoder.fusion_blocks.0.conv.conv.weight', 'decoder.fusion_blocks.1.conv.batch_norm.bias', 'decoder.fusion_blocks.1.conv.batch_norm.num_batches_tracked', 'decoder.fusion_blocks.1.conv.batch_norm.running_mean', 'decoder.fusion_blocks.1.conv.batch_norm.running_var', 'decoder.fusion_blocks.1.conv.batch_norm.weight', 'decoder.fusion_blocks.1.conv.conv.weight', 'decoder.fusion_blocks.2.conv.batch_norm.bias', 'decoder.fusion_blocks.2.conv.batch_norm.num_batches_tracked', 'decoder.fusion_blocks.2.conv.batch_norm.running_mean', 'decoder.fusion_blocks.2.conv.batch_norm.running_var', 'decoder.fusion_blocks.2.conv.batch_norm.weight', 'decoder.fusion_blocks.2.conv.conv.weight', 'decoder.fusion_blocks.3.conv.batch_norm.bias', 'decoder.fusion_blocks.3.conv.batch_norm.num_batches_tracked', 'decoder.fusion_blocks.3.conv.batch_norm.running_mean', 'decoder.fusion_blocks.3.conv.batch_norm.running_var', 'decoder.fusion_blocks.3.conv.batch_norm.weight', 'decoder.fusion_blocks.3.conv.conv.weight', 'decoder.matting_head.matting_convs.0.bias', 'decoder.matting_head.matting_convs.0.weight', 'decoder.matting_head.matting_convs.1.bias', 'decoder.matting_head.matting_convs.1.num_batches_tracked', 'decoder.matting_head.matting_convs.1.running_mean', 'decoder.matting_head.matting_convs.1.running_var', 'decoder.matting_head.matting_convs.1.weight', 'decoder.matting_head.matting_convs.3.bias', 'decoder.matting_head.matting_convs.3.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
!!! Exception during processing !!! Make sure that the channel dimension of the pixel values match with the one set in the configuration. Expected 3 but got 4.
Traceback (most recent call last):
File "L:\ComfyUI_windows_portable\ComfyUI\execution.py", line 328, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\ComfyUI\execution.py", line 203, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\ComfyUI\execution.py", line 174, in _map_node_over_list
process_inputs(input_dict, i)
File "L:\ComfyUI_windows_portable\ComfyUI\execution.py", line 163, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_LayerStyle_Advance\py\person_mask_ultra_v2.py", line 158, in person_mask_ultra_v2
_mask = generate_VITMatte(orig_image, _trimap, local_files_only=local_files_only, device=device, max_megapixels=max_megapixels)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_LayerStyle_Advance\py\imagefunc.py", line 1595, in generate_VITMatte
predictions = vit_matte_model.model(**inputs).alphas
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\vitmatte\modeling_vitmatte.py", line 322, in forward
outputs = self.backbone.forward_with_filtered_kwargs(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\utils\backbone_utils.py", line 235, in forward_with_filtered_kwargs
return self(*args, **filtered_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\vitdet\modeling_vitdet.py", line 847, in forward
embedding_output = self.embeddings(pixel_values)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "L:\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\vitdet\modeling_vitdet.py", line 113, in forward
raise ValueError(
ValueError: Make sure that the channel dimension of the pixel values match with the one set in the configuration. Expected 3 but got 4.
Prompt executed in 0.91 seconds
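
The `ValueError` at the end of the trace says the tensor reaching VitMatteForImageMatting has 4 channels while the model config expects 3, which usually means the source image still carries an alpha/transparency channel (e.g. an RGBA PNG) when it hits the VITMatte step. A possible workaround, not a confirmed fix from the node author, is to drop the alpha channel before feeding the image into the PersonMaskUltra V2 node. Below is a minimal sketch assuming ComfyUI's usual `[batch, height, width, channels]` IMAGE tensor layout; `strip_alpha` is a hypothetical helper for illustration, not part of ComfyUI_LayerStyle_Advance:

```python
import torch

def strip_alpha(image: torch.Tensor) -> torch.Tensor:
    """Drop the alpha channel from a ComfyUI-style IMAGE tensor ([B, H, W, C])."""
    # Keep only the first three channels when an alpha channel is present,
    # so the matting model receives the 3-channel input it expects.
    if image.shape[-1] == 4:
        image = image[..., :3]
    return image

# Example: a hypothetical RGBA batch of one 512x512 image.
rgba = torch.rand(1, 512, 512, 4)
rgb = strip_alpha(rgba)
print(rgb.shape)  # torch.Size([1, 512, 512, 3])
```

Equivalently, converting the source file to RGB before loading it (e.g. `Image.open(path).convert("RGB")` with Pillow) should avoid the 4th channel ever entering the workflow.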