
Commit b5c3508

Merge pull request huggingface#4 from huggingface/refactor-dinov3-vit
Refactor DINOv3ViT
2 parents a474d23 + 379447b commit b5c3508

File tree

12 files changed: +1283, -761 lines


docs/source/en/_toctree.yml

Lines changed: 2 additions & 0 deletions
```diff
@@ -763,6 +763,8 @@
       title: DINOV2
     - local: model_doc/dinov2_with_registers
       title: DINOv2 with Registers
+    - local: model_doc/dinov3
+      title: DINOv3
     - local: model_doc/dit
       title: DiT
     - local: model_doc/dpt
```

docs/source/en/model_doc/dinov3.md

Lines changed: 181 additions & 0 deletions
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

<div style="float: right;">
    <div class="flex flex-wrap space-x-1">
        <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
        <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
        <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
    </div>
</div>

# DINOv3

DINOv3 is a family of self-supervised vision foundation models from Meta AI. It scales the DINOv2 training recipe to larger models and datasets, producing high-quality global and dense image features that transfer to downstream vision tasks without fine-tuning. The family includes Vision Transformer (ViT) backbones as well as smaller ConvNeXt variants distilled from them.

You can find all the original DINOv3 checkpoints under the [DINOv3](https://huggingface.co/collections/facebook/dinov2-6526c98554b3d2576e071ce3) collection.

> [!TIP]
> Click on the DINOv3 models in the right sidebar for more examples of how to apply DINOv3 to different vision tasks.

The example below demonstrates how to obtain an image embedding with [`Pipeline`] or the [`AutoModel`] class.

<hfoptions id="usage">
<hfoption id="Pipeline">

```py
import torch
from transformers import pipeline

pipe = pipeline(
    task="image-feature-extraction",
    model="facebook/dinov3-vits16-pretrain-lvd1689m",
    torch_dtype=torch.float16,
    device=0
)

pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")
```
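
The pipeline returns the token features as nested Python lists. A minimal sketch for turning them into an array, assuming the pipeline's default settings (no pooling, batch dimension included):

```py
import numpy as np

features = pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")
features = np.asarray(features)
print(features.shape)  # roughly (1, num_tokens, hidden_size) for a single image
```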

</hfoption>
<hfoption id="AutoModel">

```py
import torch
from transformers import AutoImageProcessor, AutoModel
from transformers.image_utils import load_image

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = load_image(url)

processor = AutoImageProcessor.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
model = AutoModel.from_pretrained(
    "facebook/dinov3-vits16-pretrain-lvd1689m",
    torch_dtype=torch.float16,
    device_map="auto",
    attn_implementation="sdpa"
)

inputs = processor(images=image, return_tensors="pt").to(model.device)
with torch.inference_mode():
    outputs = model(**inputs)

pooled_output = outputs.pooler_output
print("Pooled output shape:", pooled_output.shape)
```

</hfoption>
</hfoptions>

Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.

The example below uses [torchao](../quantization/torchao) to quantize only the weights to int4.

```py
# pip install torchao
import torch
from transformers import TorchAoConfig, AutoImageProcessor, AutoModel
from torchao.quantization import Int4WeightOnlyConfig
from transformers.image_utils import load_image

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = load_image(url)

processor = AutoImageProcessor.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")

quant_config = Int4WeightOnlyConfig(group_size=128)
quantization_config = TorchAoConfig(quant_type=quant_config)

model = AutoModel.from_pretrained(
    "facebook/dinov3-vits16-pretrain-lvd1689m",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    quantization_config=quantization_config
)

inputs = processor(images=image, return_tensors="pt").to(model.device)
with torch.inference_mode():
    outputs = model(**inputs)

pooled_output = outputs.pooler_output
print("Pooled output shape:", pooled_output.shape)
```
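
A quick way to gauge the savings is the model's memory footprint; `get_memory_footprint` is available on any Transformers model and reports the size in bytes:

```py
print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.1f} MB")
```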

## Notes

- The example below shows how to split the output tensor into:
  - one embedding for the whole image, commonly referred to as a `CLS` token, useful for classification and retrieval
  - register tokens, learnable embeddings that act as dedicated “memory slots” for global information; they reduce high-norm artifacts in patch tokens, yielding cleaner attention maps and better performance on dense prediction tasks
  - a set of local embeddings, one for each `16x16` patch of the input image, useful for dense tasks such as semantic segmentation

```py
import torch
from transformers import AutoImageProcessor, AutoModel
from transformers.image_utils import load_image

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = load_image(url)
print("Image size:", image.height, image.width)  # [480, 640]

processor = AutoImageProcessor.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
model = AutoModel.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
patch_size = model.config.patch_size
print("Patch size:", patch_size)  # 16
print("Num register tokens:", model.config.num_register_tokens)  # 4

inputs = processor(images=image, return_tensors="pt")
print("Preprocessed image size:", inputs.pixel_values.shape)  # [1, 3, 224, 224]

batch_size, _, img_height, img_width = inputs.pixel_values.shape
num_patches_height, num_patches_width = img_height // patch_size, img_width // patch_size
num_patches_flat = num_patches_height * num_patches_width

with torch.inference_mode():
    outputs = model(**inputs)

last_hidden_states = outputs.last_hidden_state
print(last_hidden_states.shape)  # [1, 1 + 4 + 196, 384]
assert last_hidden_states.shape == (batch_size, 1 + model.config.num_register_tokens + num_patches_flat, model.config.hidden_size)

cls_token = last_hidden_states[:, 0, :]
patch_features_flat = last_hidden_states[:, 1 + model.config.num_register_tokens:, :]
patch_features = patch_features_flat.unflatten(1, (num_patches_height, num_patches_width))
```
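
A quick way to see the locality of these features is to score every patch against a reference patch with cosine similarity. The sketch below reuses `patch_features` from the snippet above and compares the center patch to all others, producing a coarse similarity map:

```py
import torch.nn.functional as F

# reference embedding for the center patch: [1, hidden_size]
ref = patch_features[:, num_patches_height // 2, num_patches_width // 2, :]

# flatten the grid back to [1, num_patches, hidden_size] and compare
flat = patch_features.flatten(1, 2)
similarity = F.cosine_similarity(flat, ref[:, None, :], dim=-1)

# reshape into a per-patch map over the image
similarity_map = similarity.unflatten(1, (num_patches_height, num_patches_width))
print(similarity_map.shape)  # [1, 14, 14] for a 224x224 input with 16x16 patches
```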

## DINOv3ViTConfig

[[autodoc]] DINOv3ViTConfig

## DINOv3ConvNextConfig

[[autodoc]] DINOv3ConvNextConfig

## DINOv3ViTModel

[[autodoc]] DINOv3ViTModel
    - forward

## DINOv3ConvNextModel

[[autodoc]] DINOv3ConvNextModel
    - forward

## DINOv3ViTImageProcessorFast

[[autodoc]] DINOv3ViTImageProcessorFast
    - preprocess

src/transformers/models/auto/configuration_auto.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -117,6 +117,8 @@
         ("dinat", "DinatConfig"),
         ("dinov2", "Dinov2Config"),
         ("dinov2_with_registers", "Dinov2WithRegistersConfig"),
+        ("dinov3_convnext", "DINOv3ConvNextConfig"),
+        ("dinov3_vit", "DINOv3ViTConfig"),
         ("distilbert", "DistilBertConfig"),
         ("doge", "DogeConfig"),
         ("donut-swin", "DonutSwinConfig"),
```

src/transformers/models/auto/modeling_auto.py

Lines changed: 4 additions & 0 deletions
```diff
@@ -121,6 +121,8 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin):
         ("dinat", "DinatModel"),
         ("dinov2", "Dinov2Model"),
         ("dinov2_with_registers", "Dinov2WithRegistersModel"),
+        ("dinov3_convnext", "DINOv3ConvNextModel"),
+        ("dinov3_vit", "DINOv3ViTModel"),
         ("distilbert", "DistilBertModel"),
         ("doge", "DogeModel"),
         ("donut-swin", "DonutSwinModel"),
@@ -740,6 +742,8 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin):
         ("dinat", "DinatModel"),
         ("dinov2", "Dinov2Model"),
         ("dinov2_with_registers", "Dinov2WithRegistersModel"),
+        ("dinov3_convnext", "DINOv3ConvNextModel"),
+        ("dinov3_vit", "DINOv3ViTModel"),
         ("dpt", "DPTModel"),
         ("efficientformer", "EfficientFormerModel"),
         ("efficientnet", "EfficientNetModel"),
```

src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -68,7 +68,7 @@ class DINOv3ConvNextConfig(PretrainedConfig):
     >>> configuration = model.config
     ```"""

-    model_type = "DINOv3ConvNext"
+    model_type = "dinov3_convnext"

     def __init__(
         self,
```
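
The rename matters because `model_type` is the key the Auto classes use when resolving a checkpoint's `config.json`, so it has to match the `"dinov3_convnext"` entry added to `configuration_auto.py` above. A quick check of that invariant (a sketch, assuming the config class is exported at the top level):

```py
from transformers import AutoConfig, DINOv3ConvNextConfig

config = AutoConfig.for_model("dinov3_convnext")
assert isinstance(config, DINOv3ConvNextConfig)
assert config.model_type == "dinov3_convnext"
```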
