Commit e9c76ea (verified) · amyeroberts and VictorSanh · 0 parents

Duplicate from HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit

Co-authored-by: Victor Sanh <[email protected]>
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,97 @@
+ ---
+ license: apache-2.0
+ ---
+
+ Same as https://huggingface.co/HuggingFaceM4/siglip-so400m-14-384-flash-attn2 with two changes:
+ - increase the maximum resolution to 980 x 980 (instead of 384 x 384) by interpolating the position embeddings (an illustrative sketch of this interpolation follows the usage example below)
+ - implement the strategy from [NaViT](https://arxiv.org/abs/2307.06304) to allow (a) variable-resolution images and (b) aspect-ratio-preserving images
+
+ These changes apply only to the vision tower; the text tower is unchanged.
+ The implementation is fully backward compatible with https://huggingface.co/HuggingFaceM4/siglip-so400m-14-384-flash-attn2: just don't pass a `patch_attention_mask`.
+
+
+ Usage:
+ ```python
+ import torch
+ from modeling_siglip import SiglipVisionModel
+
+ DEVICE = torch.device("cuda:0")
+ PATCH_SIZE = 14
+
+ pixel_values = torch.randn(2, 3, 28, 42, dtype=torch.bfloat16, device=DEVICE)
+ # Pixel-level mask: 1 marks real pixels, 0 marks padding.
+ pixel_attention_mask = [
+     [
+         # image 0: only the top 14 x 42 pixels of the 28 x 42 canvas are real
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+         [1] * 14 + [1] * 14 + [1] * 14,
+
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+         [0] * 14 + [0] * 14 + [0] * 14,
+     ],
+     [
+         # image 1: only the left 28 x 28 pixels of the 28 x 42 canvas are real
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+         [1] * 14 + [1] * 14 + [0] * 14,
+     ],
+ ]
+ pixel_attention_mask = torch.tensor(pixel_attention_mask, dtype=torch.bool, device=DEVICE)
+ # Collapse the pixel mask to one value per 14 x 14 patch: a patch is kept if it contains any real pixel.
+ patches_subgrid = pixel_attention_mask.unfold(
+     dimension=1, size=PATCH_SIZE, step=PATCH_SIZE
+ ).unfold(dimension=2, size=PATCH_SIZE, step=PATCH_SIZE)
+ patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
+
+ model = SiglipVisionModel.from_pretrained("HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit", _flash_attn_2_enabled=True)
+ model.train()
+ model.vision_model.to(DEVICE, dtype=torch.bfloat16)
+
+ output = model.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
+ ```
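Two illustrative sketches related to the changes above. First, the position-embedding interpolation from the first bullet: this is a minimal sketch of resizing a learned square position-embedding table with bicubic interpolation; the function name and shapes are assumptions for illustration, not the exact code in `modeling_siglip.py`.

```python
import torch
import torch.nn.functional as F


def interpolate_pos_embed(pos_embed: torch.Tensor, new_side: int) -> torch.Tensor:
    # pos_embed: (num_positions, hidden_dim) learned table for a square grid of patches.
    num_positions, dim = pos_embed.shape
    old_side = int(num_positions ** 0.5)
    grid = pos_embed.reshape(1, old_side, old_side, dim).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=(new_side, new_side), mode="bicubic", align_corners=False)
    return grid.permute(0, 2, 3, 1).reshape(new_side * new_side, dim)


# e.g. resize to the 980 // 14 = 70 x 70 patch grid used by this checkpoint:
# new_table = interpolate_pos_embed(old_table, new_side=980 // 14)
```

Second, the `pixel_attention_mask` in the usage example is spelled out row by row for clarity. An equivalent mask can be built from the real (unpadded) size of each image; this hypothetical helper reproduces the mask above.

```python
import torch


def build_pixel_attention_mask(real_sizes, padded_height, padded_width, device=None):
    # real_sizes: list of (height, width) of the unpadded images in the batch.
    mask = torch.zeros(len(real_sizes), padded_height, padded_width, dtype=torch.bool, device=device)
    for i, (h, w) in enumerate(real_sizes):
        mask[i, :h, :w] = True  # 1s over the real pixels, 0s over the padding
    return mask


# For the example above: two 28 x 42 canvases, image 0 is really 14 x 42, image 1 is really 28 x 28.
# pixel_attention_mask = build_pixel_attention_mask([(14, 42), (28, 28)], 28, 42, device=DEVICE)
```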
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit",
+   "architectures": [
+     "SiglipModel"
+   ],
+   "auto_map": {
+     "AutoConfig": "HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit--configuration_siglip.SiglipConfig",
+     "AutoModel": "HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit--modeling_siglip.SiglipModel"
+   },
+   "initializer_factor": 1.0,
+   "model_type": "siglip",
+   "text_config": {
+     "hidden_size": 1152,
+     "intermediate_size": 4304,
+     "model_type": "siglip_text_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 27
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.37.0.dev0",
+   "vision_config": {
+     "hidden_size": 1152,
+     "image_size": 980,
+     "intermediate_size": 4304,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 27,
+     "patch_size": 14
+   }
+ }
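Since `auto_map` points at custom code hosted in this repo, loading through the Auto classes requires opting in to remote code execution; a minimal sketch (the direct `from modeling_siglip import ...` route shown in the README does not need this):

```python
from transformers import AutoConfig, AutoModel

# trust_remote_code=True lets transformers download and use the custom
# configuration_siglip.py / modeling_siglip.py modules referenced in auto_map.
config = AutoConfig.from_pretrained(
    "HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit", trust_remote_code=True
)
model = AutoModel.from_pretrained(
    "HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit", trust_remote_code=True
)
```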
configuration_siglip.py ADDED
@@ -0,0 +1,306 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Siglip model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from transformers.configuration_utils import PretrainedConfig
21
+ from transformers.utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ "google/siglip-base-patch16-224": "https://huggingface.co/google/siglip-base-patch16-224/resolve/main/config.json",
28
+ }
29
+
30
+
31
+ class SiglipTextConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`SiglipTextModel`]. It is used to instantiate a
34
+ Siglip text encoder according to the specified arguments, defining the model architecture. Instantiating a
35
+ configuration with the defaults will yield a similar configuration to that of the text encoder of the Siglip
36
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 32000):
43
+ Vocabulary size of the Siglip text model. Defines the number of different tokens that can be represented by
44
+ the `inputs_ids` passed when calling [`SiglipModel`].
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ intermediate_size (`int`, *optional*, defaults to 3072):
48
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
49
+ num_hidden_layers (`int`, *optional*, defaults to 12):
50
+ Number of hidden layers in the Transformer encoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ max_position_embeddings (`int`, *optional*, defaults to 64):
54
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
55
+ just in case (e.g., 512 or 1024 or 2048).
56
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
57
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
58
+ `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
59
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
60
+ The epsilon used by the layer normalization layers.
61
+ attention_dropout (`float`, *optional*, defaults to 0.0):
62
+ The dropout ratio for the attention probabilities.
63
+ pad_token_id (`int`, *optional*, defaults to 1):
64
+ The id of the padding token in the vocabulary.
65
+ bos_token_id (`int`, *optional*, defaults to 49406):
66
+ The id of the beginning-of-sequence token in the vocabulary.
67
+ eos_token_id (`int`, *optional*, defaults to 49407):
68
+ The id of the end-of-sequence token in the vocabulary.
69
+
70
+ Example:
71
+
72
+ ```python
73
+ >>> from transformers import SiglipTextConfig, SiglipTextModel
74
+
75
+ >>> # Initializing a SiglipTextConfig with google/siglip-base-patch16-224 style configuration
76
+ >>> configuration = SiglipTextConfig()
77
+
78
+ >>> # Initializing a SiglipTextModel (with random weights) from the google/siglip-base-patch16-224 style configuration
79
+ >>> model = SiglipTextModel(configuration)
80
+
81
+ >>> # Accessing the model configuration
82
+ >>> configuration = model.config
83
+ ```"""
84
+
85
+ model_type = "siglip_text_model"
86
+
87
+ def __init__(
88
+ self,
89
+ vocab_size=32000,
90
+ hidden_size=768,
91
+ intermediate_size=3072,
92
+ num_hidden_layers=12,
93
+ num_attention_heads=12,
94
+ max_position_embeddings=64,
95
+ hidden_act="gelu_pytorch_tanh",
96
+ layer_norm_eps=1e-6,
97
+ attention_dropout=0.0,
98
+ # This differs from `CLIPTokenizer`'s default and from openai/siglip
99
+ # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
100
+ pad_token_id=1,
101
+ bos_token_id=49406,
102
+ eos_token_id=49407,
103
+ _flash_attn_2_enabled=True,
104
+ **kwargs,
105
+ ):
106
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
107
+
108
+ self.vocab_size = vocab_size
109
+ self.hidden_size = hidden_size
110
+ self.intermediate_size = intermediate_size
111
+ self.num_hidden_layers = num_hidden_layers
112
+ self.num_attention_heads = num_attention_heads
113
+ self.max_position_embeddings = max_position_embeddings
114
+ self.layer_norm_eps = layer_norm_eps
115
+ self.hidden_act = hidden_act
116
+ self.attention_dropout = attention_dropout
117
+ self._flash_attn_2_enabled = _flash_attn_2_enabled
118
+
119
+ @classmethod
120
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
121
+ cls._set_token_in_kwargs(kwargs)
122
+
123
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
124
+
125
+ # get the text config dict if we are loading from SiglipConfig
126
+ if config_dict.get("model_type") == "siglip":
127
+ config_dict = config_dict["text_config"]
128
+
129
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
130
+ logger.warning(
131
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
132
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
133
+ )
134
+
135
+ return cls.from_dict(config_dict, **kwargs)
136
+
137
+
138
+ class SiglipVisionConfig(PretrainedConfig):
139
+ r"""
140
+ This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
141
+ Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
142
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
143
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
144
+
145
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
146
+ documentation from [`PretrainedConfig`] for more information.
147
+
148
+ Args:
149
+ hidden_size (`int`, *optional*, defaults to 768):
150
+ Dimensionality of the encoder layers and the pooler layer.
151
+ intermediate_size (`int`, *optional*, defaults to 3072):
152
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
153
+ num_hidden_layers (`int`, *optional*, defaults to 12):
154
+ Number of hidden layers in the Transformer encoder.
155
+ num_attention_heads (`int`, *optional*, defaults to 12):
156
+ Number of attention heads for each attention layer in the Transformer encoder.
157
+ num_channels (`int`, *optional*, defaults to 3):
158
+ Number of channels in the input images.
159
+ image_size (`int`, *optional*, defaults to 224):
160
+ The size (resolution) of each image.
161
+ patch_size (`int`, *optional*, defaults to 16):
162
+ The size (resolution) of each patch.
163
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
164
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
165
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
166
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
167
+ The epsilon used by the layer normalization layers.
168
+ attention_dropout (`float`, *optional*, defaults to 0.0):
169
+ The dropout ratio for the attention probabilities.
170
+
171
+ Example:
172
+
173
+ ```python
174
+ >>> from transformers import SiglipVisionConfig, SiglipVisionModel
175
+
176
+ >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
177
+ >>> configuration = SiglipVisionConfig()
178
+
179
+ >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
180
+ >>> model = SiglipVisionModel(configuration)
181
+
182
+ >>> # Accessing the model configuration
183
+ >>> configuration = model.config
184
+ ```"""
185
+
186
+ model_type = "siglip_vision_model"
187
+
188
+ def __init__(
189
+ self,
190
+ hidden_size=768,
191
+ intermediate_size=3072,
192
+ num_hidden_layers=12,
193
+ num_attention_heads=12,
194
+ num_channels=3,
195
+ image_size=224,
196
+ patch_size=16,
197
+ hidden_act="gelu_pytorch_tanh",
198
+ layer_norm_eps=1e-6,
199
+ attention_dropout=0.0,
200
+ _flash_attn_2_enabled=True,
201
+ **kwargs,
202
+ ):
203
+ super().__init__(**kwargs)
204
+
205
+ self.hidden_size = hidden_size
206
+ self.intermediate_size = intermediate_size
207
+ self.num_hidden_layers = num_hidden_layers
208
+ self.num_attention_heads = num_attention_heads
209
+ self.num_channels = num_channels
210
+ self.patch_size = patch_size
211
+ self.image_size = image_size
212
+ self.attention_dropout = attention_dropout
213
+ self.layer_norm_eps = layer_norm_eps
214
+ self.hidden_act = hidden_act
215
+ self._flash_attn_2_enabled = _flash_attn_2_enabled
216
+
217
+ @classmethod
218
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
219
+ cls._set_token_in_kwargs(kwargs)
220
+
221
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
222
+
223
+ # get the vision config dict if we are loading from SiglipConfig
224
+ if config_dict.get("model_type") == "siglip":
225
+ config_dict = config_dict["vision_config"]
226
+
227
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
228
+ logger.warning(
229
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
230
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
231
+ )
232
+
233
+ return cls.from_dict(config_dict, **kwargs)
234
+
235
+
236
+ class SiglipConfig(PretrainedConfig):
237
+ r"""
238
+ [`SiglipConfig`] is the configuration class to store the configuration of a [`SiglipModel`]. It is used to
239
+ instantiate a Siglip model according to the specified arguments, defining the text model and vision model configs.
240
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Siglip
241
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
242
+
243
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
244
+ documentation from [`PretrainedConfig`] for more information.
245
+
246
+ Args:
247
+ text_config (`dict`, *optional*):
248
+ Dictionary of configuration options used to initialize [`SiglipTextConfig`].
249
+ vision_config (`dict`, *optional*):
250
+ Dictionary of configuration options used to initialize [`SiglipVisionConfig`].
251
+ kwargs (*optional*):
252
+ Dictionary of keyword arguments.
253
+
254
+ Example:
255
+
256
+ ```python
257
+ >>> from transformers import SiglipConfig, SiglipModel
258
+
259
+ >>> # Initializing a SiglipConfig with google/siglip-base-patch16-224 style configuration
260
+ >>> configuration = SiglipConfig()
261
+
262
+ >>> # Initializing a SiglipModel (with random weights) from the google/siglip-base-patch16-224 style configuration
263
+ >>> model = SiglipModel(configuration)
264
+
265
+ >>> # Accessing the model configuration
266
+ >>> configuration = model.config
267
+
268
+ >>> # We can also initialize a SiglipConfig from a SiglipTextConfig and a SiglipVisionConfig
269
+ >>> from transformers import SiglipTextConfig, SiglipVisionConfig
270
+
271
+ >>> # Initializing a SiglipText and SiglipVision configuration
272
+ >>> config_text = SiglipTextConfig()
273
+ >>> config_vision = SiglipVisionConfig()
274
+
275
+ >>> config = SiglipConfig.from_text_vision_configs(config_text, config_vision)
276
+ ```"""
277
+
278
+ model_type = "siglip"
279
+
280
+ def __init__(self, text_config=None, vision_config=None, **kwargs):
281
+ super().__init__(**kwargs)
282
+
283
+ if text_config is None:
284
+ text_config = {}
285
+ logger.info("`text_config` is `None`. Initializing the `SiglipTextConfig` with default values.")
286
+
287
+ if vision_config is None:
288
+ vision_config = {}
289
+ logger.info("`vision_config` is `None`. Initializing the `SiglipVisionConfig` with default values.")
290
+
291
+ self.text_config = SiglipTextConfig(**text_config)
292
+ self.vision_config = SiglipVisionConfig(**vision_config)
293
+
294
+ self.initializer_factor = 1.0
295
+
296
+ @classmethod
297
+ def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config: SiglipVisionConfig, **kwargs):
298
+ r"""
299
+ Instantiate a [`SiglipConfig`] (or a derived class) from siglip text model configuration and siglip vision
300
+ model configuration.
301
+
302
+ Returns:
303
+ [`SiglipConfig`]: An instance of a configuration object
304
+ """
305
+
306
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
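For reference, the `vision_config` block from `config.json` above corresponds to instantiating `SiglipVisionConfig` directly with those values; a small illustrative sketch:

```python
from configuration_siglip import SiglipVisionConfig

# Values taken from the vision_config section of config.json above.
vision_config = SiglipVisionConfig(
    hidden_size=1152,
    intermediate_size=4304,
    num_hidden_layers=27,
    num_attention_heads=16,
    image_size=980,
    patch_size=14,
)
print(vision_config.image_size // vision_config.patch_size)  # 70 patches per side
```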
convert_siglip_to_hf.py ADDED
@@ -0,0 +1,413 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert SigLIP checkpoints from the original repository.
16
+
17
+ URL: https://github.com/google-research/big_vision/tree/main
18
+ """
19
+
20
+
21
+ import argparse
22
+ import collections
23
+ from pathlib import Path
24
+
25
+ import numpy as np
26
+ import requests
27
+ import torch
28
+ from huggingface_hub import hf_hub_download
29
+ from numpy import load
30
+ from PIL import Image
31
+
32
+ from transformers import SiglipConfig, SiglipImageProcessor, SiglipModel, SiglipProcessor, SiglipTokenizer
33
+ from transformers.utils import logging
34
+
35
+
36
+ logging.set_verbosity_info()
37
+ logger = logging.get_logger(__name__)
38
+
39
+
40
+ model_name_to_checkpoint = {
41
+ # base checkpoints
42
+ "siglip-base-patch16-224": "/Users/nielsrogge/Documents/SigLIP/webli_en_b16_224_63724782.npz",
43
+ "siglip-base-patch16-256": "/Users/nielsrogge/Documents/SigLIP/webli_en_b16_256_60500360.npz",
44
+ "siglip-base-patch16-384": "/Users/nielsrogge/Documents/SigLIP/webli_en_b16_384_68578854.npz",
45
+ "siglip-base-patch16-512": "/Users/nielsrogge/Documents/SigLIP/webli_en_b16_512_68580893.npz",
46
+ # large checkpoints
47
+ "siglip-large-patch16-256": "/Users/nielsrogge/Documents/SigLIP/webli_en_l16_256_60552751.npz",
48
+ "siglip-large-patch16-384": "/Users/nielsrogge/Documents/SigLIP/webli_en_l16_384_63634585.npz",
49
+ # multilingual checkpoint
50
+ "siglip-base-patch16-256-i18n": "/Users/nielsrogge/Documents/SigLIP/webli_i18n_b16_256_66117334.npz",
51
+ # so400m checkpoints
52
+ "siglip-so400m-patch14-384": "/Users/nielsrogge/Documents/SigLIP/webli_en_so400m_384_58765454.npz",
53
+ }
54
+
55
+ model_name_to_image_size = {
56
+ "siglip-base-patch16-224": 224,
57
+ "siglip-base-patch16-256": 256,
58
+ "siglip-base-patch16-384": 384,
59
+ "siglip-base-patch16-512": 512,
60
+ "siglip-large-patch16-256": 256,
61
+ "siglip-large-patch16-384": 384,
62
+ "siglip-base-patch16-256-i18n": 256,
63
+ "siglip-so400m-patch14-384": 384,
64
+ }
65
+
66
+
67
+ def get_siglip_config(model_name):
68
+ config = SiglipConfig()
69
+
70
+ vocab_size = 250000 if "i18n" in model_name else 32000
71
+ image_size = model_name_to_image_size[model_name]
72
+ patch_size = 16 if "patch16" in model_name else 14
73
+
74
+ # size of the architecture
75
+ config.vision_config.image_size = image_size
76
+ config.vision_config.patch_size = patch_size
77
+ config.text_config.vocab_size = vocab_size
78
+
79
+ if "base" in model_name:
80
+ pass
81
+ elif "large" in model_name:
82
+ config.text_config.hidden_size = 1024
83
+ config.text_config.intermediate_size = 4096
84
+ config.text_config.num_hidden_layers = 24
85
+ config.text_config.num_attention_heads = 16
86
+ config.vision_config.hidden_size = 1024
87
+ config.vision_config.intermediate_size = 4096
88
+ config.vision_config.num_hidden_layers = 24
89
+ config.vision_config.num_attention_heads = 16
90
+ elif "so400m" in model_name:
91
+ config.text_config.hidden_size = 1152
92
+ config.text_config.intermediate_size = 4304
93
+ config.text_config.num_hidden_layers = 27
94
+ config.text_config.num_attention_heads = 16
95
+ config.vision_config.hidden_size = 1152
96
+ config.vision_config.intermediate_size = 4304
97
+ config.vision_config.num_hidden_layers = 27
98
+ config.vision_config.num_attention_heads = 16
99
+ else:
100
+ raise ValueError("Model not supported")
101
+
102
+ return config
103
+
104
+
105
+ def create_rename_keys(config):
106
+ rename_keys = []
107
+ # fmt: off
108
+
109
+ # vision encoder
110
+
111
+ rename_keys.append(("params/img/embedding/kernel", "vision_model.embeddings.patch_embedding.weight"))
112
+ rename_keys.append(("params/img/embedding/bias", "vision_model.embeddings.patch_embedding.bias"))
113
+ rename_keys.append(("params/img/pos_embedding", "vision_model.embeddings.position_embedding.weight"))
114
+
115
+ for i in range(config.vision_config.num_hidden_layers):
116
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/LayerNorm_0/scale", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
117
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/LayerNorm_0/bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
118
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/LayerNorm_1/scale", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
119
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/LayerNorm_1/bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
120
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MlpBlock_0/Dense_0/kernel", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
121
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MlpBlock_0/Dense_0/bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
122
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MlpBlock_0/Dense_1/kernel", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
123
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MlpBlock_0/Dense_1/bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
124
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/key/kernel", f"vision_model.encoder.layers.{i}.self_attn.k_proj.weight"))
125
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/key/bias", f"vision_model.encoder.layers.{i}.self_attn.k_proj.bias"))
126
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/value/kernel", f"vision_model.encoder.layers.{i}.self_attn.v_proj.weight"))
127
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/value/bias", f"vision_model.encoder.layers.{i}.self_attn.v_proj.bias"))
128
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/query/kernel", f"vision_model.encoder.layers.{i}.self_attn.q_proj.weight"))
129
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/query/bias", f"vision_model.encoder.layers.{i}.self_attn.q_proj.bias"))
130
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/out/kernel", f"vision_model.encoder.layers.{i}.self_attn.out_proj.weight"))
131
+ rename_keys.append((f"params/img/Transformer/encoderblock_{i}/MultiHeadDotProductAttention_0/out/bias", f"vision_model.encoder.layers.{i}.self_attn.out_proj.bias"))
132
+
133
+ rename_keys.append(("params/img/Transformer/encoder_norm/scale", "vision_model.post_layernorm.weight"))
134
+ rename_keys.append(("params/img/Transformer/encoder_norm/bias", "vision_model.post_layernorm.bias"))
135
+
136
+ rename_keys.append(("params/img/MAPHead_0/probe", "vision_model.head.probe"))
137
+ rename_keys.append(("params/img/MAPHead_0/LayerNorm_0/scale", "vision_model.head.layernorm.weight"))
138
+ rename_keys.append(("params/img/MAPHead_0/LayerNorm_0/bias", "vision_model.head.layernorm.bias"))
139
+ rename_keys.append(("params/img/MAPHead_0/MlpBlock_0/Dense_0/kernel", "vision_model.head.mlp.fc1.weight"))
140
+ rename_keys.append(("params/img/MAPHead_0/MlpBlock_0/Dense_0/bias", "vision_model.head.mlp.fc1.bias"))
141
+ rename_keys.append(("params/img/MAPHead_0/MlpBlock_0/Dense_1/kernel", "vision_model.head.mlp.fc2.weight"))
142
+ rename_keys.append(("params/img/MAPHead_0/MlpBlock_0/Dense_1/bias", "vision_model.head.mlp.fc2.bias"))
143
+ rename_keys.append(("params/img/MAPHead_0/MultiHeadDotProductAttention_0/out/kernel", "vision_model.head.attention.out_proj.weight"))
144
+ rename_keys.append(("params/img/MAPHead_0/MultiHeadDotProductAttention_0/out/bias", "vision_model.head.attention.out_proj.bias"))
145
+
146
+ # text encoder
147
+
148
+ rename_keys.append(("params/txt/Embed_0/embedding", "text_model.embeddings.token_embedding.weight"))
149
+ rename_keys.append(("params/txt/pos_embedding", "text_model.embeddings.position_embedding.weight"))
150
+
151
+ for i in range(config.text_config.num_hidden_layers):
152
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/LayerNorm_0/scale", f"text_model.encoder.layers.{i}.layer_norm1.weight"))
153
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/LayerNorm_0/bias", f"text_model.encoder.layers.{i}.layer_norm1.bias"))
154
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/LayerNorm_1/scale", f"text_model.encoder.layers.{i}.layer_norm2.weight"))
155
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/LayerNorm_1/bias", f"text_model.encoder.layers.{i}.layer_norm2.bias"))
156
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MlpBlock_0/Dense_0/kernel", f"text_model.encoder.layers.{i}.mlp.fc1.weight"))
157
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MlpBlock_0/Dense_0/bias", f"text_model.encoder.layers.{i}.mlp.fc1.bias"))
158
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MlpBlock_0/Dense_1/kernel", f"text_model.encoder.layers.{i}.mlp.fc2.weight"))
159
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MlpBlock_0/Dense_1/bias", f"text_model.encoder.layers.{i}.mlp.fc2.bias"))
160
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/key/kernel", f"text_model.encoder.layers.{i}.self_attn.k_proj.weight"))
161
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/key/bias", f"text_model.encoder.layers.{i}.self_attn.k_proj.bias"))
162
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/value/kernel", f"text_model.encoder.layers.{i}.self_attn.v_proj.weight"))
163
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/value/bias", f"text_model.encoder.layers.{i}.self_attn.v_proj.bias"))
164
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/query/kernel", f"text_model.encoder.layers.{i}.self_attn.q_proj.weight"))
165
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/query/bias", f"text_model.encoder.layers.{i}.self_attn.q_proj.bias"))
166
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/out/kernel", f"text_model.encoder.layers.{i}.self_attn.out_proj.weight"))
167
+ rename_keys.append((f"params/txt/Encoder_0/encoderblock_{i}/MultiHeadDotProductAttention_0/out/bias", f"text_model.encoder.layers.{i}.self_attn.out_proj.bias"))
168
+
169
+ rename_keys.append(("params/txt/Encoder_0/encoder_norm/scale", "text_model.final_layer_norm.weight"))
170
+ rename_keys.append(("params/txt/Encoder_0/encoder_norm/bias", "text_model.final_layer_norm.bias"))
171
+ rename_keys.append(("params/txt/head/kernel", "text_model.head.weight"))
172
+ rename_keys.append(("params/txt/head/bias", "text_model.head.bias"))
173
+
174
+ # learned temperature and bias
175
+ rename_keys.append(("params/t", "logit_scale"))
176
+ rename_keys.append(("params/b", "logit_bias"))
177
+
178
+ # fmt: on
179
+ return rename_keys
180
+
181
+
182
+ def rename_key(dct, old, new, config):
183
+ val = dct.pop(old)
184
+
185
+ if ("out_proj" in new or "v_proj" in new or "k_proj" in new or "q_proj" in new) and "vision" in new:
186
+ val = val.reshape(-1, config.vision_config.hidden_size)
187
+ if ("out_proj" in new or "v_proj" in new or "k_proj" in new or "q_proj" in new) and "text" in new:
188
+ val = val.reshape(-1, config.text_config.hidden_size)
189
+
190
+ if "patch_embedding.weight" in new:
191
+ val = val.transpose(3, 2, 0, 1)
192
+ elif new.endswith("weight") and "position_embedding" not in new and "token_embedding" not in new:
193
+ val = val.T
194
+
195
+ if "position_embedding" in new and "vision" in new:
196
+ val = val.reshape(-1, config.vision_config.hidden_size)
197
+ if "position_embedding" in new and "text" in new:
198
+ val = val.reshape(-1, config.text_config.hidden_size)
199
+
200
+ if new.endswith("bias"):
201
+ val = val.reshape(-1)
202
+
203
+ dct[new] = torch.from_numpy(val)
204
+
205
+
206
+ def read_in_q_k_v_head(state_dict, config):
207
+ # read in individual input projection layers
208
+ key_proj_weight = (
209
+ state_dict.pop("params/img/MAPHead_0/MultiHeadDotProductAttention_0/key/kernel")
210
+ .reshape(-1, config.vision_config.hidden_size)
211
+ .T
212
+ )
213
+ key_proj_bias = state_dict.pop("params/img/MAPHead_0/MultiHeadDotProductAttention_0/key/bias").reshape(-1)
214
+ value_proj_weight = (
215
+ state_dict.pop("params/img/MAPHead_0/MultiHeadDotProductAttention_0/value/kernel")
216
+ .reshape(-1, config.vision_config.hidden_size)
217
+ .T
218
+ )
219
+ value_proj_bias = state_dict.pop("params/img/MAPHead_0/MultiHeadDotProductAttention_0/value/bias").reshape(-1)
220
+ query_proj_weight = (
221
+ state_dict.pop("params/img/MAPHead_0/MultiHeadDotProductAttention_0/query/kernel")
222
+ .reshape(-1, config.vision_config.hidden_size)
223
+ .T
224
+ )
225
+ query_proj_bias = state_dict.pop("params/img/MAPHead_0/MultiHeadDotProductAttention_0/query/bias").reshape(-1)
226
+
227
+ # next, add them to the state dict as a single matrix + vector
228
+ state_dict["vision_model.head.attention.in_proj_weight"] = torch.from_numpy(
229
+ np.concatenate([query_proj_weight, key_proj_weight, value_proj_weight], axis=0)
230
+ )
231
+ state_dict["vision_model.head.attention.in_proj_bias"] = torch.from_numpy(
232
+ np.concatenate([query_proj_bias, key_proj_bias, value_proj_bias], axis=0)
233
+ )
234
+
235
+
236
+ # We will verify our results on an image of cute cats
237
+ def prepare_img():
238
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
239
+ image = Image.open(requests.get(url, stream=True).raw)
240
+ return image
241
+
242
+
243
+ def flatten_nested_dict(params, parent_key="", sep="/"):
244
+ items = []
245
+
246
+ for k, v in params.items():
247
+ new_key = parent_key + sep + k if parent_key else k
248
+
249
+ if isinstance(v, collections.abc.MutableMapping):
250
+ items.extend(flatten_nested_dict(v, new_key, sep=sep).items())
251
+ else:
252
+ items.append((new_key, v))
253
+ return dict(items)
254
+
255
+
256
+ @torch.no_grad()
257
+ def convert_siglip_checkpoint(model_name, pytorch_dump_folder_path, verify_logits=True, push_to_hub=False):
258
+ """
259
+ Copy/paste/tweak model's weights to our SigLIP structure.
260
+ """
261
+
262
+ # define default SigLIP configuration
263
+ config = get_siglip_config(model_name)
264
+
265
+ # get checkpoint
266
+ checkpoint = model_name_to_checkpoint[model_name]
267
+
268
+ # get vocab file
269
+ if "i18n" in model_name:
270
+ vocab_file = "/Users/nielsrogge/Documents/SigLIP/multilingual_vocab/sentencepiece.model"
271
+ else:
272
+ vocab_file = "/Users/nielsrogge/Documents/SigLIP/english_vocab/sentencepiece.model"
273
+
274
+ # load original state dict
275
+ data = load(checkpoint)
276
+ state_dict = flatten_nested_dict(data)
277
+
278
+ # remove and rename some keys
279
+ rename_keys = create_rename_keys(config)
280
+ for src, dest in rename_keys:
281
+ rename_key(state_dict, src, dest, config)
282
+
283
+ # qkv matrices of attention pooling head need special treatment
284
+ read_in_q_k_v_head(state_dict, config)
285
+
286
+ # load HuggingFace model
287
+ model = SiglipModel(config).eval()
288
+ model.load_state_dict(state_dict)
289
+
290
+ # create processor
291
+ # important: make tokenizer not return attention_mask since original one doesn't require it
292
+ image_size = config.vision_config.image_size
293
+ size = {"height": image_size, "width": image_size}
294
+ image_processor = SiglipImageProcessor(size=size)
295
+ tokenizer = SiglipTokenizer(vocab_file=vocab_file, model_input_names=["input_ids"])
296
+ processor = SiglipProcessor(image_processor=image_processor, tokenizer=tokenizer)
297
+
298
+ # verify on dummy images and texts
299
+ url_1 = "https://cdn.openai.com/multimodal-neurons/assets/apple/apple-ipod.jpg"
300
+ image_1 = Image.open(requests.get(url_1, stream=True).raw).convert("RGB")
301
+ url_2 = "https://cdn.openai.com/multimodal-neurons/assets/apple/apple-blank.jpg"
302
+ image_2 = Image.open(requests.get(url_2, stream=True).raw).convert("RGB")
303
+ texts = ["an apple", "a picture of an apple"]
304
+
305
+ inputs = processor(images=[image_1, image_2], text=texts, return_tensors="pt", padding="max_length")
306
+
307
+ # verify input_ids against original ones
308
+ if image_size == 224:
309
+ filename = "siglip_pixel_values.pt"
310
+ elif image_size == 256:
311
+ filename = "siglip_pixel_values_256.pt"
312
+ elif image_size == 384:
313
+ filename = "siglip_pixel_values_384.pt"
314
+ elif image_size == 512:
315
+ filename = "siglip_pixel_values_512.pt"
316
+ else:
317
+ raise ValueError("Image size not supported")
318
+
319
+ filepath = hf_hub_download(repo_id="nielsr/test-image", filename=filename, repo_type="dataset")
320
+ original_pixel_values = torch.load(filepath)
321
+ filepath = hf_hub_download(repo_id="nielsr/test-image", filename="siglip_input_ids.pt", repo_type="dataset")
322
+ original_input_ids = torch.load(filepath)
323
+
324
+ if "i18n" not in model_name:
325
+ assert inputs.input_ids.tolist() == original_input_ids.tolist()
326
+
327
+ print("Mean of original pixel values:", original_pixel_values.mean())
328
+ print("Mean of new pixel values:", inputs.pixel_values.mean())
329
+
330
+ # note: we're testing with original pixel values here since we don't have exact pixel values
331
+ with torch.no_grad():
332
+ outputs = model(input_ids=inputs.input_ids, pixel_values=original_pixel_values)
333
+
334
+ # with torch.no_grad():
335
+ # outputs = model(input_ids=inputs.input_ids, pixel_values=inputs.pixel_values)
336
+
337
+ print(outputs.logits_per_image[:3, :3])
338
+
339
+ probs = torch.sigmoid(outputs.logits_per_image) # these are the probabilities
340
+ print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
341
+ print(f"{probs[0][1]:.1%} that image 0 is '{texts[1]}'")
342
+
343
+ if verify_logits:
344
+ if model_name == "siglip-base-patch16-224":
345
+ expected_slice = torch.tensor(
346
+ [[-2.9621, -2.1672], [-0.2713, 0.2910]],
347
+ )
348
+ elif model_name == "siglip-base-patch16-256":
349
+ expected_slice = torch.tensor(
350
+ [[-3.1146, -1.9894], [-0.7312, 0.6387]],
351
+ )
352
+ elif model_name == "siglip-base-patch16-384":
353
+ expected_slice = torch.tensor(
354
+ [[-2.8098, -2.1891], [-0.4242, 0.4102]],
355
+ )
356
+ elif model_name == "siglip-base-patch16-512":
357
+ expected_slice = torch.tensor(
358
+ [[-2.7899, -2.2668], [-0.4295, -0.0735]],
359
+ )
360
+ elif model_name == "siglip-large-patch16-256":
361
+ expected_slice = torch.tensor(
362
+ [[-1.5827, -0.5801], [-0.9153, 0.1363]],
363
+ )
364
+ elif model_name == "siglip-large-patch16-384":
365
+ expected_slice = torch.tensor(
366
+ [[-2.1523, -0.2899], [-0.2959, 0.7884]],
367
+ )
368
+ elif model_name == "siglip-so400m-patch14-384":
369
+ expected_slice = torch.tensor([[-1.2441, -0.6649], [-0.7060, 0.7374]])
370
+ elif model_name == "siglip-base-patch16-256-i18n":
371
+ expected_slice = torch.tensor(
372
+ [[-0.9064, 0.1073], [-0.0299, 0.5304]],
373
+ )
374
+
375
+ assert torch.allclose(outputs.logits_per_image[:3, :3], expected_slice, atol=1e-4)
376
+ print("Looks ok!")
377
+
378
+ if pytorch_dump_folder_path is not None:
379
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
380
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
381
+ model.save_pretrained(pytorch_dump_folder_path)
382
+ print(f"Saving processor to {pytorch_dump_folder_path}")
383
+ processor.save_pretrained(pytorch_dump_folder_path)
384
+
385
+ if push_to_hub:
386
+ model.push_to_hub(f"nielsr/{model_name}")
387
+ processor.push_to_hub(f"nielsr/{model_name}")
388
+
389
+
390
+ if __name__ == "__main__":
391
+ parser = argparse.ArgumentParser()
392
+ # Required parameters
393
+ parser.add_argument(
394
+ "--model_name",
395
+ default="siglip-base-patch16-224",
396
+ type=str,
397
+ choices=model_name_to_checkpoint.keys(),
398
+ help="Name of the model you'd like to convert.",
399
+ )
400
+ parser.add_argument(
401
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
402
+ )
403
+ parser.add_argument(
404
+ "--verify_logits",
405
+ action="store_false",
406
+ help="Whether to verify logits against the original implementation.",
407
+ )
408
+ parser.add_argument(
409
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
410
+ )
411
+
412
+ args = parser.parse_args()
413
+ convert_siglip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.verify_logits, args.push_to_hub)
image_processing_siglip.py ADDED
@@ -0,0 +1,225 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for SigLIP."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
20
+ from transformers.image_transforms import (
21
+ resize,
22
+ to_channel_dimension_format,
23
+ )
24
+ from transformers.image_utils import (
25
+ IMAGENET_STANDARD_MEAN,
26
+ IMAGENET_STANDARD_STD,
27
+ ChannelDimension,
28
+ ImageInput,
29
+ PILImageResampling,
30
+ infer_channel_dimension_format,
31
+ is_scaled_image,
32
+ make_list_of_images,
33
+ to_numpy_array,
34
+ valid_images,
35
+ )
36
+ from transformers.utils import TensorType, is_vision_available, logging
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ if is_vision_available():
43
+ import PIL
44
+
45
+
46
+ class SiglipImageProcessor(BaseImageProcessor):
47
+ r"""
48
+ Constructs a SigLIP image processor.
49
+
50
+ Args:
51
+ do_resize (`bool`, *optional*, defaults to `True`):
52
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
53
+ `do_resize` in the `preprocess` method.
54
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
55
+ Size of the image after resizing. Can be overridden by `size` in the `preprocess` method.
56
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
57
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
58
+ do_rescale (`bool`, *optional*, defaults to `True`):
59
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
60
+ the `preprocess` method.
61
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
62
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
63
+ method.
64
+ do_normalize (`bool`, *optional*, defaults to `True`):
65
+ Whether to normalize the image by the specified mean and standard deviation. Can be overridden by
66
+ `do_normalize` in the `preprocess` method.
67
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
68
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
69
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
70
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
71
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
72
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
74
+ """
75
+
76
+ model_input_names = ["pixel_values"]
77
+
78
+ def __init__(
79
+ self,
80
+ do_resize: bool = True,
81
+ size: Dict[str, int] = None,
82
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
83
+ do_rescale: bool = True,
84
+ rescale_factor: Union[int, float] = 1 / 255,
85
+ do_normalize: bool = True,
86
+ image_mean: Optional[Union[float, List[float]]] = None,
87
+ image_std: Optional[Union[float, List[float]]] = None,
88
+ **kwargs,
89
+ ) -> None:
90
+ super().__init__(**kwargs)
91
+ size = size if size is not None else {"height": 224, "width": 224}
92
+ image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
93
+ image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
94
+
95
+ self.do_resize = do_resize
96
+ self.size = size
97
+ self.resample = resample
98
+ self.do_rescale = do_rescale
99
+ self.rescale_factor = rescale_factor
100
+ self.do_normalize = do_normalize
101
+ self.image_mean = image_mean
102
+ self.image_std = image_std
103
+
104
+ def preprocess(
105
+ self,
106
+ images: ImageInput,
107
+ do_resize: bool = None,
108
+ size: Dict[str, int] = None,
109
+ resample: PILImageResampling = None,
110
+ do_rescale: bool = None,
111
+ rescale_factor: float = None,
112
+ do_normalize: bool = None,
113
+ image_mean: Optional[Union[float, List[float]]] = None,
114
+ image_std: Optional[Union[float, List[float]]] = None,
115
+ return_tensors: Optional[Union[str, TensorType]] = None,
116
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
117
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
118
+ **kwargs,
119
+ ) -> PIL.Image.Image:
120
+ """
121
+ Preprocess an image or batch of images.
122
+
123
+ Args:
124
+ images (`ImageInput`):
125
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
126
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
127
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
128
+ Whether to resize the image.
129
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
130
+ Size of the image after resizing.
131
+ resample (`int`, *optional*, defaults to `self.resample`):
132
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
133
+ has an effect if `do_resize` is set to `True`.
134
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
135
+ Whether to rescale the image.
136
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
137
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
138
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
139
+ Whether to normalize the image.
140
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
141
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
142
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
143
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
144
+ `True`.
145
+ return_tensors (`str` or `TensorType`, *optional*):
146
+ The type of tensors to return. Can be one of:
147
+ - Unset: Return a list of `np.ndarray`.
148
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
149
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
150
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
151
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
152
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
153
+ The channel dimension format for the output image. Can be one of:
154
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
155
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
156
+ - Unset: Use the channel dimension format of the input image.
157
+ input_data_format (`ChannelDimension` or `str`, *optional*):
158
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
159
+ from the input image. Can be one of:
160
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
161
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
162
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
163
+ """
164
+ do_resize = do_resize if do_resize is not None else self.do_resize
165
+ size = size if size is not None else self.size
166
+ size = get_size_dict(size, param_name="size", default_to_square=False)
167
+ resample = resample if resample is not None else self.resample
168
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
169
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
170
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
171
+ image_mean = image_mean if image_mean is not None else self.image_mean
172
+ image_std = image_std if image_std is not None else self.image_std
173
+
174
+ images = make_list_of_images(images)
175
+
176
+ if not valid_images(images):
177
+ raise ValueError(
178
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
179
+ "torch.Tensor, tf.Tensor or jax.ndarray."
180
+ )
181
+
182
+ if do_resize and size is None:
183
+ raise ValueError("Size must be specified if do_resize is True.")
184
+
185
+ if do_rescale and rescale_factor is None:
186
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
187
+
188
+ # All transformations expect numpy arrays.
189
+ images = [to_numpy_array(image) for image in images]
190
+
191
+ if is_scaled_image(images[0]) and do_rescale:
192
+ logger.warning_once(
193
+ "It looks like you are trying to rescale already rescaled images. If the input"
194
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
195
+ )
196
+
197
+ if input_data_format is None:
198
+ # We assume that all images have the same channel dimension format.
199
+ input_data_format = infer_channel_dimension_format(images[0])
200
+
201
+ if do_resize:
202
+ height, width = size["height"], size["width"]
203
+ images = [
204
+ resize(image=image, size=(height, width), resample=resample, input_data_format=input_data_format)
205
+ for image in images
206
+ ]
207
+
208
+ if do_rescale:
209
+ images = [
210
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
211
+ for image in images
212
+ ]
213
+
214
+ if do_normalize:
215
+ images = [
216
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
217
+ for image in images
218
+ ]
219
+
220
+ images = [
221
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
222
+ ]
223
+
224
+ data = {"pixel_values": images}
225
+ return BatchFeature(data=data, tensor_type=return_tensors)
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ccea61f0d7617845a66fdf30bf2bcf0a090f7c74e8f7da2bf7b76e41ae4dfbc
+ size 3531170592
modeling_siglip.py ADDED
@@ -0,0 +1,1473 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Siglip model."""
16
+
17
+
18
+ import math
19
+ import warnings
20
+ from dataclasses import dataclass
21
+ from typing import Any, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import torch
25
+ import torch.nn.functional as F
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn.init import _calculate_fan_in_and_fan_out
29
+
30
+ from transformers.activations import ACT2FN
31
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
32
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
33
+ from transformers.modeling_utils import PreTrainedModel
34
+ from transformers.utils import (
35
+ ModelOutput,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ is_flash_attn_2_available,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
48
+
49
+ SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
50
+ "google/siglip-base-patch16-224",
51
+ # See all SigLIP models at https://huggingface.co/models?filter=siglip
52
+ ]
53
+
54
+ if is_flash_attn_2_available():
55
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
56
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
57
+
58
+
59
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
60
+ def _get_unpad_data(attention_mask):
61
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
62
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
63
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
64
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
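+ # `cu_seqlens` holds cumulative sequence lengths with a leading 0, the layout expected by flash_attn_varlen_func.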
65
+ return (
66
+ indices,
67
+ cu_seqlens,
68
+ max_seqlen_in_batch,
69
+ )
70
+
71
+
72
+ def _trunc_normal_(tensor, mean, std, a, b):
73
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
74
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
75
+ def norm_cdf(x):
76
+ # Computes standard normal cumulative distribution function
77
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
78
+
79
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
80
+ warnings.warn(
81
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
82
+ "The distribution of values may be incorrect.",
83
+ stacklevel=2,
84
+ )
85
+
86
+ # Values are generated by using a truncated uniform distribution and
87
+ # then using the inverse CDF for the normal distribution.
88
+ # Get upper and lower cdf values
89
+ l = norm_cdf((a - mean) / std)
90
+ u = norm_cdf((b - mean) / std)
91
+
92
+ # Uniformly fill tensor with values from [l, u], then translate to
93
+ # [2l-1, 2u-1].
94
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
95
+
96
+ # Use inverse cdf transform for normal distribution to get truncated
97
+ # standard normal
98
+ if tensor.dtype in [torch.float16, torch.bfloat16]:
99
+ # The `erfinv_` op is not (yet?) defined in float16+cpu, bfloat16+gpu
100
+ og_dtype = tensor.dtype
101
+ tensor = tensor.to(torch.float32)
102
+ tensor.erfinv_()
103
+ tensor = tensor.to(og_dtype)
104
+ else:
105
+ tensor.erfinv_()
106
+
107
+ # Transform to proper mean, std
108
+ tensor.mul_(std * math.sqrt(2.0))
109
+ tensor.add_(mean)
110
+
111
+ # Clamp to ensure it's in the proper range
112
+ if tensor.dtype == torch.float16:
113
+ # The `clamp_` op is not (yet?) defined in float16+cpu
114
+ tensor = tensor.to(torch.float32)
115
+ tensor.clamp_(min=a, max=b)
116
+ tensor = tensor.to(torch.float16)
117
+ else:
118
+ tensor.clamp_(min=a, max=b)
119
+
120
+
121
+ def trunc_normal_tf_(
122
+ tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
123
+ ) -> torch.Tensor:
124
+ """Fills the input Tensor with values drawn from a truncated
125
+ normal distribution. The values are effectively drawn from the
126
+ normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)`
127
+ with values outside :math:`[a, b]` redrawn until they are within
128
+ the bounds. The method used for generating the random values works
129
+ best when :math:`a \\leq \text{mean} \\leq b`.
130
+
131
+ NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
132
+ bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
133
+ and the result is subsequently scaled and shifted by the mean and std args.
134
+
135
+ Args:
136
+ tensor: an n-dimensional `torch.Tensor`
137
+ mean: the mean of the normal distribution
138
+ std: the standard deviation of the normal distribution
139
+ a: the minimum cutoff value
140
+ b: the maximum cutoff value
141
+ """
142
+ with torch.no_grad():
143
+ _trunc_normal_(tensor, 0, 1.0, a, b)
144
+ tensor.mul_(std).add_(mean)
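+ # Illustrative usage (not part of the original file): trunc_normal_tf_(torch.empty(768), std=0.02)
+ # fills the tensor in place with values drawn around 0 and clamped to roughly [-2 * 0.02, 2 * 0.02].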
145
+
146
+
147
+ def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
148
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
149
+ if mode == "fan_in":
150
+ denom = fan_in
151
+ elif mode == "fan_out":
152
+ denom = fan_out
153
+ elif mode == "fan_avg":
154
+ denom = (fan_in + fan_out) / 2
155
+
156
+ variance = scale / denom
157
+
158
+ if distribution == "truncated_normal":
159
+ # constant is stddev of standard normal truncated to (-2, 2)
160
+ trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
161
+ elif distribution == "normal":
162
+ with torch.no_grad():
163
+ tensor.normal_(std=math.sqrt(variance))
164
+ elif distribution == "uniform":
165
+ bound = math.sqrt(3 * variance)
166
+ with torch.no_grad():
167
+ tensor.uniform_(-bound, bound)
168
+ else:
169
+ raise ValueError(f"invalid distribution {distribution}")
170
+
171
+
172
+ def lecun_normal_(tensor):
173
+ variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
174
+
175
+
176
+ def default_flax_embed_init(tensor):
177
+ variance_scaling_(tensor, mode="fan_in", distribution="normal")
178
+
179
+
180
+ @dataclass
181
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
182
+ class SiglipVisionModelOutput(ModelOutput):
183
+ """
184
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
185
+
186
+ Args:
187
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
188
+ The image embeddings obtained by applying the projection layer to the pooler_output.
189
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
190
+ Sequence of hidden-states at the output of the last layer of the model.
191
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
192
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
193
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
194
+
195
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
196
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
197
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
198
+ sequence_length)`.
199
+
200
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
201
+ heads.
202
+ """
203
+
204
+ image_embeds: Optional[torch.FloatTensor] = None
205
+ last_hidden_state: torch.FloatTensor = None
206
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
207
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
208
+
209
+
210
+ @dataclass
211
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Siglip
212
+ class SiglipTextModelOutput(ModelOutput):
213
+ """
214
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
215
+
216
+ Args:
217
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
218
+ The text embeddings obtained by applying the projection layer to the pooler_output.
219
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
220
+ Sequence of hidden-states at the output of the last layer of the model.
221
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
222
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
223
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
224
+
225
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
226
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
227
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
228
+ sequence_length)`.
229
+
230
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
231
+ heads.
232
+ """
233
+
234
+ text_embeds: Optional[torch.FloatTensor] = None
235
+ last_hidden_state: torch.FloatTensor = None
236
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
237
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
238
+
239
+
240
+ @dataclass
241
+ # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Siglip
242
+ class SiglipOutput(ModelOutput):
243
+ """
244
+ Args:
245
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
246
+ Contrastive loss for image-text similarity.
247
+ logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
248
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
249
+ similarity scores.
250
+ logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
251
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
252
+ similarity scores.
253
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of [`SiglipTextModel`].
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
256
+ The image embeddings obtained by applying the projection layer to the pooled output of [`SiglipVisionModel`].
257
+ text_model_output(`BaseModelOutputWithPooling`):
258
+ The output of the [`SiglipTextModel`].
259
+ vision_model_output(`BaseModelOutputWithPooling`):
260
+ The output of the [`SiglipVisionModel`].
261
+ """
262
+
263
+ loss: Optional[torch.FloatTensor] = None
264
+ logits_per_image: torch.FloatTensor = None
265
+ logits_per_text: torch.FloatTensor = None
266
+ text_embeds: torch.FloatTensor = None
267
+ image_embeds: torch.FloatTensor = None
268
+ text_model_output: BaseModelOutputWithPooling = None
269
+ vision_model_output: BaseModelOutputWithPooling = None
270
+
271
+ def to_tuple(self) -> Tuple[Any]:
272
+ return tuple(
273
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
274
+ for k in self.keys()
275
+ )
276
+
277
+
278
+ class SiglipVisionEmbeddings(nn.Module):
279
+ def __init__(self, config: SiglipVisionConfig):
280
+ super().__init__()
281
+ self.config = config
282
+ self.embed_dim = config.hidden_size
283
+ self.image_size = config.image_size
284
+ self.patch_size = config.patch_size
285
+
286
+ self.patch_embedding = nn.Conv2d(
287
+ in_channels=config.num_channels,
288
+ out_channels=self.embed_dim,
289
+ kernel_size=self.patch_size,
290
+ stride=self.patch_size,
291
+ padding="valid",
292
+ )
293
+
294
+ self.num_patches_per_side = self.image_size // self.patch_size
295
+ self.num_patches = self.num_patches_per_side**2
296
+ self.num_positions = self.num_patches
297
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
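+ # The position table is a fixed (image_size // patch_size) ** 2 grid; variable-resolution images are
+ # mapped onto this grid in `forward` via bucketized fractional patch coordinates.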
298
+
299
+ def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
300
+ batch_size = pixel_values.size(0)
301
+
302
+ patch_embeds = self.patch_embedding(pixel_values)
303
+ embeddings = patch_embeds.flatten(2).transpose(1, 2)
304
+
305
+ max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
306
+ max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
307
+ boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
308
+ position_ids = torch.full(
309
+ size=(
310
+ batch_size,
311
+ max_nb_patches_h * max_nb_patches_w,
312
+ ),
313
+ fill_value=0,
314
+ )
315
+
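+ # For each image, only the patches marked in `patch_attention_mask` are real. Their fractional
+ # (row, col) coordinates are bucketized onto the fixed num_patches_per_side grid, so every image
+ # gets position ids spanning the full learned grid regardless of its resolution or aspect ratio.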
316
+ for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
317
+ nb_patches_h = p_attn_mask[:, 0].sum()
318
+ nb_patches_w = p_attn_mask[0].sum()
319
+
320
+ fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
321
+ fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
322
+
323
+ bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
324
+ bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
325
+
326
+ pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
327
+ position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
328
+
329
+ position_ids = position_ids.to(self.position_embedding.weight.device)
330
+
331
+ embeddings = embeddings + self.position_embedding(position_ids)
332
+ return embeddings
333
+
334
+
335
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Siglip
336
+ class SiglipTextEmbeddings(nn.Module):
337
+ def __init__(self, config: SiglipTextConfig):
338
+ super().__init__()
339
+ embed_dim = config.hidden_size
340
+
341
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
342
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
343
+
344
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
345
+ self.register_buffer(
346
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
347
+ )
348
+
349
+ def forward(
350
+ self,
351
+ input_ids: Optional[torch.LongTensor] = None,
352
+ position_ids: Optional[torch.LongTensor] = None,
353
+ inputs_embeds: Optional[torch.FloatTensor] = None,
354
+ ) -> torch.Tensor:
355
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
356
+
357
+ if position_ids is None:
358
+ position_ids = self.position_ids[:, :seq_length]
359
+
360
+ if inputs_embeds is None:
361
+ inputs_embeds = self.token_embedding(input_ids)
362
+
363
+ position_embeddings = self.position_embedding(position_ids)
364
+ embeddings = inputs_embeds + position_embeddings
365
+
366
+ return embeddings
367
+
368
+
369
+ class SiglipAttention(nn.Module):
370
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
371
+
372
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
373
+ def __init__(self, config):
374
+ super().__init__()
375
+ self.config = config
376
+ self.embed_dim = config.hidden_size
377
+ self.num_heads = config.num_attention_heads
378
+ self.head_dim = self.embed_dim // self.num_heads
379
+ if self.head_dim * self.num_heads != self.embed_dim:
380
+ raise ValueError(
381
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
382
+ f" {self.num_heads})."
383
+ )
384
+ self.scale = self.head_dim**-0.5
385
+ self.dropout = config.attention_dropout
386
+
387
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
388
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
389
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
390
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
391
+
392
+ def forward(
393
+ self,
394
+ hidden_states: torch.Tensor,
395
+ attention_mask: Optional[torch.Tensor] = None,
396
+ output_attentions: Optional[bool] = False,
397
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
398
+ """Input shape: Batch x Time x Channel"""
399
+
400
+ batch_size, q_len, _ = hidden_states.size()
401
+
402
+ query_states = self.q_proj(hidden_states)
403
+ key_states = self.k_proj(hidden_states)
404
+ value_states = self.v_proj(hidden_states)
405
+
406
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
407
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
408
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
409
+
410
+ k_v_seq_len = key_states.shape[-2]
411
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
412
+
413
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
414
+ raise ValueError(
415
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
416
+ f" {attn_weights.size()}"
417
+ )
418
+
419
+ if attention_mask is not None:
420
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
421
+ raise ValueError(
422
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
423
+ )
424
+ attn_weights = attn_weights + attention_mask
425
+
426
+ # upcast attention to fp32
427
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
428
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
429
+ attn_output = torch.matmul(attn_weights, value_states)
430
+
431
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
432
+ raise ValueError(
433
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
434
+ f" {attn_output.size()}"
435
+ )
436
+
437
+ attn_output = attn_output.transpose(1, 2).contiguous()
438
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
439
+
440
+ attn_output = self.out_proj(attn_output)
441
+
442
+ return attn_output, attn_weights
443
+
444
+
445
+ class SiglipFlashAttention2(SiglipAttention):
446
+ """
447
+ SigLIP flash attention module. This module inherits from `SiglipAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any of them.
450
+ """
451
+
452
+ def __init__(self, *args, **kwargs):
453
+ super().__init__(*args, **kwargs)
454
+ self.is_causal = False # Hack to make sure we don't use a causal mask
455
+
456
+ def forward(
457
+ self,
458
+ hidden_states: torch.Tensor,
459
+ attention_mask: Optional[torch.LongTensor] = None,
460
+ position_ids: Optional[torch.LongTensor] = None,
461
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
462
+ output_attentions: bool = False,
463
+ use_cache: bool = False,
464
+ **kwargs,
465
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
466
+ output_attentions = False
467
+
468
+ bsz, q_len, _ = hidden_states.size()
469
+
470
+ query_states = self.q_proj(hidden_states)
471
+ key_states = self.k_proj(hidden_states)
472
+ value_states = self.v_proj(hidden_states)
473
+
474
+ # Flash attention requires the input to have the shape
475
+ # batch_size x seq_length x num_heads x head_dim
476
+ # therefore we just need to keep the original shape
477
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
478
+ key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
479
+ value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
480
+
481
+ kv_seq_len = key_states.shape[-2]
482
+ if past_key_value is not None:
483
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
484
+ # cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
485
+ # query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
486
+
487
+ # if past_key_value is not None:
488
+ # cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
489
+ # key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
490
+
491
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
492
+ # to be able to avoid many of these transpose/reshape/view.
493
+ query_states = query_states.transpose(1, 2)
494
+ key_states = key_states.transpose(1, 2)
495
+ value_states = value_states.transpose(1, 2)
496
+
497
+ dropout_rate = self.dropout if self.training else 0.0
498
+
499
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
500
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
+ # cast them back to the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+ # to fp32.
504
+
505
+ input_dtype = query_states.dtype
506
+ if input_dtype == torch.float32:
507
+ if torch.is_autocast_enabled():
508
+ target_dtype = torch.get_autocast_gpu_dtype()
509
+ # Handle the case where the model is quantized
510
+ elif hasattr(self.config, "_pre_quantization_dtype"):
511
+ target_dtype = self.config._pre_quantization_dtype
512
+ else:
513
+ target_dtype = self.q_proj.weight.dtype
514
+
515
+ logger.warning_once(
516
+ "The input hidden states seems to be silently casted in float32, this might be related to the fact"
517
+ " you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
518
+ f" {target_dtype}."
519
+ )
520
+
521
+ query_states = query_states.to(target_dtype)
522
+ key_states = key_states.to(target_dtype)
523
+ value_states = value_states.to(target_dtype)
524
+
525
+ attn_output = self._flash_attention_forward(
526
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
527
+ )
528
+
529
+ attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
530
+ attn_output = self.out_proj(attn_output)
531
+
532
+ if not output_attentions:
533
+ attn_weights = None
534
+
535
+ return attn_output, attn_weights
536
+
537
+ def _flash_attention_forward(
538
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
539
+ ):
540
+ """
541
+ Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
+ the input is first unpadded, the attention scores are computed, and the final output is padded back.
543
+
544
+ Args:
545
+ query_states (`torch.Tensor`):
546
+ Input query states to be passed to Flash Attention API
547
+ key_states (`torch.Tensor`):
548
+ Input key states to be passed to Flash Attention API
549
+ value_states (`torch.Tensor`):
550
+ Input value states to be passed to Flash Attention API
551
+ attention_mask (`torch.Tensor`):
552
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
553
+ position of padding tokens and 1 for the position of non-padding tokens.
554
+ dropout (`float`, *optional*):
555
+ Attention dropout
556
+ softmax_scale (`float`, *optional*):
557
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
558
+ """
559
+
560
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
561
+ causal = self.is_causal and query_length != 1
562
+
563
+ # Contains at least one padding token in the sequence
564
+ if attention_mask is not None:
565
+ batch_size = query_states.shape[0]
566
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
567
+ query_states, key_states, value_states, attention_mask, query_length
568
+ )
569
+
570
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
571
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
572
+
573
+ attn_output_unpad = flash_attn_varlen_func(
574
+ query_states,
575
+ key_states,
576
+ value_states,
577
+ cu_seqlens_q=cu_seqlens_q,
578
+ cu_seqlens_k=cu_seqlens_k,
579
+ max_seqlen_q=max_seqlen_in_batch_q,
580
+ max_seqlen_k=max_seqlen_in_batch_k,
581
+ dropout_p=dropout,
582
+ softmax_scale=softmax_scale,
583
+ causal=causal,
584
+ )
585
+
586
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
587
+ else:
588
+ attn_output = flash_attn_func(
589
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
590
+ )
591
+
592
+ return attn_output
593
+
594
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
595
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
596
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
597
+
598
+ key_layer = index_first_axis(
599
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
600
+ )
601
+ value_layer = index_first_axis(
602
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
603
+ )
604
+ if query_length == kv_seq_len:
605
+ query_layer = index_first_axis(
606
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
607
+ )
608
+ cu_seqlens_q = cu_seqlens_k
609
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
610
+ indices_q = indices_k
611
+ elif query_length == 1:
612
+ max_seqlen_in_batch_q = 1
613
+ cu_seqlens_q = torch.arange(
614
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
615
+ ) # There is a memcpy here, that is very bad.
616
+ indices_q = cu_seqlens_q[:-1]
617
+ query_layer = query_layer.squeeze(1)
618
+ else:
619
+ # The -q_len: slice assumes left padding.
620
+ attention_mask = attention_mask[:, -query_length:]
621
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
622
+
623
+ return (
624
+ query_layer,
625
+ key_layer,
626
+ value_layer,
627
+ indices_q,
628
+ (cu_seqlens_q, cu_seqlens_k),
629
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
630
+ )
631
+
632
+
633
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
634
+ class SiglipMLP(nn.Module):
635
+ def __init__(self, config):
636
+ super().__init__()
637
+ self.config = config
638
+ self.activation_fn = ACT2FN[config.hidden_act]
639
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
640
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
641
+
642
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
643
+ hidden_states = self.fc1(hidden_states)
644
+ hidden_states = self.activation_fn(hidden_states)
645
+ hidden_states = self.fc2(hidden_states)
646
+ return hidden_states
647
+
648
+
649
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
650
+ class SiglipEncoderLayer(nn.Module):
651
+ def __init__(self, config: SiglipConfig):
652
+ super().__init__()
653
+ self.embed_dim = config.hidden_size
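+ # FlashAttention-2 kernels are used when `_flash_attn_2_enabled` is set on the config;
+ # otherwise the eager SiglipAttention implementation is used.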
654
+ self.self_attn = (
655
+ SiglipAttention(config)
656
+ if not getattr(config, "_flash_attn_2_enabled", False)
657
+ else SiglipFlashAttention2(config)
658
+ )
659
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
660
+ self.mlp = SiglipMLP(config)
661
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
662
+
663
+ def forward(
664
+ self,
665
+ hidden_states: torch.Tensor,
666
+ attention_mask: torch.Tensor,
667
+ output_attentions: Optional[bool] = False,
668
+ ) -> Tuple[torch.FloatTensor]:
669
+ """
670
+ Args:
671
+ hidden_states (`torch.FloatTensor`):
672
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
673
+ attention_mask (`torch.FloatTensor`):
674
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
675
+ output_attentions (`bool`, *optional*, defaults to `False`):
676
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
677
+ returned tensors for more detail.
678
+ """
679
+ residual = hidden_states
680
+
681
+ hidden_states = self.layer_norm1(hidden_states)
682
+ hidden_states, attn_weights = self.self_attn(
683
+ hidden_states=hidden_states,
684
+ attention_mask=attention_mask,
685
+ output_attentions=output_attentions,
686
+ )
687
+ hidden_states = residual + hidden_states
688
+
689
+ residual = hidden_states
690
+ hidden_states = self.layer_norm2(hidden_states)
691
+ hidden_states = self.mlp(hidden_states)
692
+ hidden_states = residual + hidden_states
693
+
694
+ outputs = (hidden_states,)
695
+
696
+ if output_attentions:
697
+ outputs += (attn_weights,)
698
+
699
+ return outputs
700
+
701
+
702
+ class SiglipPreTrainedModel(PreTrainedModel):
703
+ """
704
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
705
+ models.
706
+ """
707
+
708
+ config_class = SiglipConfig
709
+ base_model_prefix = "siglip"
710
+ supports_gradient_checkpointing = True
711
+
712
+ def _init_weights(self, module):
713
+ """Initialize the weights"""
714
+
715
+ if isinstance(module, SiglipVisionEmbeddings):
716
+ width = (
717
+ self.config.vision_config.hidden_size
718
+ if isinstance(self.config, SiglipConfig)
719
+ else self.config.hidden_size
720
+ )
721
+ nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
722
+ elif isinstance(module, nn.Embedding):
723
+ default_flax_embed_init(module.weight)
724
+ elif isinstance(module, SiglipAttention):
725
+ nn.init.normal_(module.q_proj.weight)
726
+ nn.init.normal_(module.k_proj.weight)
727
+ nn.init.normal_(module.v_proj.weight)
728
+ nn.init.normal_(module.out_proj.weight)
729
+ nn.init.zeros_(module.q_proj.bias)
730
+ nn.init.zeros_(module.k_proj.bias)
731
+ nn.init.zeros_(module.v_proj.bias)
732
+ nn.init.zeros_(module.out_proj.bias)
733
+ elif isinstance(module, SiglipMLP):
734
+ nn.init.normal_(module.fc1.weight)
735
+ nn.init.normal_(module.fc2.weight)
736
+ nn.init.normal_(module.fc1.bias, std=1e-6)
737
+ nn.init.normal_(module.fc2.bias, std=1e-6)
738
+ elif isinstance(module, SiglipMultiheadAttentionPoolingHead):
739
+ nn.init.normal_(module.probe.data)
740
+ nn.init.normal_(module.attention.in_proj_weight.data)
741
+ nn.init.zeros_(module.attention.in_proj_bias.data)
742
+ elif isinstance(module, SiglipModel):
743
+ logit_scale_init = torch.tensor(0.0)
744
+ module.logit_scale.data.fill_(logit_scale_init)
745
+ module.logit_bias.data.zero_()
746
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
747
+ lecun_normal_(module.weight)
748
+ if module.bias is not None:
749
+ nn.init.zeros_(module.bias)
750
+ elif isinstance(module, nn.LayerNorm):
751
+ module.bias.data.zero_()
752
+ module.weight.data.fill_(1.0)
753
+
754
+
755
+ SIGLIP_START_DOCSTRING = r"""
756
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
757
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
758
+ etc.)
759
+
760
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
761
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
762
+ and behavior.
763
+
764
+ Parameters:
765
+ config ([`SiglipConfig`]): Model configuration class with all the parameters of the model.
766
+ Initializing with a config file does not load the weights associated with the model, only the
767
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
768
+ """
769
+
770
+ SIGLIP_TEXT_INPUTS_DOCSTRING = r"""
771
+ Args:
772
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
773
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
774
+ it.
775
+
776
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
777
+ [`PreTrainedTokenizer.__call__`] for details.
778
+
779
+ [What are input IDs?](../glossary#input-ids)
780
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
781
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
782
+
783
+ - 1 for tokens that are **not masked**,
784
+ - 0 for tokens that are **masked**.
785
+
786
+ [What are attention masks?](../glossary#attention-mask)
787
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
788
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
789
+ config.max_position_embeddings - 1]`.
790
+
791
+ [What are position IDs?](../glossary#position-ids)
792
+ output_attentions (`bool`, *optional*):
793
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
794
+ tensors for more detail.
795
+ output_hidden_states (`bool`, *optional*):
796
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
797
+ more detail.
798
+ return_dict (`bool`, *optional*):
799
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
800
+ """
801
+
802
+ SIGLIP_VISION_INPUTS_DOCSTRING = r"""
803
+ Args:
804
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
805
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
806
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
807
+ output_attentions (`bool`, *optional*):
808
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
809
+ tensors for more detail.
810
+ output_hidden_states (`bool`, *optional*):
811
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
812
+ more detail.
813
+ return_dict (`bool`, *optional*):
814
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
815
+ """
816
+
817
+ SIGLIP_INPUTS_DOCSTRING = r"""
818
+ Args:
819
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
820
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
821
+ it.
822
+
823
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
824
+ [`PreTrainedTokenizer.__call__`] for details.
825
+
826
+ [What are input IDs?](../glossary#input-ids)
827
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
828
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
829
+
830
+ - 1 for tokens that are **not masked**,
831
+ - 0 for tokens that are **masked**.
832
+
833
+ [What are attention masks?](../glossary#attention-mask)
834
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
835
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
836
+ config.max_position_embeddings - 1]`.
837
+
838
+ [What are position IDs?](../glossary#position-ids)
839
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
840
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
841
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
842
+ return_loss (`bool`, *optional*):
843
+ Whether or not to return the contrastive loss.
844
+ output_attentions (`bool`, *optional*):
845
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
846
+ tensors for more detail.
847
+ output_hidden_states (`bool`, *optional*):
848
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
849
+ more detail.
850
+ return_dict (`bool`, *optional*):
851
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
852
+ """
853
+
854
+
855
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
856
+ class SiglipEncoder(nn.Module):
857
+ """
858
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
859
+ [`SiglipEncoderLayer`].
860
+
861
+ Args:
862
+ config: SiglipConfig
863
+ """
864
+
865
+ def __init__(self, config: SiglipConfig):
866
+ super().__init__()
867
+ self.config = config
868
+ self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
869
+ self.gradient_checkpointing = False
870
+
871
+ # Ignore copy
872
+ def forward(
873
+ self,
874
+ inputs_embeds,
875
+ attention_mask: Optional[torch.Tensor] = None,
876
+ output_attentions: Optional[bool] = None,
877
+ output_hidden_states: Optional[bool] = None,
878
+ return_dict: Optional[bool] = None,
879
+ ) -> Union[Tuple, BaseModelOutput]:
880
+ r"""
881
+ Args:
882
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
883
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
884
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
885
+ than the model's internal embedding lookup matrix.
886
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
887
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
888
+
889
+ - 1 for tokens that are **not masked**,
890
+ - 0 for tokens that are **masked**.
891
+
892
+ [What are attention masks?](../glossary#attention-mask)
893
+ output_attentions (`bool`, *optional*):
894
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
895
+ returned tensors for more detail.
896
+ output_hidden_states (`bool`, *optional*):
897
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
898
+ for more detail.
899
+ return_dict (`bool`, *optional*):
900
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
901
+ """
902
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
903
+ output_hidden_states = (
904
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
905
+ )
906
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
907
+
908
+ encoder_states = () if output_hidden_states else None
909
+ all_attentions = () if output_attentions else None
910
+
911
+ hidden_states = inputs_embeds
912
+ for encoder_layer in self.layers:
913
+ if output_hidden_states:
914
+ encoder_states = encoder_states + (hidden_states,)
915
+ if self.gradient_checkpointing and self.training:
916
+ layer_outputs = self._gradient_checkpointing_func(
917
+ encoder_layer.__call__,
918
+ hidden_states,
919
+ attention_mask,
920
+ output_attentions,
921
+ )
922
+ else:
923
+ layer_outputs = encoder_layer(
924
+ hidden_states,
925
+ attention_mask,
926
+ output_attentions=output_attentions,
927
+ )
928
+
929
+ hidden_states = layer_outputs[0]
930
+
931
+ if output_attentions:
932
+ all_attentions = all_attentions + (layer_outputs[1],)
933
+
934
+ if output_hidden_states:
935
+ encoder_states = encoder_states + (hidden_states,)
936
+
937
+ if not return_dict:
938
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
939
+ return BaseModelOutput(
940
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
941
+ )
942
+
943
+
944
+ class SiglipTextTransformer(nn.Module):
945
+ def __init__(self, config: SiglipTextConfig):
946
+ super().__init__()
947
+ self.config = config
948
+ embed_dim = config.hidden_size
949
+ self.embeddings = SiglipTextEmbeddings(config)
950
+ self.encoder = SiglipEncoder(config)
951
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
952
+
953
+ self.head = nn.Linear(embed_dim, embed_dim)
954
+
955
+ @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
956
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipTextConfig)
957
+ def forward(
958
+ self,
959
+ input_ids: Optional[torch.Tensor] = None,
960
+ attention_mask: Optional[torch.Tensor] = None,
961
+ position_ids: Optional[torch.Tensor] = None,
962
+ output_attentions: Optional[bool] = None,
963
+ output_hidden_states: Optional[bool] = None,
964
+ return_dict: Optional[bool] = None,
965
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
966
+ r"""
967
+ Returns:
968
+
969
+ """
970
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
971
+ output_hidden_states = (
972
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
973
+ )
974
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
975
+
976
+ if input_ids is None:
977
+ raise ValueError("You have to specify input_ids")
978
+
979
+ input_shape = input_ids.size()
980
+ input_ids = input_ids.view(-1, input_shape[-1])
981
+
982
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
983
+
984
+ # note: SigLIP's text model does not use a causal mask, unlike the original CLIP model.
985
+ # expand attention_mask
986
+ if attention_mask is not None:
987
+ # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
988
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
989
+
990
+ encoder_outputs = self.encoder(
991
+ inputs_embeds=hidden_states,
992
+ attention_mask=attention_mask,
993
+ output_attentions=output_attentions,
994
+ output_hidden_states=output_hidden_states,
995
+ return_dict=return_dict,
996
+ )
997
+
998
+ last_hidden_state = encoder_outputs[0]
999
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
1000
+
1001
+ # Assuming "sticky" EOS tokenization, last token is always EOS.
1002
+ pooled_output = last_hidden_state[:, -1, :]
1003
+ pooled_output = self.head(pooled_output)
1004
+
1005
+ if not return_dict:
1006
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1007
+
1008
+ return BaseModelOutputWithPooling(
1009
+ last_hidden_state=last_hidden_state,
1010
+ pooler_output=pooled_output,
1011
+ hidden_states=encoder_outputs.hidden_states,
1012
+ attentions=encoder_outputs.attentions,
1013
+ )
1014
+
1015
+
1016
+ @add_start_docstrings(
1017
+ """The text model from SigLIP without any head or projection on top.""",
1018
+ SIGLIP_START_DOCSTRING,
1019
+ )
1020
+ class SiglipTextModel(SiglipPreTrainedModel):
1021
+ config_class = SiglipTextConfig
1022
+
1023
+ _no_split_modules = ["SiglipTextEmbeddings", "SiglipEncoderLayer"]
1024
+
1025
+ def __init__(self, config: SiglipTextConfig):
1026
+ super().__init__(config)
1027
+ self.text_model = SiglipTextTransformer(config)
1028
+ # Initialize weights and apply final processing
1029
+ self.post_init()
1030
+
1031
+ def get_input_embeddings(self) -> nn.Module:
1032
+ return self.text_model.embeddings.token_embedding
1033
+
1034
+ def set_input_embeddings(self, value):
1035
+ self.text_model.embeddings.token_embedding = value
1036
+
1037
+ @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
1038
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipTextConfig)
1039
+ def forward(
1040
+ self,
1041
+ input_ids: Optional[torch.Tensor] = None,
1042
+ attention_mask: Optional[torch.Tensor] = None,
1043
+ position_ids: Optional[torch.Tensor] = None,
1044
+ output_attentions: Optional[bool] = None,
1045
+ output_hidden_states: Optional[bool] = None,
1046
+ return_dict: Optional[bool] = None,
1047
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1048
+ r"""
1049
+ Returns:
1050
+
1051
+ Examples:
1052
+
1053
+ ```python
1054
+ >>> from transformers import AutoTokenizer, SiglipTextModel
1055
+
1056
+ >>> model = SiglipTextModel.from_pretrained("google/siglip-base-patch16-224")
1057
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
1058
+
1059
+ >>> # important: make sure to set padding="max_length" as that's how the model was trained
1060
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
1061
+
1062
+ >>> outputs = model(**inputs)
1063
+ >>> last_hidden_state = outputs.last_hidden_state
1064
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
1065
+ ```"""
1066
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1067
+
1068
+ return self.text_model(
1069
+ input_ids=input_ids,
1070
+ attention_mask=attention_mask,
1071
+ position_ids=position_ids,
1072
+ output_attentions=output_attentions,
1073
+ output_hidden_states=output_hidden_states,
1074
+ return_dict=return_dict,
1075
+ )
1076
+
1077
+
1078
+ class SiglipVisionTransformer(nn.Module):
1079
+ def __init__(self, config: SiglipVisionConfig):
1080
+ super().__init__()
1081
+ self.config = config
1082
+ embed_dim = config.hidden_size
1083
+
1084
+ self.embeddings = SiglipVisionEmbeddings(config)
1085
+ self.encoder = SiglipEncoder(config)
1086
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1087
+ self.head = SiglipMultiheadAttentionPoolingHead(config)
1088
+
1089
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1090
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
1091
+ def forward(
1092
+ self,
1093
+ pixel_values,
1094
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
1095
+ output_attentions: Optional[bool] = None,
1096
+ output_hidden_states: Optional[bool] = None,
1097
+ return_dict: Optional[bool] = None,
1098
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1099
+ r"""
1100
+ Returns:
1101
+
1102
+ """
1103
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1104
+ output_hidden_states = (
1105
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1106
+ )
1107
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1108
+
1109
+ batch_size = pixel_values.size(0)
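+ # If no patch mask is provided, assume every patch is valid (full image, no padding).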
1110
+ if patch_attention_mask is None:
1111
+ patch_attention_mask = torch.ones(
1112
+ size=(
1113
+ batch_size,
1114
+ pixel_values.size(2) // self.config.patch_size,
1115
+ pixel_values.size(3) // self.config.patch_size,
1116
+ ),
1117
+ dtype=torch.bool,
1118
+ device=pixel_values.device,
1119
+ )
1120
+
1121
+ hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
1122
+
1123
+ patch_attention_mask = patch_attention_mask.view(batch_size, -1)
1124
+ # The call to `_upad_input` in `_flash_attention_forward` is expensive
1125
+ # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
1126
+ # we avoid passing the attention_mask, which is equivalent to attending to the full sequence
1127
+ if not torch.any(~patch_attention_mask):
1128
+ attention_mask = None
1129
+ else:
1130
+ attention_mask = (
1131
+ _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
1132
+ if not self.config._flash_attn_2_enabled
1133
+ else patch_attention_mask
1134
+ )
1135
+
1136
+ encoder_outputs = self.encoder(
1137
+ inputs_embeds=hidden_states,
1138
+ attention_mask=attention_mask,
1139
+ output_attentions=output_attentions,
1140
+ output_hidden_states=output_hidden_states,
1141
+ return_dict=return_dict,
1142
+ )
1143
+
1144
+ last_hidden_state = encoder_outputs[0]
1145
+ last_hidden_state = self.post_layernorm(last_hidden_state)
1146
+
1147
+ pooled_output = self.head(
1148
+ hidden_state=last_hidden_state,
1149
+ attention_mask=patch_attention_mask,
1150
+ )
1151
+
1152
+ if not return_dict:
1153
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1154
+
1155
+ return BaseModelOutputWithPooling(
1156
+ last_hidden_state=last_hidden_state,
1157
+ pooler_output=pooled_output,
1158
+ hidden_states=encoder_outputs.hidden_states,
1159
+ attentions=encoder_outputs.attentions,
1160
+ )
1161
+
1162
+
1163
+ class SiglipMultiheadAttentionPoolingHead(nn.Module):
1164
+ """Multihead Attention Pooling."""
1165
+
1166
+ def __init__(self, config: SiglipVisionConfig):
1167
+ super().__init__()
1168
+
1169
+ self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
1170
+ self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
1171
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1172
+ self.mlp = SiglipMLP(config)
1173
+
1174
+ def forward(self, hidden_state, attention_mask):
1175
+ batch_size = hidden_state.shape[0]
1176
+ probe = self.probe.repeat(batch_size, 1, 1)
1177
+
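+ # A single learned probe token attends over all patch embeddings; padded patches are excluded via `key_padding_mask`.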
1178
+ hidden_state = self.attention(
1179
+ query=probe, key=hidden_state, value=hidden_state, key_padding_mask=~attention_mask
1180
+ )[0]
1181
+
1182
+ residual = hidden_state
1183
+ hidden_state = self.layernorm(hidden_state)
1184
+ hidden_state = residual + self.mlp(hidden_state)
1185
+
1186
+ return hidden_state[:, 0]
1187
+
1188
+
1189
+ @add_start_docstrings(
1190
+ """The vision model from SigLIP without any head or projection on top.""",
1191
+ SIGLIP_START_DOCSTRING,
1192
+ )
1193
+ class SiglipVisionModel(SiglipPreTrainedModel):
1194
+ config_class = SiglipVisionConfig
1195
+ main_input_name = "pixel_values"
1196
+
1197
+ def __init__(self, config: SiglipVisionConfig):
1198
+ super().__init__(config)
1199
+
1200
+ self.vision_model = SiglipVisionTransformer(config)
1201
+
1202
+ # Initialize weights and apply final processing
1203
+ self.post_init()
1204
+
1205
+ def get_input_embeddings(self) -> nn.Module:
1206
+ return self.vision_model.embeddings.patch_embedding
1207
+
1208
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1209
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
1210
+ def forward(
1211
+ self,
1212
+ pixel_values,
1213
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
1214
+ output_attentions: Optional[bool] = None,
1215
+ output_hidden_states: Optional[bool] = None,
1216
+ return_dict: Optional[bool] = None,
1217
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1218
+ r"""
1219
+ Returns:
1220
+
1221
+ Examples:
1222
+
1223
+ ```python
1224
+ >>> from PIL import Image
1225
+ >>> import requests
1226
+ >>> from transformers import AutoProcessor, SiglipVisionModel
1227
+
1228
+ >>> model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
1229
+ >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1230
+
1231
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1232
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1233
+
1234
+ >>> inputs = processor(images=image, return_tensors="pt")
1235
+
1236
+ >>> outputs = model(**inputs)
1237
+ >>> last_hidden_state = outputs.last_hidden_state
1238
+ >>> pooled_output = outputs.pooler_output # pooled features
1239
+ ```"""
1240
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1241
+
1242
+ return self.vision_model(
1243
+ pixel_values=pixel_values,
1244
+ patch_attention_mask=patch_attention_mask,
1245
+ output_attentions=output_attentions,
1246
+ output_hidden_states=output_hidden_states,
1247
+ return_dict=return_dict,
1248
+ )
1249
+
1250
+
1251
+ @add_start_docstrings(SIGLIP_START_DOCSTRING)
1252
+ class SiglipModel(SiglipPreTrainedModel):
1253
+ config_class = SiglipConfig
1254
+
1255
+ def __init__(self, config: SiglipConfig):
1256
+ super().__init__(config)
1257
+
1258
+ if not isinstance(config.text_config, SiglipTextConfig):
1259
+ raise ValueError(
1260
+ "config.text_config is expected to be of type SiglipTextConfig but is of type"
1261
+ f" {type(config.text_config)}."
1262
+ )
1263
+
1264
+ if not isinstance(config.vision_config, SiglipVisionConfig):
1265
+ raise ValueError(
1266
+ "config.vision_config is expected to be of type SiglipVisionConfig but is of type"
1267
+ f" {type(config.vision_config)}."
1268
+ )
1269
+
1270
+ text_config = config.text_config
1271
+ vision_config = config.vision_config
1272
+
1273
+ self.text_model = SiglipTextTransformer(text_config)
1274
+ self.vision_model = SiglipVisionTransformer(vision_config)
1275
+
1276
+ self.logit_scale = nn.Parameter(torch.randn(1))
1277
+ self.logit_bias = nn.Parameter(torch.randn(1))
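+ # Learnable scale and bias applied to the image-text similarity logits in SigLIP's sigmoid loss.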
1278
+
1279
+ # Initialize weights and apply final processing
1280
+ self.post_init()
1281
+
1282
+ @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
1283
+ def get_text_features(
1284
+ self,
1285
+ input_ids: Optional[torch.Tensor] = None,
1286
+ attention_mask: Optional[torch.Tensor] = None,
1287
+ position_ids: Optional[torch.Tensor] = None,
1288
+ output_attentions: Optional[bool] = None,
1289
+ output_hidden_states: Optional[bool] = None,
1290
+ return_dict: Optional[bool] = None,
1291
+ ) -> torch.FloatTensor:
1292
+ r"""
1293
+ Returns:
1294
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1295
+ applying the projection layer to the pooled output of [`SiglipTextModel`].
1296
+
1297
+ Examples:
1298
+
1299
+ ```python
1300
+ >>> from transformers import AutoTokenizer, AutoModel
1301
+ >>> import torch
1302
+
1303
+ >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
1304
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
1305
+
1306
+ >>> # important: make sure to set padding="max_length" as that's how the model was trained
1307
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
1308
+ >>> with torch.no_grad():
1309
+ ... text_features = model.get_text_features(**inputs)
1310
+ ```"""
1311
+ # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components.
1312
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1313
+ output_hidden_states = (
1314
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1315
+ )
1316
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1317
+
1318
+ text_outputs = self.text_model(
1319
+ input_ids=input_ids,
1320
+ attention_mask=attention_mask,
1321
+ position_ids=position_ids,
1322
+ output_attentions=output_attentions,
1323
+ output_hidden_states=output_hidden_states,
1324
+ return_dict=return_dict,
1325
+ )
1326
+
1327
+ pooled_output = text_outputs[1]
1328
+
1329
+ return pooled_output
1330
+
1331
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1332
+ def get_image_features(
1333
+ self,
1334
+ pixel_values: Optional[torch.FloatTensor] = None,
1335
+ output_attentions: Optional[bool] = None,
1336
+ output_hidden_states: Optional[bool] = None,
1337
+ return_dict: Optional[bool] = None,
1338
+ ) -> torch.FloatTensor:
1339
+ r"""
1340
+ Returns:
1341
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1342
+ applying the projection layer to the pooled output of [`SiglipVisionModel`].
1343
+
1344
+ Examples:
1345
+
1346
+ ```python
1347
+ >>> from PIL import Image
1348
+ >>> import requests
1349
+ >>> from transformers import AutoProcessor, AutoModel
1350
+ >>> import torch
1351
+
1352
+ >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
1353
+ >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1354
+
1355
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1356
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1357
+
1358
+ >>> inputs = processor(images=image, return_tensors="pt")
1359
+
1360
+ >>> with torch.no_grad():
1361
+ ... image_features = model.get_image_features(**inputs)
1362
+ ```"""
1363
+ # Use SiglipModel's config for some fields (if specified) instead of those of vision & text components.
1364
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1365
+ output_hidden_states = (
1366
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1367
+ )
1368
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1369
+
1370
+ vision_outputs = self.vision_model(
1371
+ pixel_values=pixel_values,
1372
+ output_attentions=output_attentions,
1373
+ output_hidden_states=output_hidden_states,
1374
+ return_dict=return_dict,
1375
+ )
1376
+
1377
+ pooled_output = vision_outputs[1]
1378
+
1379
+ return pooled_output
1380
+
1381
+ @add_start_docstrings_to_model_forward(SIGLIP_INPUTS_DOCSTRING)
1382
+ @replace_return_docstrings(output_type=SiglipOutput, config_class=SiglipConfig)
1383
+ def forward(
1384
+ self,
1385
+ input_ids: Optional[torch.LongTensor] = None,
1386
+ pixel_values: Optional[torch.FloatTensor] = None,
1387
+ attention_mask: Optional[torch.Tensor] = None,
1388
+ position_ids: Optional[torch.LongTensor] = None,
1389
+ return_loss: Optional[bool] = None,
1390
+ output_attentions: Optional[bool] = None,
1391
+ output_hidden_states: Optional[bool] = None,
1392
+ return_dict: Optional[bool] = None,
1393
+ ) -> Union[Tuple, SiglipOutput]:
1394
+ r"""
1395
+ Returns:
1396
+
1397
+ Examples:
1398
+
1399
+ ```python
1400
+ >>> from PIL import Image
1401
+ >>> import requests
1402
+ >>> from transformers import AutoProcessor, AutoModel
1403
+ >>> import torch
1404
+
1405
+ >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
1406
+ >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1407
+
1408
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1409
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1410
+
1411
+ >>> texts = ["a photo of 2 cats", "a photo of 2 dogs"]
1412
+ >>> # important: we pass `padding="max_length"` since the model was trained with this
1413
+ >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")
1414
+
1415
+ >>> with torch.no_grad():
1416
+ ... outputs = model(**inputs)
1417
+
1418
+ >>> logits_per_image = outputs.logits_per_image
1419
+ >>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
1420
+ >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
1421
+ 31.9% that image 0 is 'a photo of 2 cats'
1422
+ ```"""
1423
+ # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components.
1424
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1425
+ output_hidden_states = (
1426
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1427
+ )
1428
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1429
+
1430
+ vision_outputs = self.vision_model(
1431
+ pixel_values=pixel_values,
1432
+ output_attentions=output_attentions,
1433
+ output_hidden_states=output_hidden_states,
1434
+ return_dict=return_dict,
1435
+ )
1436
+
1437
+ text_outputs = self.text_model(
1438
+ input_ids=input_ids,
1439
+ attention_mask=attention_mask,
1440
+ position_ids=position_ids,
1441
+ output_attentions=output_attentions,
1442
+ output_hidden_states=output_hidden_states,
1443
+ return_dict=return_dict,
1444
+ )
1445
+
1446
+ image_embeds = vision_outputs[1]
1447
+ text_embeds = text_outputs[1]
1448
+
1449
+ # normalized features
1450
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1451
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1452
+
1453
+ # cosine similarity as logits
1454
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * self.logit_scale.exp() + self.logit_bias
1455
+ logits_per_image = logits_per_text.t()
1456
+
1457
+ loss = None
1458
+ if return_loss:
1459
+ raise NotImplementedError("SigLIP loss to be implemented")
1460
+
1461
+ if not return_dict:
1462
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1463
+ return ((loss,) + output) if loss is not None else output
1464
+
1465
+ return SiglipOutput(
1466
+ loss=loss,
1467
+ logits_per_image=logits_per_image,
1468
+ logits_per_text=logits_per_text,
1469
+ text_embeds=text_embeds,
1470
+ image_embeds=image_embeds,
1471
+ text_model_output=text_outputs,
1472
+ vision_model_output=vision_outputs,
1473
+ )
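
Unlike CLIP, the head above scores every text-image pair independently: a sigmoid over `logit_scale`-scaled cosine similarities plus `logit_bias`, with no softmax over the batch. Below is a minimal sketch of that computation with randomly initialized embeddings and assumed toy shapes (batch of 4, hidden size 768), purely to illustrate the math in `SiglipModel.forward`:

```python
import torch

# Assumed toy shapes: 4 pooled text embeddings and 4 pooled image embeddings of size 768.
text_embeds = torch.randn(4, 768)
image_embeds = torch.randn(4, 768)
logit_scale = torch.randn(1)  # learned scalar, as in SiglipModel.__init__
logit_bias = torch.randn(1)   # learned scalar bias

# L2-normalize, then scaled cosine similarity plus bias, mirroring forward() above
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale.exp() + logit_bias
logits_per_image = logits_per_text.t()

# Each text-image pair is scored on its own; rows do not need to sum to 1.
probs = torch.sigmoid(logits_per_image)
print(probs.shape)  # torch.Size([4, 4])
```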
processing_siglip.py ADDED
@@ -0,0 +1,143 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for SigLIP.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from transformers.feature_extraction_utils import BatchFeature
22
+ from transformers.image_utils import ImageInput
23
+ from transformers.processing_utils import ProcessorMixin
24
+ from transformers.tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
25
+ from transformers.utils import TensorType
26
+
27
+
28
+ class SiglipProcessor(ProcessorMixin):
29
+ r"""
30
+ Constructs a Siglip processor which wraps a Siglip image processor and a Siglip tokenizer into a single processor.
31
+
32
+ [`SiglipProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`SiglipTokenizer`]. See the
33
+ [`~SiglipProcessor.__call__`] and [`~SiglipProcessor.decode`] for more information.
34
+
35
+ Args:
36
+ image_processor ([`SiglipImageProcessor`]):
37
+ The image processor is a required input.
38
+ tokenizer ([`SiglipTokenizer`]):
39
+ The tokenizer is a required input.
40
+ """
41
+
42
+ attributes = ["image_processor", "tokenizer"]
43
+ image_processor_class = "SiglipImageProcessor"
44
+ tokenizer_class = "SiglipTokenizer"
45
+
46
+ def __init__(self, image_processor, tokenizer):
47
+ super().__init__(image_processor, tokenizer)
48
+
49
+ def __call__(
50
+ self,
51
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
52
+ images: ImageInput = None,
53
+ padding: Union[bool, str, PaddingStrategy] = False,
54
+ truncation: Union[bool, str, TruncationStrategy] = None,
55
+ max_length: int = None,
56
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
57
+ ) -> BatchFeature:
58
+ """
59
+ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
60
+ and `kwargs` arguments to SiglipTokenizer's [`~SiglipTokenizer.__call__`] if `text` is not `None` to encode
61
+ the text. To prepare the image(s), this method forwards the `images` argument to
62
+ SiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
63
+ of the above two methods for more information.
64
+
65
+ Args:
66
+ text (`str`, `List[str]`, `List[List[str]]`):
67
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
68
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
69
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
70
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
71
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
72
+ tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
73
+ number of channels, H and W are image height and width.
74
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
75
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
76
+ index) among:
77
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
78
+ sequence is provided).
79
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
80
+ acceptable input length for the model if that argument is not provided.
81
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
82
+ lengths).
83
+ max_length (`int`, *optional*):
84
+ Maximum length of the returned list and optionally padding length (see above).
85
+ truncation (`bool`, *optional*):
86
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
87
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
88
+ If set, will return tensors of a particular framework. Acceptable values are:
89
+
90
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
91
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
92
+ - `'np'`: Return NumPy `np.ndarray` objects.
93
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
94
+
95
+ Returns:
96
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
97
+
98
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
99
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
100
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
101
+ `None`).
102
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
103
+ """
104
+
105
+ if text is None and images is None:
106
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
107
+
108
+ if text is not None:
109
+ encoding = self.tokenizer(
110
+ text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
111
+ )
112
+
113
+ if images is not None:
114
+ image_features = self.image_processor(images, return_tensors=return_tensors)
115
+
116
+ if text is not None and images is not None:
117
+ encoding["pixel_values"] = image_features.pixel_values
118
+ return encoding
119
+ elif text is not None:
120
+ return encoding
121
+ else:
122
+ return BatchFeature(data=dict(**image_features), tensor_type=return_tensors)
123
+
124
+ def decode(self, *args, **kwargs):
125
+ """
126
+ This method forwards all its arguments to SiglipTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
127
+ the docstring of this method for more information.
128
+ """
129
+ return self.tokenizer.decode(*args, **kwargs)
130
+
131
+ def batch_decode(self, *args, **kwargs):
132
+ """
133
+ This method forwards all its arguments to SiglipTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
134
+ refer to the docstring of this method for more information.
135
+ """
136
+ return self.tokenizer.batch_decode(*args, **kwargs)
137
+
138
+ @property
139
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->Siglip, T5->Siglip
140
+ def model_input_names(self):
141
+ tokenizer_input_names = self.tokenizer.model_input_names
142
+ image_processor_input_names = self.image_processor.model_input_names
143
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
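
`SiglipProcessor.__call__` simply routes to the tokenizer and/or the image processor depending on which inputs are given, merging the two outputs when both are present. A minimal usage sketch, reusing the `google/siglip-base-patch16-224` checkpoint from the docstring examples and a blank placeholder image (both assumptions for illustration only; loading the checkpoint requires network access):

```python
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
image = Image.new("RGB", (224, 224))  # placeholder image instead of a download

# text + images: tokenizer and image-processor outputs are merged into one BatchFeature
both = processor(
    text=["a photo of 2 cats"], images=image, padding="max_length", return_tensors="pt"
)
print(sorted(both.keys()))

# text only falls back to the tokenizer; images only to the image processor
text_only = processor(text=["a photo of 2 cats"], padding="max_length", return_tensors="pt")
image_only = processor(images=image, return_tensors="pt")
```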
tokenization_siglip.py ADDED
@@ -0,0 +1,389 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for SigLIP model."""
16
+
17
+ import os
18
+ import re
19
+ import string
20
+ import warnings
21
+ from shutil import copyfile
22
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
23
+
24
+ import sentencepiece as spm
25
+
26
+ from transformers.convert_slow_tokenizer import import_protobuf
27
+ from transformers.tokenization_utils import PreTrainedTokenizer
28
+ from transformers.tokenization_utils_base import AddedToken
29
+
30
+
31
+ if TYPE_CHECKING:
32
+ from transformers.tokenization_utils_base import TextInput
33
+ from transformers.utils import logging, requires_backends
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
39
+
40
+ PRETRAINED_VOCAB_FILES_MAP = {
41
+ "vocab_file": {
42
+ "google/siglip-base-patch16-224": "https://huggingface.co/google/siglip-base-patch16-224/resolve/main/spiece.model",
43
+ }
44
+ }
45
+
46
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
47
+ "google/siglip-base-patch16-224": 256,
48
+ }
49
+
50
+ SPIECE_UNDERLINE = "▁"
51
+
52
+
53
+ class SiglipTokenizer(PreTrainedTokenizer):
54
+ """
55
+ Construct a Siglip tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
63
+ contains the vocabulary necessary to instantiate a tokenizer.
64
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
65
+ The end of sequence token.
66
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
67
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
68
+ token instead.
69
+ pad_token (`str`, *optional*, defaults to `"</s>"`):
70
+ The token used for padding, for example when batching sequences of different lengths.
71
+ additional_special_tokens (`List[str]`, *optional*):
72
+ Additional special tokens used by the tokenizer.
73
+ sp_model_kwargs (`dict`, *optional*):
74
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
75
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
76
+ to set:
77
+
78
+ - `enable_sampling`: Enable subword regularization.
79
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
80
+
81
+ - `nbest_size = {0,1}`: No sampling is performed.
82
+ - `nbest_size > 1`: samples from the nbest_size results.
83
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
84
+ using forward-filtering-and-backward-sampling algorithm.
85
+
86
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
87
+ BPE-dropout.
88
+ model_max_length (`int`, *optional*, defaults to 64):
89
+ The maximum length (in number of tokens) for model inputs.
90
+ do_lower_case (`bool`, *optional*, defaults to `True`):
91
+ Whether or not to lowercase the input when tokenizing.
92
+ """
93
+
94
+ vocab_files_names = VOCAB_FILES_NAMES
95
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
96
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
97
+ model_input_names = ["input_ids", "attention_mask"]
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_file,
102
+ eos_token="</s>",
103
+ unk_token="<unk>",
104
+ pad_token="</s>",
105
+ additional_special_tokens=None,
106
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
107
+ model_max_length=64,
108
+ do_lower_case=True,
109
+ **kwargs,
110
+ ) -> None:
111
+ requires_backends(self, "protobuf")
112
+
113
+ pad_token = (
114
+ AddedToken(pad_token, rstrip=True, lstrip=True, normalized=False, special=True)
115
+ if isinstance(pad_token, str)
116
+ else pad_token
117
+ )
118
+ unk_token = (
119
+ AddedToken(unk_token, rstrip=True, lstrip=True, normalized=False, special=True)
120
+ if isinstance(unk_token, str)
121
+ else unk_token
122
+ )
123
+ eos_token = (
124
+ AddedToken(eos_token, rstrip=True, lstrip=True, normalized=False, special=True)
125
+ if isinstance(eos_token, str)
126
+ else eos_token
127
+ )
128
+
129
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
130
+
131
+ self.do_lower_case = do_lower_case
132
+ self.vocab_file = vocab_file
133
+
134
+ self.sp_model = self.get_spm_processor()
135
+ self.vocab_file = vocab_file
136
+
137
+ super().__init__(
138
+ eos_token=eos_token,
139
+ unk_token=unk_token,
140
+ pad_token=pad_token,
141
+ additional_special_tokens=additional_special_tokens,
142
+ sp_model_kwargs=self.sp_model_kwargs,
143
+ model_max_length=model_max_length,
144
+ do_lower_case=do_lower_case,
145
+ **kwargs,
146
+ )
147
+
148
+ def get_spm_processor(self):
149
+ tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
150
+ with open(self.vocab_file, "rb") as f:
151
+ sp_model = f.read()
152
+ model_pb2 = import_protobuf()
153
+ model = model_pb2.ModelProto.FromString(sp_model)
154
+ normalizer_spec = model_pb2.NormalizerSpec()
155
+ normalizer_spec.add_dummy_prefix = False
156
+ model.normalizer_spec.MergeFrom(normalizer_spec)
157
+ sp_model = model.SerializeToString()
158
+ tokenizer.LoadFromSerializedProto(sp_model)
159
+ return tokenizer
160
+
161
+ @property
162
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.vocab_size
163
+ def vocab_size(self):
164
+ return self.sp_model.get_piece_size()
165
+
166
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_vocab
167
+ def get_vocab(self):
168
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
169
+ vocab.update(self.added_tokens_encoder)
170
+ return vocab
171
+
172
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_special_tokens_mask
173
+ def get_special_tokens_mask(
174
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
175
+ ) -> List[int]:
176
+ """
177
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
178
+ special tokens using the tokenizer `prepare_for_model` method.
179
+
180
+ Args:
181
+ token_ids_0 (`List[int]`):
182
+ List of IDs.
183
+ token_ids_1 (`List[int]`, *optional*):
184
+ Optional second list of IDs for sequence pairs.
185
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
186
+ Whether or not the token list is already formatted with special tokens for the model.
187
+
188
+ Returns:
189
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
190
+ """
191
+ if already_has_special_tokens:
192
+ return super().get_special_tokens_mask(
193
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
194
+ )
195
+
196
+ # normal case: some special tokens
197
+ if token_ids_1 is None:
198
+ return ([0] * len(token_ids_0)) + [1]
199
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
200
+
201
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._add_eos_if_not_present
202
+ def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
203
+ """Do not add eos again if user already added it."""
204
+ if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
205
+ warnings.warn(
206
+ f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
207
+ " eos tokens being added."
208
+ )
209
+ return token_ids
210
+ else:
211
+ return token_ids + [self.eos_token_id]
212
+
213
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.create_token_type_ids_from_sequences
214
+ def create_token_type_ids_from_sequences(
215
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
216
+ ) -> List[int]:
217
+ """
218
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
219
+ use of token type ids, therefore a list of zeros is returned.
220
+
221
+ Args:
222
+ token_ids_0 (`List[int]`):
223
+ List of IDs.
224
+ token_ids_1 (`List[int]`, *optional*):
225
+ Optional second list of IDs for sequence pairs.
226
+
227
+ Returns:
228
+ `List[int]`: List of zeros.
229
+ """
230
+ eos = [self.eos_token_id]
231
+
232
+ if token_ids_1 is None:
233
+ return len(token_ids_0 + eos) * [0]
234
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
235
+
236
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.build_inputs_with_special_tokens
237
+ def build_inputs_with_special_tokens(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
239
+ ) -> List[int]:
240
+ """
241
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
242
+ adding special tokens. A sequence has the following format:
243
+
244
+ - single sequence: `X </s>`
245
+ - pair of sequences: `A </s> B </s>`
246
+
247
+ Args:
248
+ token_ids_0 (`List[int]`):
249
+ List of IDs to which the special tokens will be added.
250
+ token_ids_1 (`List[int]`, *optional*):
251
+ Optional second list of IDs for sequence pairs.
252
+
253
+ Returns:
254
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
255
+ """
256
+ token_ids_0 = self._add_eos_if_not_present(token_ids_0)
257
+ if token_ids_1 is None:
258
+ return token_ids_0
259
+ else:
260
+ token_ids_1 = self._add_eos_if_not_present(token_ids_1)
261
+ return token_ids_0 + token_ids_1
262
+
263
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.__getstate__
264
+ def __getstate__(self):
265
+ state = self.__dict__.copy()
266
+ state["sp_model"] = None
267
+ return state
268
+
269
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.__setstate__
270
+ def __setstate__(self, d):
271
+ self.__dict__ = d
272
+
273
+ # for backward compatibility
274
+ if not hasattr(self, "sp_model_kwargs"):
275
+ self.sp_model_kwargs = {}
276
+
277
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
278
+ self.sp_model.Load(self.vocab_file)
279
+
280
+ def remove_punctuation(self, text: str) -> str:
281
+ return text.translate(str.maketrans("", "", string.punctuation))
282
+
283
+ # source: https://github.com/google-research/big_vision/blob/3b8e5ab6ad4f96e32b32826f9e1b8fd277914f9c/big_vision/evaluators/proj/image_text/prompt_engineering.py#L94
284
+ def canonicalize_text(self, text, *, keep_punctuation_exact_string=None):
285
+ """Returns canonicalized `text` (puncuation removed).
286
+
287
+ Args:
288
+ text (`str`):
289
+ String to be canonicalized.
290
+ keep_punctuation_exact_string (`str`, *optional*):
291
+ If provided, then this exact string is kept. For example providing '{}' will keep any occurrences of '{}'
292
+ (but will still remove '{' and '}' that appear separately).
293
+ """
294
+ if keep_punctuation_exact_string:
295
+ text = keep_punctuation_exact_string.join(
296
+ self.remove_punctuation(part) for part in text.split(keep_punctuation_exact_string)
297
+ )
298
+ else:
299
+ text = self.remove_punctuation(text)
300
+ text = re.sub(r"\s+", " ", text)
301
+ text = text.strip()
302
+
303
+ return text
304
+
305
+ def tokenize(self, text: "TextInput", add_special_tokens=False, **kwargs) -> List[str]:
306
+ """
307
+ Converts a string to a list of tokens.
308
+ """
309
+ tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " "), **kwargs)
310
+
311
+ if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
312
+ tokens = tokens[1:]
313
+ return tokens
314
+
315
+ @property
316
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.unk_token_length
317
+ def unk_token_length(self):
318
+ return len(self.sp_model.encode(str(self.unk_token)))
319
+
320
+ def _tokenize(self, text, **kwargs):
321
+ """
322
+ Returns a tokenized string.
323
+
324
+ We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
325
+ SPIECE_UNDERLINE.
326
+
327
+ For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`.
328
+
329
+ Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
330
+ `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
331
+ """
332
+ text = self.canonicalize_text(text, keep_punctuation_exact_string=None)
333
+ tokens = self.sp_model.encode(text, out_type=str)
334
+
335
+ # 1. Encode string + prefix ex: "<unk> Hey"
336
+ tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
337
+ # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
338
+ return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
339
+
340
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._convert_token_to_id
341
+ def _convert_token_to_id(self, token):
342
+ """Converts a token (str) in an id using the vocab."""
343
+ return self.sp_model.piece_to_id(token)
344
+
345
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._convert_id_to_token
346
+ def _convert_id_to_token(self, index):
347
+ """Converts an index (integer) in a token (str) using the vocab."""
348
+ token = self.sp_model.IdToPiece(index)
349
+ return token
350
+
351
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.convert_tokens_to_string
352
+ def convert_tokens_to_string(self, tokens):
353
+ """Converts a sequence of tokens (string) in a single string."""
354
+ current_sub_tokens = []
355
+ # since we manually add the prefix space, we have to remove it
356
+ tokens[0] = tokens[0].lstrip(SPIECE_UNDERLINE)
357
+ out_string = ""
358
+ prev_is_special = False
359
+ for token in tokens:
360
+ # make sure that special tokens are not decoded using sentencepiece model
361
+ if token in self.all_special_tokens:
362
+ if not prev_is_special:
363
+ out_string += " "
364
+ out_string += self.sp_model.decode(current_sub_tokens) + token
365
+ prev_is_special = True
366
+ current_sub_tokens = []
367
+ else:
368
+ current_sub_tokens.append(token)
369
+ prev_is_special = False
370
+ out_string += self.sp_model.decode(current_sub_tokens)
371
+ return out_string.strip()
372
+
373
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.save_vocabulary
374
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
375
+ if not os.path.isdir(save_directory):
376
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
377
+ return
378
+ out_vocab_file = os.path.join(
379
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
380
+ )
381
+
382
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
383
+ copyfile(self.vocab_file, out_vocab_file)
384
+ elif not os.path.isfile(self.vocab_file):
385
+ with open(out_vocab_file, "wb") as fi:
386
+ content_spiece_model = self.sp_model.serialized_model_proto()
387
+ fi.write(content_spiece_model)
388
+
389
+ return (out_vocab_file,)
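
To make the text-cleaning step above concrete, here is a small standalone sketch of `canonicalize_text`, re-implemented outside the class with hypothetical inputs, showing how punctuation is stripped everywhere except inside an exact string the caller asks to keep:

```python
import re
import string
from typing import Optional


def canonicalize_text(text: str, *, keep_punctuation_exact_string: Optional[str] = None) -> str:
    """Standalone sketch of SiglipTokenizer.canonicalize_text above."""

    def remove_punctuation(part: str) -> str:
        return part.translate(str.maketrans("", "", string.punctuation))

    if keep_punctuation_exact_string:
        # strip punctuation inside each chunk but keep the exact separator string itself
        text = keep_punctuation_exact_string.join(
            remove_punctuation(part) for part in text.split(keep_punctuation_exact_string)
        )
    else:
        text = remove_punctuation(text)
    # collapse runs of whitespace and trim the ends
    return re.sub(r"\s+", " ", text).strip()


print(canonicalize_text("A photo   of 2 cats!!"))                               # A photo of 2 cats
print(canonicalize_text("a {} of a dog.", keep_punctuation_exact_string="{}"))  # a {} of a dog
```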