""" Tensorflow Preprocessing Adapter
Allows use of Tensorflow preprocessing pipeline in PyTorch Transform
Copyright of original Tensorflow code below.
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing for MnasNet."""
import tensorflow.compat.v1 as tf
import numpy as np
IMAGE_SIZE = 224
CROP_PADDING = 32
tf.compat.v1.disable_eager_execution()
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
        must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
cropped image `Tensor`
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
shape = tf.image.extract_jpeg_shape(image_bytes)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes, image_size, resize_method):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad,
        lambda: _decode_and_center_crop(image_bytes, image_size, resize_method),
lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])
return image
def _decode_and_center_crop(image_bytes, image_size, resize_method):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = tf.image.resize([image], [image_size, image_size], resize_method)[0]
return image
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor`.
"""
resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
image = _decode_and_random_crop(image_bytes, image_size, resize_method)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor`.
"""
resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
image = _decode_and_center_crop(image_bytes, image_size, resize_method)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_image(image_bytes,
is_training=False,
use_bfloat16=False,
image_size=IMAGE_SIZE,
interpolation='bicubic'):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
is_training: `bool` for whether the preprocessing is for training.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor` with value range of [0, 255].
"""
if is_training:
return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation)
else:
return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation)
class TfPreprocessTransform:
def __init__(self, is_training=False, size=224, interpolation='bicubic'):
self.is_training = is_training
self.size = size[0] if isinstance(size, tuple) else size
self.interpolation = interpolation
self._image_bytes = None
self.process_image = self._build_tf_graph()
self.sess = None
def _build_tf_graph(self):
with tf.device('/cpu:0'):
self._image_bytes = tf.placeholder(
shape=[],
dtype=tf.string,
)
img = preprocess_image(
self._image_bytes, self.is_training, False, self.size, self.interpolation)
return img
def __call__(self, image_bytes):
if self.sess is None:
self.sess = tf.Session()
img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes})
img = img.round().clip(0, 255).astype(np.uint8)
if img.ndim < 3:
img = np.expand_dims(img, axis=-1)
img = np.rollaxis(img, 2) # HWC to CHW
return img
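# Hypothetical usage sketch (not part of the original module): feed raw JPEG bytes through
# the graph-mode TF pipeline and get back a CHW uint8 numpy array ready for a torch tensor.
# 'example.jpg' is a placeholder path, _tf_preprocess_example is an illustrative helper.
def _tf_preprocess_example():
    transform = TfPreprocessTransform(is_training=False, size=224, interpolation='bicubic')
    with open('example.jpg', 'rb') as f:  # placeholder image path
        image_bytes = f.read()
    img_chw = transform(image_bytes)
    return img_chw.shape, img_chw.dtype  # (3, 224, 224), dtype('uint8')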
# ---- end of pytorch-image-models/timm/data/tf_preprocessing.py ----
""" Conv2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .config import is_exportable, is_scriptable
from .padding import pad_same, pad_same_arg, get_padding_value
_USE_EXPORT_CONV = False
def conv2d_same(
x,
weight: torch.Tensor,
bias: Optional[torch.Tensor] = None,
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
dilation: Tuple[int, int] = (1, 1),
groups: int = 1,
):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size,
stride, 0, dilation, groups, bias,
)
def forward(self, x):
return conv2d_same(
x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups,
)
class Conv2dSameExport(nn.Conv2d):
""" ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions
NOTE: This does not currently work with torch.jit.script
"""
# pylint: disable=unused-argument
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSameExport, self).__init__(
in_channels, out_channels, kernel_size,
stride, 0, dilation, groups, bias,
)
self.pad = None
self.pad_input_size = (0, 0)
def forward(self, x):
input_size = x.size()[-2:]
if self.pad is None:
pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation)
self.pad = nn.ZeroPad2d(pad_arg)
self.pad_input_size = input_size
x = self.pad(x)
return F.conv2d(
x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups,
)
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop('padding', '')
kwargs.setdefault('bias', False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
if _USE_EXPORT_CONV and is_exportable():
# older PyTorch ver needed this to export same padding reasonably
assert not is_scriptable() # Conv2DSameExport does not work with jit
return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs)
else:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
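# Hypothetical usage sketch (not part of the original module): with padding='same' and
# stride > 1, static padding is not possible, so create_conv2d_pad returns a Conv2dSame
# that pads dynamically and yields output size ceil(input / stride), as TF 'SAME' does.
# _conv2d_same_example is an illustrative helper, not part of the timm API.
def _conv2d_same_example():
    x = torch.randn(1, 3, 27, 27)
    conv = create_conv2d_pad(3, 8, kernel_size=3, stride=2, padding='same')
    return type(conv).__name__, tuple(conv(x).shape)
    # -> ('Conv2dSame', (1, 8, 14, 14)) since ceil(27 / 2) == 14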
# ---- end of pytorch-image-models/timm/layers/conv2d_same.py ----
""" Global Response Normalization Module
Based on the GRN layer presented in
`ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808
This implementation
* works for both NCHW and NHWC tensor layouts
* uses affine param names matching existing torch norm layers
* slightly improves eager mode performance via fused addcmul
Hacked together by / Copyright 2023 Ross Wightman
"""
import torch
from torch import nn as nn
class GlobalResponseNorm(nn.Module):
""" Global Response Normalization layer
"""
def __init__(self, dim, eps=1e-6, channels_last=True):
super().__init__()
self.eps = eps
if channels_last:
self.spatial_dim = (1, 2)
self.channel_dim = -1
self.wb_shape = (1, 1, 1, -1)
else:
self.spatial_dim = (2, 3)
self.channel_dim = 1
self.wb_shape = (1, -1, 1, 1)
self.weight = nn.Parameter(torch.zeros(dim))
self.bias = nn.Parameter(torch.zeros(dim))
def forward(self, x):
x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True)
x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps)
return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n)
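# Hypothetical usage sketch (not part of the original module): GRN normalizes over the
# spatial dims and rescales per channel, so the two layouts differ only in which axes are
# treated as spatial vs. channel; the output shape always matches the input shape.
# _grn_example is an illustrative helper, not part of the timm API.
def _grn_example():
    grn_nhwc = GlobalResponseNorm(dim=64, channels_last=True)
    grn_nchw = GlobalResponseNorm(dim=64, channels_last=False)
    return grn_nhwc(torch.randn(2, 7, 7, 64)).shape, grn_nchw(torch.randn(2, 64, 7, 7)).shape
    # -> (torch.Size([2, 7, 7, 64]), torch.Size([2, 64, 7, 7]))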
# ---- end of pytorch-image-models/timm/layers/grn.py ----
""" Padding Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from typing import List, Tuple, Union
import torch
import torch.nn.functional as F
from .helpers import to_2tuple
# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> Union[int, List[int]]:
if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]):
kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)
return [get_padding(*a) for a in zip(kernel_size, stride, dilation)]
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int):
if isinstance(x, torch.Tensor):
return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0)
else:
return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
# Can SAME padding for given args be done statically?
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]):
kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)
return all([is_static_pad(*a) for a in zip(kernel_size, stride, dilation)])
return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
def pad_same_arg(
input_size: List[int],
kernel_size: List[int],
stride: List[int],
dilation: List[int] = (1, 1),
) -> List[int]:
ih, iw = input_size
kh, kw = kernel_size
pad_h = get_same_padding(ih, kh, stride[0], dilation[0])
pad_w = get_same_padding(iw, kw, stride[1], dilation[1])
return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(
x,
kernel_size: List[int],
stride: List[int],
dilation: List[int] = (1, 1),
value: float = 0,
):
ih, iw = x.size()[-2:]
pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0])
pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1])
x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value)
return x
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
dynamic = False
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == 'same':
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if is_static_pad(kernel_size, **kwargs):
# static case, no extra overhead
padding = get_padding(kernel_size, **kwargs)
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == 'valid':
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = get_padding(kernel_size, **kwargs)
return padding, dynamic
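# Hypothetical usage sketch (not part of the original module): k=3, s=1 'same' padding can be
# resolved statically (symmetric pad of 1), while k=3, s=2 requires dynamic, input-size-dependent
# padding handled at runtime by pad_same / Conv2dSame. _same_padding_example is illustrative only.
def _same_padding_example():
    static = get_padding_value('same', kernel_size=3, stride=1)   # (1, False) -> static pad of 1
    dynamic = get_padding_value('same', kernel_size=3, stride=2)  # (0, True)  -> dynamic pad
    padded = pad_same(torch.randn(1, 3, 27, 27), (3, 3), (2, 2))  # shape (1, 3, 29, 29)
    return static, dynamic, padded.shape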
# ---- end of pytorch-image-models/timm/layers/padding.py ----
from typing import Callable, Tuple, Type, Union
import torch
LayerType = Union[str, Callable, Type[torch.nn.Module]]
PadType = Union[str, int, Tuple[int, int]]
# ---- end of pytorch-image-models/timm/layers/typing.py ----
import collections.abc
import math
import re
from collections import defaultdict
from itertools import chain
from typing import Any, Callable, Dict, Iterator, Tuple, Type, Union
import torch
from torch import nn as nn
from torch.utils.checkpoint import checkpoint
__all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv',
'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq']
def model_parameters(model: nn.Module, exclude_head: bool = False):
if exclude_head:
        # FIXME this is a bit of a quick and dirty hack to skip classifier head params based on ordering
return [p for p in model.parameters()][:-2]
else:
return model.parameters()
def named_apply(
fn: Callable,
module: nn.Module, name='',
depth_first: bool = True,
include_root: bool = False,
) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
def named_modules(
module: nn.Module,
name: str = '',
depth_first: bool = True,
include_root: bool = False,
):
if not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
yield name, module
def named_modules_with_params(
module: nn.Module,
name: str = '',
depth_first: bool = True,
include_root: bool = False,
):
if module._parameters and not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules_with_params(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if module._parameters and depth_first and include_root:
yield name, module
MATCH_PREV_GROUP = (99999,)
def group_with_matcher(
named_objects: Iterator[Tuple[str, Any]],
group_matcher: Union[Dict, Callable],
return_values: bool = False,
reverse: bool = False
):
if isinstance(group_matcher, dict):
# dictionary matcher contains a dict of raw-string regex expr that must be compiled
compiled = []
for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()):
if mspec is None:
continue
# map all matching specifications into 3-tuple (compiled re, prefix, suffix)
if isinstance(mspec, (tuple, list)):
# multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix)
for sspec in mspec:
compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])]
else:
compiled += [(re.compile(mspec), (group_ordinal,), None)]
group_matcher = compiled
def _get_grouping(name):
if isinstance(group_matcher, (list, tuple)):
for match_fn, prefix, suffix in group_matcher:
r = match_fn.match(name)
if r:
parts = (prefix, r.groups(), suffix)
# map all tuple elem to int for numeric sort, filter out None entries
return tuple(map(float, chain.from_iterable(filter(None, parts))))
return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal
else:
ord = group_matcher(name)
if not isinstance(ord, collections.abc.Iterable):
return ord,
return tuple(ord)
# map layers into groups via ordinals (ints or tuples of ints) from matcher
grouping = defaultdict(list)
for k, v in named_objects:
grouping[_get_grouping(k)].append(v if return_values else k)
# remap to integers
layer_id_to_param = defaultdict(list)
lid = -1
for k in sorted(filter(lambda x: x is not None, grouping.keys())):
if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]:
lid += 1
layer_id_to_param[lid].extend(grouping[k])
if reverse:
assert not return_values, "reverse mapping only sensible for name output"
# output reverse mapping
param_to_layer_id = {}
for lid, lm in layer_id_to_param.items():
for n in lm:
param_to_layer_id[n] = lid
return param_to_layer_id
return layer_id_to_param
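# Hypothetical usage sketch (not part of the original module): a dict matcher maps parameter
# names to layer-group ids (e.g. for layer-wise LR decay); unmatched names such as the head
# fall into the final group. The names, regexes, and _group_with_matcher_example helper below
# are illustrative only.
def _group_with_matcher_example():
    names = [
        'stem.conv.weight',
        'stages.0.blocks.0.conv_dw.weight',
        'stages.1.blocks.0.conv_dw.weight',
        'head.fc.weight',
    ]
    matcher = dict(stem=r'^stem', blocks=r'^stages\.(\d+)')
    return group_with_matcher(((n, None) for n in names), matcher, reverse=True)
    # -> {'stem.conv.weight': 0, 'stages.0.blocks.0.conv_dw.weight': 1,
    #     'stages.1.blocks.0.conv_dw.weight': 2, 'head.fc.weight': 3}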
def group_parameters(
module: nn.Module,
group_matcher,
return_values: bool = False,
reverse: bool = False,
):
return group_with_matcher(
module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse)
def group_modules(
module: nn.Module,
group_matcher,
return_values: bool = False,
reverse: bool = False,
):
return group_with_matcher(
named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse)
def flatten_modules(
named_modules: Iterator[Tuple[str, nn.Module]],
depth: int = 1,
prefix: Union[str, Tuple[str, ...]] = '',
module_types: Union[str, Tuple[Type[nn.Module]]] = 'sequential',
):
prefix_is_tuple = isinstance(prefix, tuple)
if isinstance(module_types, str):
if module_types == 'container':
module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict)
else:
module_types = (nn.Sequential,)
for name, module in named_modules:
if depth and isinstance(module, module_types):
yield from flatten_modules(
module.named_children(),
depth - 1,
prefix=(name,) if prefix_is_tuple else name,
module_types=module_types,
)
else:
if prefix_is_tuple:
name = prefix + (name,)
yield name, module
else:
if prefix:
name = '.'.join([prefix, name])
yield name, module
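# Hypothetical usage sketch (not part of the original module): flatten one level of
# nn.Sequential containers so that (name, module) pairs are yielded per leaf block.
# _flatten_modules_example is an illustrative helper, not part of the timm API.
def _flatten_modules_example():
    m = nn.Sequential(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()), nn.Linear(8, 4))
    return [name for name, _ in flatten_modules(m.named_children(), depth=1)]
    # -> ['0.0', '0.1', '1']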
def checkpoint_seq(
functions,
x,
every=1,
flatten=False,
skip_last=False,
preserve_rng_state=True
):
r"""A helper function for checkpointing sequential models.
Sequential models execute a list of modules/functions in order
(sequentially). Therefore, we can divide such a sequence into segments
    and checkpoint each segment. All checkpointed segments run in :func:`torch.no_grad`
manner, i.e., not storing the intermediate activations. The inputs of each
checkpointed segment will be saved for re-running the segment in the backward pass.
See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
.. warning::
Checkpointing currently only supports :func:`torch.autograd.backward`
and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`
is not supported.
.. warning:
At least one of the inputs needs to have :code:`requires_grad=True` if
grads are needed for model inputs, otherwise the checkpointed part of the
model won't have gradients.
Args:
functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.
x: A Tensor that is input to :attr:`functions`
every: checkpoint every-n functions (default: 1)
flatten (bool): flatten nn.Sequential of nn.Sequentials
skip_last (bool): skip checkpointing the last function in the sequence if True
        preserve_rng_state (bool, optional, default=True): Stash and restore the RNG state
            during each checkpoint so results that involve randomness (e.g. dropout) are reproducible.
Returns:
Output of running :attr:`functions` sequentially on :attr:`*inputs`
Example:
>>> model = nn.Sequential(...)
>>> input_var = checkpoint_seq(model, input_var, every=2)
"""
def run_function(start, end, functions):
def forward(_x):
for j in range(start, end + 1):
_x = functions[j](_x)
return _x
return forward
if isinstance(functions, torch.nn.Sequential):
functions = functions.children()
if flatten:
functions = chain.from_iterable(functions)
if not isinstance(functions, (tuple, list)):
functions = tuple(functions)
num_checkpointed = len(functions)
if skip_last:
num_checkpointed -= 1
end = -1
for start in range(0, num_checkpointed, every):
end = min(start + every - 1, num_checkpointed - 1)
x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)
if skip_last:
return run_function(end + 1, len(functions) - 1, functions)(x)
return x
def adapt_input_conv(in_chans, conv_weight):
conv_type = conv_weight.dtype
conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU
O, I, J, K = conv_weight.shape
if in_chans == 1:
if I > 3:
assert conv_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
conv_weight = conv_weight.sum(dim=2, keepdim=False)
else:
conv_weight = conv_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
if I != 3:
raise NotImplementedError('Weight format not supported by conversion.')
else:
# NOTE this strategy should be better than random init, but there could be other combinations of
# the original RGB input layer weights that'd work better for specific cases.
repeat = int(math.ceil(in_chans / 3))
conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv_weight *= (3 / float(in_chans))
conv_weight = conv_weight.to(conv_type)
return conv_weight
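# Hypothetical usage sketch (not part of the original module): adapt a pretrained RGB stem
# conv weight to a different number of input channels while roughly preserving activation
# magnitude (sum over RGB for 1 channel, repeat + rescale for more than 3).
# _adapt_input_conv_example is an illustrative helper, not part of the timm API.
def _adapt_input_conv_example():
    rgb_weight = torch.randn(64, 3, 7, 7)      # (out_ch, in_ch=3, kh, kw)
    gray = adapt_input_conv(1, rgb_weight)     # sum over RGB   -> (64, 1, 7, 7)
    six_ch = adapt_input_conv(6, rgb_weight)   # repeat+rescale -> (64, 6, 7, 7)
    return gray.shape, six_ch.shape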
# ---- end of pytorch-image-models/timm/models/_manipulate.py ----
""" ConvNeXt
Papers:
* `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
@Article{liu2022convnet,
author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie},
title = {A ConvNet for the 2020s},
journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2022},
}
* `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808
@article{Woo2023ConvNeXtV2,
title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders},
author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie},
year={2023},
journal={arXiv preprint arXiv:2301.00808},
}
Original code and weights from:
* https://github.com/facebookresearch/ConvNeXt, original copyright below
* https://github.com/facebookresearch/ConvNeXt-V2, original copyright below
Model defs atto, femto, pico, nano and _ols / _hnf variants are timm originals.
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# ConvNeXt
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the MIT license
# ConvNeXt-V2
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree (Attribution-NonCommercial 4.0 International (CC BY-NC 4.0))
# No code was used directly from ConvNeXt-V2, however the weights are CC BY-NC 4.0 so beware if using commercially.
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, \
LayerNorm2d, LayerNorm, create_conv2d, get_act_layer, make_divisible, to_ntuple
from timm.layers import NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this
class Downsample(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1):
super().__init__()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
if in_chs != out_chs:
self.conv = create_conv2d(in_chs, out_chs, 1, stride=1)
else:
self.conv = nn.Identity()
def forward(self, x):
x = self.pool(x)
x = self.conv(x)
return x
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    Unlike the official impl, this one allows a choice of 1 or 2. The 1x1 conv variant can be
    faster with an appropriate choice of LayerNorm impl, however as model size increases the
    tradeoffs appear to change and nn.Linear becomes the better choice. This was observed with
    PyTorch 1.10 on a 3090 GPU; it could change over time and with different hardware.
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 7,
stride: int = 1,
dilation: Union[int, Tuple[int, int]] = (1, 1),
mlp_ratio: float = 4,
conv_mlp: bool = False,
conv_bias: bool = True,
use_grn: bool = False,
ls_init_value: Optional[float] = 1e-6,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Optional[Callable] = None,
drop_path: float = 0.,
):
"""
Args:
in_chs: Block input channels.
out_chs: Block output channels (same as in_chs if None).
kernel_size: Depthwise convolution kernel size.
stride: Stride of depthwise convolution.
dilation: Tuple specifying input and output dilation of block.
mlp_ratio: MLP expansion ratio.
conv_mlp: Use 1x1 convolutions for MLP and a NCHW compatible norm layer if True.
conv_bias: Apply bias for all convolution (linear) layers.
use_grn: Use GlobalResponseNorm in MLP (from ConvNeXt-V2)
ls_init_value: Layer-scale init values, layer-scale applied if not None.
act_layer: Activation layer.
norm_layer: Normalization layer (defaults to LN if not specified).
drop_path: Stochastic depth probability.
"""
super().__init__()
out_chs = out_chs or in_chs
dilation = to_ntuple(2)(dilation)
act_layer = get_act_layer(act_layer)
if not norm_layer:
norm_layer = LayerNorm2d if conv_mlp else LayerNorm
mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp)
self.use_conv_mlp = conv_mlp
self.conv_dw = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation[0],
depthwise=True,
bias=conv_bias,
)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0])
else:
self.shortcut = nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + self.shortcut(shortcut)
return x
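# Hypothetical usage sketch (not part of the original module): with default settings a block
# is a residual unit that preserves spatial size (padded 7x7 depthwise conv) and channel count.
# _convnext_block_example is an illustrative helper, not part of the timm API.
def _convnext_block_example():
    block = ConvNeXtBlock(in_chs=96)
    x = torch.randn(2, 96, 56, 56)
    return block(x).shape  # torch.Size([2, 96, 56, 56])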
class ConvNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=7,
stride=2,
depth=2,
dilation=(1, 1),
drop_path_rates=None,
ls_init_value=1.0,
conv_mlp=False,
conv_bias=True,
use_grn=False,
act_layer='gelu',
norm_layer=None,
norm_layer_cl=None
):
super().__init__()
self.grad_checkpointing = False
if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]:
ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1
pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used
self.downsample = nn.Sequential(
norm_layer(in_chs),
create_conv2d(
in_chs,
out_chs,
kernel_size=ds_ks,
stride=stride,
dilation=dilation[0],
padding=pad,
bias=conv_bias,
),
)
in_chs = out_chs
else:
self.downsample = nn.Identity()
drop_path_rates = drop_path_rates or [0.] * depth
stage_blocks = []
for i in range(depth):
stage_blocks.append(ConvNeXtBlock(
in_chs=in_chs,
out_chs=out_chs,
kernel_size=kernel_size,
dilation=dilation[1],
drop_path=drop_path_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
conv_bias=conv_bias,
use_grn=use_grn,
act_layer=act_layer,
norm_layer=norm_layer if conv_mlp else norm_layer_cl,
))
in_chs = out_chs
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
"""
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
output_stride: int = 32,
depths: Tuple[int, ...] = (3, 3, 9, 3),
dims: Tuple[int, ...] = (96, 192, 384, 768),
kernel_sizes: Union[int, Tuple[int, ...]] = 7,
ls_init_value: Optional[float] = 1e-6,
stem_type: str = 'patch',
patch_size: int = 4,
head_init_scale: float = 1.,
head_norm_first: bool = False,
head_hidden_size: Optional[int] = None,
conv_mlp: bool = False,
conv_bias: bool = True,
use_grn: bool = False,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Optional[Union[str, Callable]] = None,
norm_eps: Optional[float] = None,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
):
"""
Args:
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
global_pool: Global pooling type.
output_stride: Output stride of network, one of (8, 16, 32).
depths: Number of blocks at each stage.
dims: Feature dimension at each stage.
kernel_sizes: Depthwise convolution kernel-sizes for each stage.
ls_init_value: Init value for Layer Scale, disabled if None.
stem_type: Type of stem.
patch_size: Stem patch size for patch stem.
head_init_scale: Init scaling value for classifier weights and biases.
head_norm_first: Apply normalization before global pool + head.
head_hidden_size: Size of MLP hidden layer in head if not None and head_norm_first == False.
conv_mlp: Use 1x1 conv in MLP, improves speed for small networks w/ chan last.
conv_bias: Use bias layers w/ all convolutions.
use_grn: Use Global Response Norm (ConvNeXt-V2) in MLP.
act_layer: Activation layer type.
norm_layer: Normalization layer type.
drop_rate: Head pre-classifier dropout rate.
drop_path_rate: Stochastic depth drop rate.
"""
super().__init__()
assert output_stride in (8, 16, 32)
kernel_sizes = to_ntuple(4)(kernel_sizes)
if norm_layer is None:
norm_layer = LayerNorm2d
norm_layer_cl = norm_layer if conv_mlp else LayerNorm
if norm_eps is not None:
norm_layer = partial(norm_layer, eps=norm_eps)
norm_layer_cl = partial(norm_layer_cl, eps=norm_eps)
else:
assert conv_mlp,\
                'If a norm_layer is specified, conv MLP must be used so all norm layers expect rank-4, channels-first input'
norm_layer_cl = norm_layer
if norm_eps is not None:
norm_layer_cl = partial(norm_layer_cl, eps=norm_eps)
self.num_classes = num_classes
self.drop_rate = drop_rate
self.feature_info = []
assert stem_type in ('patch', 'overlap', 'overlap_tiered')
if stem_type == 'patch':
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias),
norm_layer(dims[0]),
)
stem_stride = patch_size
else:
mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0]
self.stem = nn.Sequential(
nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias),
nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias),
norm_layer(dims[0]),
)
stem_stride = 4
self.stages = nn.Sequential()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
prev_chs = dims[0]
curr_stride = stem_stride
dilation = 1
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(4):
stride = 2 if curr_stride == 2 or i > 0 else 1
if curr_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
curr_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
out_chs = dims[i]
stages.append(ConvNeXtStage(
prev_chs,
out_chs,
kernel_size=kernel_sizes[i],
stride=stride,
dilation=(first_dilation, dilation),
depth=depths[i],
drop_path_rates=dp_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
conv_bias=conv_bias,
use_grn=use_grn,
act_layer=act_layer,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
))
prev_chs = out_chs
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = self.head_hidden_size = prev_chs
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
if head_norm_first:
assert not head_hidden_size
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
act_layer='gelu',
)
self.head_hidden_size = self.head.num_features
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate features if `intermediates_only` is True, otherwise a tuple of
            the final features and the list of intermediates.
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
# forward pass
feat_idx = 0 # stem is index 0
x = self.stem(x)
if feat_idx in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index]
for stage in stages:
feat_idx += 1
x = stage(x)
if feat_idx in take_indices:
# NOTE not bothering to apply norm_pre when norm=True as almost no models have it enabled
intermediates.append(x)
if intermediates_only:
return intermediates
x = self.norm_pre(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm_pre = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
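# Hypothetical usage sketch (not part of the original module): build a small, randomly
# initialized ConvNeXt directly and grab per-stage feature maps via forward_intermediates().
# The depths/dims below and _convnext_features_example are illustrative, not an official config.
def _convnext_features_example():
    model = ConvNeXt(depths=(2, 2, 2, 2), dims=(32, 64, 128, 256), num_classes=10)
    x = torch.randn(1, 3, 224, 224)
    final, intermediates = model.forward_intermediates(x)
    return [tuple(t.shape) for t in intermediates]
    # stem + 4 stages (reductions 4, 4, 8, 16, 32) ->
    # [(1, 32, 56, 56), (1, 32, 56, 56), (1, 64, 28, 28), (1, 128, 14, 14), (1, 256, 7, 7)]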
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
nn.init.zeros_(module.bias)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict:
return state_dict # non-FB checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
out_dict = {}
if 'visual.trunk.stem.0.weight' in state_dict:
out_dict = {k.replace('visual.trunk.', ''): v for k, v in state_dict.items() if k.startswith('visual.trunk.')}
if 'visual.head.proj.weight' in state_dict:
out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight']
out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0])
elif 'visual.head.mlp.fc1.weight' in state_dict:
out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight']
out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias']
out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight']
out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0])
return out_dict
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
if 'grn' in k:
k = k.replace('grn.beta', 'mlp.grn.bias')
k = k.replace('grn.gamma', 'mlp.grn.weight')
v = v.reshape(v.shape[-1])
k = k.replace('head.', 'head.fc.')
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
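# Hypothetical usage sketch (not part of the original module): remap a couple of FB-style
# checkpoint keys into the naming used by this ConvNeXt implementation. The `model` argument
# is only needed for 2-d weight reshapes, so None suffices for these illustrative keys.
# _checkpoint_filter_example is an illustrative helper, not part of the timm API.
def _checkpoint_filter_example():
    fb_sd = {
        'downsample_layers.0.0.weight': torch.zeros(96, 3, 4, 4),
        'stages.0.0.dwconv.weight': torch.zeros(96, 1, 7, 7),
    }
    return list(checkpoint_filter_fn(fb_sd, model=None).keys())
    # -> ['stem.0.weight', 'stages.0.blocks.0.conv_dw.weight']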
def _create_convnext(variant, pretrained=False, **kwargs):
if kwargs.get('pretrained_cfg', '') == 'fcmae':
# NOTE fcmae pretrained weights have no classifier or final norm-layer (`head.norm`)
        # This is a workaround to allow loading with num_classes=0 w/o removing the norm-layer.
kwargs.setdefault('pretrained_strict', False)
model = build_model_with_cfg(
ConvNeXt, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
def _cfgv2(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808',
'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders',
'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2',
**kwargs
}
default_cfgs = generate_default_cfgs({
# timm specific variants
'convnext_tiny.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_atto.d2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_atto_ols.a2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_femto.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_femto_ols.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_pico.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_pico_ols.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano.d1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano_ols.d1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny_hnf.a2h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.in12k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_small.in12k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_nano.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_tiny.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_small.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_tiny.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_base.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_large.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_xlarge.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_base.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_large.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_small.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_base.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_tiny.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_small.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_base.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_large.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_xlarge.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_base.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt",
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'),
'convnextv2_atto.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_femto.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_pico.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_nano.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_tiny.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_base.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_large.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_huge.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_atto.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_femto.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_pico.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_nano.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_tiny.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_base.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_large.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_huge.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_small.untrained': _cfg(),
# CLIP weights, fine-tuned on in1k or in12k + in1k
'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0
),
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'
),
'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_xxlarge.clip_laion2b_soup_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
# CLIP original image tower weights
'convnext_base.clip_laion2b': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laion2b_augreg': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona_320': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona_augreg_320': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640),
'convnext_large_mlp.clip_laion2b_augreg': _cfg(
hf_hub_id='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=768),
'convnext_large_mlp.clip_laion2b_ft_320': _cfg(
hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768),
'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg(
hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768),
'convnext_xxlarge.clip_laion2b_soup': _cfg(
hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024),
'convnext_xxlarge.clip_laion2b_rewind': _cfg(
hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024),
})
@register_model
def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M params, current is 3.7M)
model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True)
model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant with overlapping 3x3 conv stem, current param count 3.7M
model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant
model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True)
model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm femto variant with overlapping 3x3 conv stem
model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt:
# timm pico variant
model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True)
model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm pico variant with overlapping 3x3 conv stem
model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt:
# timm nano variant with standard stem and head
model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True)
model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt:
# experimental nano variant with overlapping conv stem
model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap')
model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt:
# experimental tiny variant with norm before pooling in head (head norm first)
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True)
model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768))
model = _create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_small(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768])
model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_base(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024])
model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_large(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536])
model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536)
model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048])
model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-5))
model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M params, current is 3.7M)
model_args = dict(
depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant
model_args = dict(
depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt:
# timm pico variant
model_args = dict(
depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt:
# timm nano variant with standard stem and head
model_args = dict(
depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_nano', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k',
'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k',
'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k',
'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k',
'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k',
'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384',
'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384',
'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384',
'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384',
'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384',
'convnext_tiny_in22k': 'convnext_tiny.fb_in22k',
'convnext_small_in22k': 'convnext_small.fb_in22k',
'convnext_base_in22k': 'convnext_base.fb_in22k',
'convnext_large_in22k': 'convnext_large.fb_in22k',
'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k',
})
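# Illustrative usage sketch (not part of the original timm source): once registered, the
# constructors above can be called directly; pretrained weights are only resolved when
# `pretrained=True` and a matching default_cfg entry exists.
if __name__ == "__main__":
    model = convnextv2_nano(pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])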
| pytorch-image-models/timm/models/convnext.py/0 | {
"file_path": "pytorch-image-models/timm/models/convnext.py",
"repo_id": "pytorch-image-models",
"token_count": 25705
} | 206 |
# FastViT for PyTorch
#
# Original implementation and weights from https://github.com/apple/ml-fastvit
#
# For licensing see accompanying LICENSE file at https://github.com/apple/ml-fastvit/tree/main
# Original work is copyright (C) 2023 Apple Inc. All Rights Reserved.
#
import os
from functools import partial
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, create_conv2d, ConvNormAct, SqueezeExcite, use_fused_attn, \
ClassifierHead
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['FastVit']
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
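# Example values (illustrative, not in the original source): for a 64-channel conv,
# num_groups(0, 64) -> 1 (regular conv), num_groups(1, 64) -> 64 (depthwise conv),
# and num_groups(16, 64) -> 4 (grouped conv with 16 channels per group).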
class MobileOneBlock(nn.Module):
"""MobileOne building block.
This block has a multi-branched architecture at train-time
and plain-CNN style architecture at inference time
For more details, please refer to our paper:
    `MobileOne: An Improved One millisecond Mobile Backbone` -
https://arxiv.org/pdf/2206.04040.pdf
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
group_size: int = 0,
inference_mode: bool = False,
use_se: bool = False,
use_act: bool = True,
use_scale_branch: bool = True,
num_conv_branches: int = 1,
act_layer: nn.Module = nn.GELU,
) -> None:
"""Construct a MobileOneBlock module.
Args:
in_chs: Number of channels in the input.
out_chs: Number of channels produced by the block.
kernel_size: Size of the convolution kernel.
stride: Stride size.
dilation: Kernel dilation factor.
group_size: Convolution group size.
inference_mode: If True, instantiates model in inference mode.
use_se: Whether to use SE-ReLU activations.
use_act: Whether to use activation. Default: ``True``
use_scale_branch: Whether to use scale branch. Default: ``True``
num_conv_branches: Number of linear conv branches.
"""
super(MobileOneBlock, self).__init__()
self.inference_mode = inference_mode
self.groups = num_groups(group_size, in_chs)
self.stride = stride
self.dilation = dilation
self.kernel_size = kernel_size
self.in_chs = in_chs
self.out_chs = out_chs
self.num_conv_branches = num_conv_branches
# Check if SE-ReLU is requested
self.se = SqueezeExcite(out_chs, rd_divisor=1) if use_se else nn.Identity()
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=self.groups,
bias=True,
)
else:
# Re-parameterizable skip connection
self.reparam_conv = None
self.identity = (
nn.BatchNorm2d(num_features=in_chs)
if out_chs == in_chs and stride == 1
else None
)
# Re-parameterizable conv branches
if num_conv_branches > 0:
self.conv_kxk = nn.ModuleList([
ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
) for _ in range(self.num_conv_branches)
])
else:
self.conv_kxk = None
# Re-parameterizable scale branch
self.conv_scale = None
if kernel_size > 1 and use_scale_branch:
self.conv_scale = ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=1,
stride=self.stride,
groups=self.groups,
apply_act=False
)
self.act = act_layer() if use_act else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Apply forward pass."""
# Inference mode forward pass.
if self.reparam_conv is not None:
return self.act(self.se(self.reparam_conv(x)))
# Multi-branched train-time forward pass.
# Identity branch output
identity_out = 0
if self.identity is not None:
identity_out = self.identity(x)
# Scale branch output
scale_out = 0
if self.conv_scale is not None:
scale_out = self.conv_scale(x)
# Other kxk conv branches
out = scale_out + identity_out
if self.conv_kxk is not None:
for rc in self.conv_kxk:
out += rc(x)
return self.act(self.se(out))
def reparameterize(self):
"""Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = create_conv2d(
in_channels=self.in_chs,
out_channels=self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("conv_kxk")
self.__delattr__("conv_scale")
if hasattr(self, "identity"):
self.__delattr__("identity")
self.inference_mode = True
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
# get weights and bias of scale branch
kernel_scale = 0
bias_scale = 0
if self.conv_scale is not None:
kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.kernel_size // 2
kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv = 0
bias_conv = 0
if self.conv_kxk is not None:
for ix in range(self.num_conv_branches):
_kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix])
kernel_conv += _kernel
bias_conv += _bias
kernel_final = kernel_conv + kernel_scale + kernel_identity
bias_final = bias_conv + bias_scale + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(
self, branch: Union[nn.Sequential, nn.BatchNorm2d]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with preceeding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
Args:
branch: Sequence of ops to be fused.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, "id_tensor"):
input_dim = self.in_chs // self.groups
kernel_value = torch.zeros(
(self.in_chs, input_dim, self.kernel_size, self.kernel_size),
dtype=branch.weight.dtype,
device=branch.weight.device,
)
for i in range(self.in_chs):
kernel_value[
i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2
] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
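# Illustrative equivalence check (not part of the original source): after reparameterize(),
# the fused single-conv form of MobileOneBlock should match the multi-branch training-time
# form in eval mode (the BatchNorm running stats are what get folded into the fused kernel).
def _mobileone_reparam_sanity_check():
    block = MobileOneBlock(in_chs=32, out_chs=32, kernel_size=3, group_size=1, num_conv_branches=2)
    block.eval()  # BN must use running stats for the fusion to be exact
    x = torch.randn(2, 32, 16, 16)
    with torch.no_grad():
        y_multi_branch = block(x)
        block.reparameterize()
        y_fused = block(x)
    # Expected to print True (up to small numerical error).
    print(torch.allclose(y_multi_branch, y_fused, atol=1e-5))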
class ReparamLargeKernelConv(nn.Module):
"""Building Block of RepLKNet
This class defines overparameterized large kernel conv block
introduced in `RepLKNet <https://arxiv.org/abs/2203.06717>`_
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int,
group_size: int,
small_kernel: Optional[int] = None,
use_se: bool = False,
act_layer: Optional[nn.Module] = None,
inference_mode: bool = False,
) -> None:
"""Construct a ReparamLargeKernelConv module.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
kernel_size: Kernel size of the large kernel conv branch.
            stride: Stride size.
            group_size: Group size.
            small_kernel: Kernel size of small kernel conv branch.
            use_se: If True, apply a SqueezeExcite module to the output. Default: ``False``
            act_layer: Activation layer applied to the output. Default: ``None`` (no activation)
            inference_mode: If True, instantiates model in inference mode. Default: ``False``
"""
super(ReparamLargeKernelConv, self).__init__()
self.stride = stride
self.groups = num_groups(group_size, in_chs)
self.in_chs = in_chs
self.out_chs = out_chs
self.kernel_size = kernel_size
self.small_kernel = small_kernel
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=1,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.large_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
if small_kernel is not None:
assert (
small_kernel <= kernel_size
), "The kernel size for re-param cannot be larger than the large kernel!"
self.small_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=small_kernel,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
self.se = SqueezeExcite(out_chs, rd_ratio=0.25) if use_se else nn.Identity()
# FIXME output of this act was not used in original impl, likely due to bug
self.act = act_layer() if act_layer is not None else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
out = self.reparam_conv(x)
else:
out = self.large_conv(x)
if self.small_conv is not None:
out = out + self.small_conv(x)
out = self.se(out)
out = self.act(out)
return out
def get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
eq_k, eq_b = self._fuse_bn(self.large_conv.conv, self.large_conv.bn)
if hasattr(self, "small_conv"):
small_k, small_b = self._fuse_bn(self.small_conv.conv, self.small_conv.bn)
eq_b += small_b
eq_k += nn.functional.pad(
small_k, [(self.kernel_size - self.small_kernel) // 2] * 4
)
return eq_k, eq_b
def reparameterize(self) -> None:
"""
Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
eq_k, eq_b = self.get_kernel_bias()
self.reparam_conv = create_conv2d(
self.in_chs,
self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = eq_k
self.reparam_conv.bias.data = eq_b
self.__delattr__("large_conv")
if hasattr(self, "small_conv"):
self.__delattr__("small_conv")
@staticmethod
def _fuse_bn(
conv: nn.Conv2d, bn: nn.BatchNorm2d
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with conv layer.
Args:
conv: Convolutional kernel weights.
bn: Batchnorm 2d layer.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
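# Illustrative note (not in the original source) on the BN-fusion algebra used by `_fuse_bn`
# and `_fuse_bn_tensor` above: a conv with weight W followed by BatchNorm(gamma, beta, mean,
# var, eps) equals a single conv with per-output-channel weight W * gamma / sqrt(var + eps)
# and bias beta - mean * gamma / sqrt(var + eps), which is exactly what those methods return.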
def convolutional_stem(
in_chs: int,
out_chs: int,
act_layer: nn.Module = nn.GELU,
inference_mode: bool = False
) -> nn.Sequential:
"""Build convolutional stem with MobileOne blocks.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
inference_mode: Flag to instantiate model in inference mode. Default: ``False``
Returns:
nn.Sequential object with stem elements.
"""
return nn.Sequential(
MobileOneBlock(
in_chs=in_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
group_size=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=1,
stride=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
)
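# Illustrative shape note (not in the original source): the two stride-2 MobileOne blocks
# reduce spatial resolution by 4x and the final 1x1 block keeps it unchanged, e.g.
#   stem = convolutional_stem(3, 64)
#   stem(torch.randn(1, 3, 256, 256)).shape  # -> torch.Size([1, 64, 64, 64])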
class Attention(nn.Module):
"""Multi-headed Self Attention module.
Source modified from:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
head_dim: int = 32,
qkv_bias: bool = False,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
"""Build MHSA module that can handle 3D or 4D input tensors.
Args:
dim: Number of embedding dimensions.
head_dim: Number of hidden dimensions per head. Default: ``32``
qkv_bias: Use bias or not. Default: ``False``
attn_drop: Dropout rate for attention tensor.
proj_drop: Dropout rate for projection tensor.
"""
super().__init__()
assert dim % head_dim == 0, "dim should be divisible by head_dim"
self.head_dim = head_dim
self.num_heads = dim // head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
N = H * W
x = x.flatten(2).transpose(-2, -1) # (B, N, C)
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, self.head_dim)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
x = x.transpose(-2, -1).reshape(B, C, H, W)
return x
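# Illustrative note (not in the original source): unlike the usual (B, N, C) ViT attention,
# this module consumes NCHW feature maps directly, flattening H*W into tokens and reshaping
# back afterwards, so it is shape-preserving, e.g.
#   attn = Attention(dim=256, head_dim=32)
#   attn(torch.randn(1, 256, 8, 8)).shape  # -> torch.Size([1, 256, 8, 8])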
class PatchEmbed(nn.Module):
"""Convolutional patch embedding layer."""
def __init__(
self,
patch_size: int,
stride: int,
in_chs: int,
embed_dim: int,
act_layer: nn.Module = nn.GELU,
lkc_use_act: bool = False,
use_se: bool = False,
inference_mode: bool = False,
) -> None:
"""Build patch embedding layer.
Args:
patch_size: Patch size for embedding computation.
stride: Stride for convolutional embedding layer.
in_chs: Number of channels of input tensor.
embed_dim: Number of embedding dimensions.
inference_mode: Flag to instantiate model in inference mode. Default: ``False``
"""
super().__init__()
self.proj = nn.Sequential(
ReparamLargeKernelConv(
in_chs=in_chs,
out_chs=embed_dim,
kernel_size=patch_size,
stride=stride,
group_size=1,
small_kernel=3,
use_se=use_se,
act_layer=act_layer if lkc_use_act else None, # NOTE original weights didn't use this act
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=embed_dim,
out_chs=embed_dim,
kernel_size=1,
stride=1,
use_se=False,
act_layer=act_layer,
inference_mode=inference_mode,
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim, 1, 1))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class RepMixer(nn.Module):
"""Reparameterizable token mixer.
For more details, please refer to our paper:
`FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization <https://arxiv.org/pdf/2303.14189.pdf>`_
"""
def __init__(
self,
dim,
kernel_size=3,
layer_scale_init_value=1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Module.
Args:
dim: Input feature map dimension. :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, H, W)`.
kernel_size: Kernel size for spatial mixing. Default: 3
layer_scale_init_value: Initial value for layer scale. Default: 1e-5
inference_mode: If True, instantiates model in inference mode. Default: ``False``
"""
super().__init__()
self.dim = dim
self.kernel_size = kernel_size
self.inference_mode = inference_mode
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
padding=self.kernel_size // 2,
groups=self.dim,
bias=True,
)
else:
self.reparam_conv = None
self.norm = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
use_scale_branch=False,
num_conv_branches=0,
)
self.mixer = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = x + self.layer_scale(self.mixer(x) - self.norm(x))
return x
def reparameterize(self) -> None:
"""Reparameterize mixer and norm into a single
convolutional layer for efficient inference.
"""
if self.inference_mode:
return
self.mixer.reparameterize()
self.norm.reparameterize()
if isinstance(self.layer_scale, LayerScale2d):
w = self.mixer.id_tensor + self.layer_scale.gamma.unsqueeze(-1) * (
self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight
)
b = torch.squeeze(self.layer_scale.gamma) * (
self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
)
else:
w = (
self.mixer.id_tensor
+ self.mixer.reparam_conv.weight
- self.norm.reparam_conv.weight
)
b = self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
self.reparam_conv = create_conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
groups=self.dim,
bias=True,
)
self.reparam_conv.weight.data = w
self.reparam_conv.bias.data = b
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("mixer")
self.__delattr__("norm")
self.__delattr__("layer_scale")
class ConvMlp(nn.Module):
"""Convolutional FFN Module."""
def __init__(
self,
in_chs: int,
hidden_channels: Optional[int] = None,
out_chs: Optional[int] = None,
act_layer: nn.Module = nn.GELU,
drop: float = 0.0,
) -> None:
"""Build convolutional FFN module.
Args:
in_chs: Number of input channels.
hidden_channels: Number of channels after expansion. Default: None
out_chs: Number of output channels. Default: None
act_layer: Activation layer. Default: ``GELU``
drop: Dropout rate. Default: ``0.0``.
"""
super().__init__()
out_chs = out_chs or in_chs
hidden_channels = hidden_channels or in_chs
self.conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=7,
groups=in_chs,
apply_act=False,
)
self.fc1 = nn.Conv2d(in_chs, hidden_channels, kernel_size=1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_channels, out_chs, kernel_size=1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepConditionalPosEnc(nn.Module):
"""Implementation of conditional positional encoding.
For more details refer to paper:
`Conditional Positional Encodings for Vision Transformers <https://arxiv.org/pdf/2102.10882.pdf>`_
In our implementation, we can reparameterize this module to eliminate a skip connection.
"""
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
spatial_shape: Union[int, Tuple[int, int]] = (7, 7),
inference_mode=False,
) -> None:
"""Build reparameterizable conditional positional encoding
Args:
dim: Number of input channels.
            dim_out: Number of embedding dimensions. Default: ``None`` (same as ``dim``)
spatial_shape: Spatial shape of kernel for positional encoding. Default: (7, 7)
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super(RepConditionalPosEnc, self).__init__()
if isinstance(spatial_shape, int):
spatial_shape = tuple([spatial_shape] * 2)
        assert isinstance(spatial_shape, tuple), (
            f'"spatial_shape" must be a sequence or int, '
            f"got {type(spatial_shape)} instead."
        )
assert len(spatial_shape) == 2, (
f'Length of "spatial_shape" should be 2, '
f"got {len(spatial_shape)} instead."
)
self.spatial_shape = spatial_shape
self.dim = dim
self.dim_out = dim_out or dim
self.groups = dim
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=spatial_shape[0] // 2,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.pos_enc = nn.Conv2d(
self.dim,
self.dim_out,
spatial_shape,
1,
int(spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = self.pos_enc(x) + x
return x
def reparameterize(self) -> None:
# Build equivalent Id tensor
input_dim = self.dim // self.groups
kernel_value = torch.zeros(
(
self.dim,
input_dim,
self.spatial_shape[0],
self.spatial_shape[1],
),
dtype=self.pos_enc.weight.dtype,
device=self.pos_enc.weight.device,
)
for i in range(self.dim):
kernel_value[
i,
i % input_dim,
self.spatial_shape[0] // 2,
self.spatial_shape[1] // 2,
] = 1
id_tensor = kernel_value
# Reparameterize Id tensor and conv
w_final = id_tensor + self.pos_enc.weight
b_final = self.pos_enc.bias
# Introduce reparam conv
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=int(self.spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = w_final
self.reparam_conv.bias.data = b_final
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("pos_enc")
class RepMixerBlock(nn.Module):
"""Implementation of Metaformer block with RepMixer as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: nn.Module = nn.GELU,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Block.
Args:
dim: Number of embedding dimensions.
kernel_size: Kernel size for repmixer. Default: 3
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super().__init__()
self.token_mixer = RepMixer(
dim,
kernel_size=kernel_size,
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
)
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = self.token_mixer(x)
x = x + self.drop_path(self.layer_scale(self.mlp(x)))
return x
class AttentionBlock(nn.Module):
"""Implementation of metaformer block with MHSA as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
mlp_ratio: float = 4.0,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.BatchNorm2d,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
):
"""Build Attention Block.
Args:
dim: Number of embedding dimensions.
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
norm_layer: Normalization layer. Default: ``nn.BatchNorm2d``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
"""
super().__init__()
self.norm = norm_layer(dim)
self.token_mixer = Attention(dim=dim)
if layer_scale_init_value is not None:
self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_1 = nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_2 = nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.layer_scale_1(self.token_mixer(self.norm(x))))
x = x + self.drop_path2(self.layer_scale_2(self.mlp(x)))
return x
class FastVitStage(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
depth: int,
token_mixer_type: str,
downsample: bool = True,
se_downsample: bool = False,
down_patch_size: int = 7,
down_stride: int = 2,
pos_emb_layer: Optional[nn.Module] = None,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.BatchNorm2d,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: Optional[float] = 1e-5,
lkc_use_act=False,
inference_mode=False,
):
"""FastViT stage.
Args:
dim: Number of embedding dimensions.
depth: Number of blocks in stage
token_mixer_type: Token mixer type.
kernel_size: Kernel size for repmixer.
mlp_ratio: MLP expansion ratio.
act_layer: Activation layer.
norm_layer: Normalization layer.
proj_drop_rate: Dropout rate.
drop_path_rate: Drop path rate.
layer_scale_init_value: Layer scale value at initialization.
inference_mode: Flag to instantiate block in inference mode.
"""
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = PatchEmbed(
patch_size=down_patch_size,
stride=down_stride,
in_chs=dim,
embed_dim=dim_out,
use_se=se_downsample,
act_layer=act_layer,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
else:
assert dim == dim_out
self.downsample = nn.Identity()
if pos_emb_layer is not None:
self.pos_emb = pos_emb_layer(dim_out, inference_mode=inference_mode)
else:
self.pos_emb = nn.Identity()
blocks = []
for block_idx in range(depth):
if token_mixer_type == "repmixer":
blocks.append(RepMixerBlock(
dim_out,
kernel_size=kernel_size,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
))
elif token_mixer_type == "attention":
blocks.append(AttentionBlock(
dim_out,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
else:
raise ValueError(
"Token mixer type: {} not supported".format(token_mixer_type)
)
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.pos_emb(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class FastVit(nn.Module):
    """
    This class implements `FastViT architecture <https://arxiv.org/pdf/2303.14189.pdf>`_
    """
    fork_feat: torch.jit.Final[bool]
def __init__(
self,
in_chans: int = 3,
layers: Tuple[int, ...] = (2, 2, 6, 2),
token_mixers: Tuple[str, ...] = ("repmixer", "repmixer", "repmixer", "repmixer"),
embed_dims: Tuple[int, ...] = (64, 128, 256, 512),
mlp_ratios: Tuple[float, ...] = (4,) * 4,
downsamples: Tuple[bool, ...] = (False, True, True, True),
se_downsamples: Tuple[bool, ...] = (False, False, False, False),
repmixer_kernel_size: int = 3,
num_classes: int = 1000,
pos_embs: Tuple[Optional[nn.Module], ...] = (None,) * 4,
down_patch_size: int = 7,
down_stride: int = 2,
drop_rate: float = 0.0,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: float = 1e-5,
lkc_use_act: bool = False,
fork_feat: bool = False,
cls_ratio: float = 2.0,
global_pool: str = 'avg',
norm_layer: nn.Module = nn.BatchNorm2d,
act_layer: nn.Module = nn.GELU,
inference_mode: bool = False,
) -> None:
super().__init__()
self.num_classes = 0 if fork_feat else num_classes
self.fork_feat = fork_feat
self.global_pool = global_pool
self.feature_info = []
# Convolutional stem
self.stem = convolutional_stem(
in_chans,
embed_dims[0],
act_layer,
inference_mode,
)
# Build the main stages of the network architecture
prev_dim = embed_dims[0]
scale = 1
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
stages = []
for i in range(len(layers)):
downsample = downsamples[i] or prev_dim != embed_dims[i]
stage = FastVitStage(
dim=prev_dim,
dim_out=embed_dims[i],
depth=layers[i],
downsample=downsample,
se_downsample=se_downsamples[i],
down_patch_size=down_patch_size,
down_stride=down_stride,
pos_emb_layer=pos_embs[i],
token_mixer_type=token_mixers[i],
kernel_size=repmixer_kernel_size,
mlp_ratio=mlp_ratios[i],
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop_rate=proj_drop_rate,
drop_path_rate=dpr[i],
layer_scale_init_value=layer_scale_init_value,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
stages.append(stage)
prev_dim = embed_dims[i]
if downsample:
scale *= 2
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * scale, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_stages = len(self.stages)
self.num_features = self.head_hidden_size = prev_dim
# For segmentation and detection, extract intermediate output
if self.fork_feat:
            # Add a norm layer for each output. self.stages differs slightly from self.network in
            # the original code: the PatchEmbed layers are part of self.stages here, whereas they
            # were part of self.network originally, so we do not need to skip output indices.
self.out_indices = [0, 1, 2, 3]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get("FORK_LAST3", None):
"""For RetinaNet, `start_level=1`. The first norm layer will not used.
cmd: `FORK_LAST3=1 python -m torch.distributed.launch ...`
"""
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
else:
# Classifier head
self.num_features = self.head_hidden_size = final_features = int(embed_dims[-1] * cls_ratio)
self.final_conv = MobileOneBlock(
in_chs=embed_dims[-1],
out_chs=final_features,
kernel_size=3,
stride=1,
group_size=1,
inference_mode=inference_mode,
use_se=True,
act_layer=act_layer,
num_conv_branches=1,
)
self.head = ClassifierHead(
final_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
"""Init. for classification"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return set()
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+).pos_emb', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate feature maps if `intermediates_only` is True, otherwise a tuple
            of (final features after `final_conv`, list of intermediate feature maps).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.stem(x)
last_idx = self.num_stages - 1
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
feat_idx = 0
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == last_idx:
x = self.final_conv(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
# input embedding
x = self.stem(x)
outs = []
for idx, block in enumerate(self.stages):
x = block(x)
if self.fork_feat:
if idx in self.out_indices:
norm_layer = getattr(self, f"norm{idx}")
x_out = norm_layer(x)
outs.append(x_out)
if self.fork_feat:
# output the features of four stages for dense prediction
return outs
x = self.final_conv(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
if self.fork_feat:
return x
x = self.forward_head(x)
return x
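# Illustrative sketch of the intermediate-feature API (not part of the original source):
#   m = FastVit().eval()  # default args give a 4-stage model with dims (64, 128, 256, 512)
#   final, feats = m.forward_intermediates(torch.randn(1, 3, 256, 256))
#   # `feats` holds the four per-stage NCHW maps (strides 4, 8, 16, 32); `final` is the
#   # output of `final_conv` that feeds the classification head.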
def _cfg(url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 256, 256),
"pool_size": (8, 8),
"crop_pct": 0.9,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
'first_conv': ('stem.0.conv_kxk.0.conv', 'stem.0.conv_scale.conv'),
"classifier": "head.fc",
**kwargs,
}
default_cfgs = generate_default_cfgs({
"fastvit_t8.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa24.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa36.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_ma36.apple_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
"fastvit_t8.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa24.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_ma36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95
),
"fastvit_mci0.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s0_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s0.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
"fastvit_mci1.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s1_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s1.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
"fastvit_mci2.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s2_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s2.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
})
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stem.0.conv_kxk.0.conv.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
state_dict = state_dict.get('state_dict', state_dict)
if 'image_encoder.model.patch_embed.0.rbr_conv.0.conv.weight' in state_dict:
# remap MobileCLIP checkpoints
prefix = 'image_encoder.model.'
else:
prefix = ''
import re
import bisect
# find stage ends by locating downsample layers
stage_ends = []
for k, v in state_dict.items():
match = re.match(r'^(.*?)network\.(\d+)\.proj.*', k)
if match:
stage_ends.append(int(match.group(2)))
stage_ends = list(sorted(set(stage_ends)))
out_dict = {}
for k, v in state_dict.items():
if prefix:
if prefix not in k:
continue
k = k.replace(prefix, '')
# remap renamed layers
k = k.replace('patch_embed', 'stem')
k = k.replace('rbr_conv', 'conv_kxk')
k = k.replace('rbr_scale', 'conv_scale')
k = k.replace('rbr_skip', 'identity')
k = k.replace('conv_exp', 'final_conv') # to match byobnet, regnet, nfnet
k = k.replace('lkb_origin', 'large_conv')
k = k.replace('convffn', 'mlp')
k = k.replace('se.reduce', 'se.fc1')
k = k.replace('se.expand', 'se.fc2')
k = re.sub(r'layer_scale_([0-9])', r'layer_scale_\1.gamma', k)
if k.endswith('layer_scale'):
k = k.replace('layer_scale', 'layer_scale.gamma')
k = k.replace('dist_head', 'head_dist')
if k.startswith('head.'):
if k == 'head.proj' and hasattr(model.head, 'fc') and isinstance(model.head.fc, nn.Linear):
# if CLIP projection, map to head.fc w/ bias = zeros
k = k.replace('head.proj', 'head.fc.weight')
v = v.T
out_dict['head.fc.bias'] = torch.zeros(v.shape[0])
else:
k = k.replace('head.', 'head.fc.')
# remap flat sequential network to stages
match = re.match(r'^network\.(\d+)', k)
stage_idx, net_idx = None, None
if match:
net_idx = int(match.group(1))
stage_idx = bisect.bisect_right(stage_ends, net_idx)
if stage_idx is not None:
net_prefix = f'network.{net_idx}'
stage_prefix = f'stages.{stage_idx}'
if net_prefix + '.proj' in k:
k = k.replace(net_prefix + '.proj', stage_prefix + '.downsample.proj')
elif net_prefix + '.pe' in k:
k = k.replace(net_prefix + '.pe', stage_prefix + '.pos_emb.pos_enc')
else:
k = k.replace(net_prefix, stage_prefix + '.blocks')
out_dict[k] = v
return out_dict
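# Illustrative remapping example (not part of the original source): with downsample ('proj')
# layers at flat indices [1, 3, 5], bisect_right assigns each flat module to its stage, e.g.
# 'network.0.*' -> 'stages.0.blocks.*', 'network.3.proj.*' -> 'stages.2.downsample.proj.*',
# 'network.4.*' -> 'stages.2.blocks.*', and 'network.6.*' -> 'stages.3.blocks.*'.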
def _create_fastvit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
FastVit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
@register_model
def fastvit_t8(pretrained=False, **kwargs):
"""Instantiate FastViT-T8 model variant."""
model_args = dict(
layers=(2, 2, 4, 2),
embed_dims=(48, 96, 192, 384),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer")
)
return _create_fastvit('fastvit_t8', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_t12(pretrained=False, **kwargs):
"""Instantiate FastViT-T12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_t12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_s12(pretrained=False, **kwargs):
"""Instantiate FastViT-S12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_s12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa12(pretrained=False, **kwargs):
"""Instantiate FastViT-SA12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa24(pretrained=False, **kwargs):
"""Instantiate FastViT-SA24 model variant."""
model_args = dict(
layers=(4, 4, 12, 4),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa24', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa36(pretrained=False, **kwargs):
"""Instantiate FastViT-SA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa36', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_ma36(pretrained=False, **kwargs):
"""Instantiate FastViT-MA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(76, 152, 304, 608),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention")
)
return _create_fastvit('fastvit_ma36', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci0(pretrained=False, **kwargs):
"""Instantiate MCi0 model variant."""
model_args = dict(
layers=(2, 6, 10, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci1(pretrained=False, **kwargs):
"""Instantiate MCi1 model variant."""
model_args = dict(
layers=(4, 12, 20, 4),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci2(pretrained=False, **kwargs):
"""Instantiate MCi2 model variant."""
model_args = dict(
layers=(4, 12, 24, 4),
embed_dims=(80, 160, 320, 640),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci2', pretrained=pretrained, **dict(model_args, **kwargs))
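# Illustrative deployment sketch (not part of the original source): passing
# inference_mode=True builds the re-parameterized (single-branch) form of every block
# directly, which is the form intended for latency measurement and export.
if __name__ == "__main__":
    model = fastvit_t8(pretrained=False, inference_mode=True)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 256, 256))
    print(out.shape)  # expected: torch.Size([1, 1000])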
| pytorch-image-models/timm/models/fastvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/fastvit.py",
"repo_id": "pytorch-image-models",
"token_count": 29316
} | 207 |
""" Pytorch Inception-V4 implementation
Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
"""
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_classifier, ConvNormAct
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['InceptionV4']
class Mixed3a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = conv_block(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed4a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed4a, self).__init__()
self.branch0 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(64, 96, kernel_size=(3, 3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed5a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed5a, self).__init__()
self.conv = conv_block(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class InceptionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionA, self).__init__()
self.branch0 = conv_block(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1),
conv_block(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionA, self).__init__()
self.branch0 = conv_block(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
conv_block(384, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=3, stride=1, padding=1),
conv_block(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionB, self).__init__()
self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))
)
self.branch2 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionB, self).__init__()
self.branch0 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
conv_block(1024, 256, kernel_size=1, stride=1),
conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionC(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionC, self).__init__()
self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
output_stride=32,
drop_rate=0.,
global_pool='avg',
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionV4, self).__init__()
assert output_stride == 32
self.num_classes = num_classes
self.num_features = self.head_hidden_size = 1536
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
features = [
conv_block(in_chans, 32, kernel_size=3, stride=2),
conv_block(32, 32, kernel_size=3, stride=1),
conv_block(32, 64, kernel_size=3, stride=1, padding=1),
Mixed3a(conv_block),
Mixed4a(conv_block),
Mixed5a(conv_block),
]
features += [InceptionA(conv_block) for _ in range(4)]
features += [ReductionA(conv_block)] # Mixed6a
features += [InceptionB(conv_block) for _ in range(7)]
features += [ReductionB(conv_block)] # Mixed7a
features += [InceptionC(conv_block) for _ in range(3)]
self.features = nn.Sequential(*features)
self.feature_info = [
dict(num_chs=64, reduction=2, module='features.2'),
dict(num_chs=160, reduction=4, module='features.3'),
dict(num_chs=384, reduction=8, module='features.9'),
dict(num_chs=1024, reduction=16, module='features.17'),
dict(num_chs=1536, reduction=32, module='features.21'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^features\.[012]\.',
blocks=r'^features\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.last_linear
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
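# Editor's note: a minimal sketch of the class above (hypothetical helper, not part of the original API).
# With the default 299x299 input from the pretrained cfg, the valid-padded stem and the two reduction
# blocks bring the feature map to 1536 channels at stride 32 (8x8), matching the last feature_info entry.
def _inception_v4_feature_sketch():
    model = InceptionV4(num_classes=10)
    x = torch.randn(1, 3, 299, 299)
    feats = model.forward_features(x)
    assert feats.shape == (1, 1536, 8, 8)  # spatial: 299 -> 149 -> 147 -> 73 -> 71 -> 35 -> 17 -> 8
    logits = model.forward_head(feats)
    assert logits.shape == (1, 10)
    return feats.shape, logits.shape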
def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4:
return build_model_with_cfg(
InceptionV4,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
default_cfgs = generate_default_cfgs({
'inception_v4.tf_in1k': {
'hf_hub_id': 'timm/',
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'features.0.conv', 'classifier': 'last_linear',
}
})
@register_model
def inception_v4(pretrained=False, **kwargs):
return _create_inception_v4('inception_v4', pretrained, **kwargs)
| pytorch-image-models/timm/models/inception_v4.py/0 | {
"file_path": "pytorch-image-models/timm/models/inception_v4.py",
"repo_id": "pytorch-image-models",
"token_count": 5547
} | 208 |
"""
RDNet
Copyright (c) 2024-present NAVER Cloud Corp.
Apache-2.0
"""
from functools import partial
from typing import List, Optional, Tuple, Union, Callable
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, NormMlpClassifierHead, ClassifierHead, EffectiveSEModule, \
make_divisible, get_act_layer, get_norm_layer
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import named_apply
from ._registry import register_model, generate_default_cfgs
__all__ = ["RDNet"]
class Block(nn.Module):
def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer):
super().__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_chs, in_chs, groups=in_chs, kernel_size=7, stride=1, padding=3),
norm_layer(in_chs),
nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0),
act_layer(),
nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0),
)
def forward(self, x):
return self.layers(x)
class BlockESE(nn.Module):
def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer):
super().__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_chs, in_chs, groups=in_chs, kernel_size=7, stride=1, padding=3),
norm_layer(in_chs),
nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0),
act_layer(),
nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0),
EffectiveSEModule(out_chs),
)
def forward(self, x):
return self.layers(x)
def _get_block_type(block: str):
block = block.lower().strip()
if block == "block":
return Block
elif block == "blockese":
return BlockESE
else:
assert False, f"Unknown block type ({block})."
class DenseBlock(nn.Module):
def __init__(
self,
num_input_features: int = 64,
growth_rate: int = 64,
bottleneck_width_ratio: float = 4.0,
drop_path_rate: float = 0.0,
drop_rate: float = 0.0,
rand_gather_step_prob: float = 0.0,
block_idx: int = 0,
block_type: str = "Block",
ls_init_value: float = 1e-6,
norm_layer: str = "layernorm2d",
act_layer: str = "gelu",
):
super().__init__()
self.drop_rate = drop_rate
self.drop_path_rate = drop_path_rate
self.rand_gather_step_prob = rand_gather_step_prob
self.block_idx = block_idx
self.growth_rate = growth_rate
self.gamma = nn.Parameter(ls_init_value * torch.ones(growth_rate)) if ls_init_value > 0 else None
growth_rate = int(growth_rate)
inter_chs = int(num_input_features * bottleneck_width_ratio / 8) * 8
self.drop_path = DropPath(drop_path_rate)
self.layers = _get_block_type(block_type)(
in_chs=num_input_features,
inter_chs=inter_chs,
out_chs=growth_rate,
norm_layer=norm_layer,
act_layer=act_layer,
)
def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
x = torch.cat(x, 1)
x = self.layers(x)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x)
return x
class DenseStage(nn.Sequential):
def __init__(self, num_block, num_input_features, drop_path_rates, growth_rate, **kwargs):
super().__init__()
for i in range(num_block):
layer = DenseBlock(
num_input_features=num_input_features,
growth_rate=growth_rate,
drop_path_rate=drop_path_rates[i],
block_idx=i,
**kwargs,
)
num_input_features += growth_rate
self.add_module(f"dense_block{i}", layer)
self.num_out_features = num_input_features
def forward(self, init_feature: torch.Tensor) -> torch.Tensor:
features = [init_feature]
for module in self:
new_feature = module(features)
features.append(new_feature)
return torch.cat(features, 1)
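# Editor's note: a minimal sketch of the dense connectivity above (hypothetical helper, not part of the
# original API). Each DenseBlock consumes the concatenation of all previous features and emits
# growth_rate new channels, so a stage grows its output to num_input_features + num_block * growth_rate.
# LayerNorm2d / nn.GELU are passed explicitly because the blocks expect layer callables here.
def _dense_stage_channel_sketch():
    from timm.layers import LayerNorm2d
    stage = DenseStage(
        num_block=3,
        num_input_features=64,
        drop_path_rates=[0.0, 0.0, 0.0],
        growth_rate=64,
        norm_layer=LayerNorm2d,
        act_layer=nn.GELU,
    )
    x = torch.randn(1, 64, 56, 56)
    out = stage(x)
    assert out.shape[1] == stage.num_out_features == 64 + 3 * 64
    return out.shape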
class RDNet(nn.Module):
def __init__(
self,
in_chans: int = 3, # timm option [--in-chans]
num_classes: int = 1000, # timm option [--num-classes]
global_pool: str = 'avg', # timm option [--gp]
growth_rates: Union[List[int], Tuple[int]] = (64, 104, 128, 128, 128, 128, 224),
num_blocks_list: Union[List[int], Tuple[int]] = (3, 3, 3, 3, 3, 3, 3),
block_type: Union[List[int], Tuple[int]] = ("Block",) * 2 + ("BlockESE",) * 5,
is_downsample_block: Union[List[bool], Tuple[bool]] = (None, True, True, False, False, False, True),
bottleneck_width_ratio: float = 4.0,
transition_compression_ratio: float = 0.5,
ls_init_value: float = 1e-6,
stem_type: str = 'patch',
patch_size: int = 4,
num_init_features: int = 64,
head_init_scale: float = 1.,
head_norm_first: bool = False,
conv_bias: bool = True,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: str = "layernorm2d",
norm_eps: Optional[float] = None,
drop_rate: float = 0.0, # timm option [--drop: dropout ratio]
drop_path_rate: float = 0.0, # timm option [--drop-path: drop-path ratio]
):
"""
Args:
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
global_pool: Global pooling type.
growth_rates: Growth rate at each stage.
            num_blocks_list: Number of blocks at each stage.
            block_type: Block type at each stage ('Block' or 'BlockESE').
is_downsample_block: Whether to downsample at each stage.
bottleneck_width_ratio: Bottleneck width ratio (similar to mlp expansion ratio).
transition_compression_ratio: Channel compression ratio of transition layers.
ls_init_value: Init value for Layer Scale, disabled if None.
stem_type: Type of stem.
patch_size: Stem patch size for patch stem.
num_init_features: Number of features of stem.
head_init_scale: Init scaling value for classifier weights and biases.
head_norm_first: Apply normalization before global pool + head.
conv_bias: Use bias layers w/ all convolutions.
act_layer: Activation layer type.
norm_layer: Normalization layer type.
norm_eps: Small value to avoid division by zero in normalization.
drop_rate: Head pre-classifier dropout rate.
drop_path_rate: Stochastic depth drop rate.
"""
super().__init__()
assert len(growth_rates) == len(num_blocks_list) == len(is_downsample_block)
act_layer = get_act_layer(act_layer)
norm_layer = get_norm_layer(norm_layer)
if norm_eps is not None:
norm_layer = partial(norm_layer, eps=norm_eps)
self.num_classes = num_classes
self.drop_rate = drop_rate
# stem
assert stem_type in ('patch', 'overlap', 'overlap_tiered')
if stem_type == 'patch':
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.stem = nn.Sequential(
nn.Conv2d(in_chans, num_init_features, kernel_size=patch_size, stride=patch_size, bias=conv_bias),
norm_layer(num_init_features),
)
stem_stride = patch_size
else:
mid_chs = make_divisible(num_init_features // 2) if 'tiered' in stem_type else num_init_features
self.stem = nn.Sequential(
nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias),
nn.Conv2d(mid_chs, num_init_features, kernel_size=3, stride=2, padding=1, bias=conv_bias),
norm_layer(num_init_features),
)
stem_stride = 4
# features
self.feature_info = []
self.num_stages = len(growth_rates)
curr_stride = stem_stride
num_features = num_init_features
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(num_blocks_list)).split(num_blocks_list)]
dense_stages = []
for i in range(self.num_stages):
dense_stage_layers = []
if i != 0:
compressed_num_features = int(num_features * transition_compression_ratio / 8) * 8
k_size = stride = 1
if is_downsample_block[i]:
curr_stride *= 2
k_size = stride = 2
dense_stage_layers.append(norm_layer(num_features))
dense_stage_layers.append(
nn.Conv2d(num_features, compressed_num_features, kernel_size=k_size, stride=stride, padding=0)
)
num_features = compressed_num_features
stage = DenseStage(
num_block=num_blocks_list[i],
num_input_features=num_features,
growth_rate=growth_rates[i],
bottleneck_width_ratio=bottleneck_width_ratio,
drop_rate=drop_rate,
drop_path_rates=dp_rates[i],
ls_init_value=ls_init_value,
block_type=block_type[i],
norm_layer=norm_layer,
act_layer=act_layer,
)
dense_stage_layers.append(stage)
num_features += num_blocks_list[i] * growth_rates[i]
if i + 1 == self.num_stages or (i + 1 != self.num_stages and is_downsample_block[i + 1]):
self.feature_info += [
dict(
num_chs=num_features,
reduction=curr_stride,
module=f'dense_stages.{i}',
growth_rate=growth_rates[i],
)
]
dense_stages.append(nn.Sequential(*dense_stage_layers))
self.dense_stages = nn.Sequential(*dense_stages)
self.num_features = self.head_hidden_size = num_features
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default RDNet ordering (pretrained NV weights)
if head_norm_first:
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
)
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.dense_stages) + 1, indices)
# forward pass
feat_idx = 0 # stem is index 0
x = self.stem(x)
if feat_idx in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
dense_stages = self.dense_stages
else:
dense_stages = self.dense_stages[:max_index]
for stage in dense_stages:
feat_idx += 1
x = stage(x)
if feat_idx in take_indices:
# NOTE not bothering to apply norm_pre when norm=True as almost no models have it enabled
intermediates.append(x)
if intermediates_only:
return intermediates
x = self.norm_pre(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.dense_stages) + 1, indices)
self.dense_stages = self.dense_stages[:max_index] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm_pre = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.dense_stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
        x = self.forward_head(x)
return x
@torch.jit.ignore
def group_matcher(self, coarse=False):
assert not coarse, "coarse grouping is not implemented for RDNet"
return dict(
stem=r'^stem',
blocks=r'^dense_stages\.(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.dense_stages:
s.grad_checkpointing = enable
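# Editor's note: a minimal sketch of the forward_intermediates() API above (hypothetical helper, not part
# of the original API). Index 0 is the stem output and indices 1..7 are the dense stage outputs for the
# default (rdnet_tiny-sized) constructor arguments assumed here.
def _rdnet_intermediates_sketch():
    model = RDNet()
    x = torch.randn(1, 3, 224, 224)
    feats = model.forward_intermediates(x, indices=[0, 3, 7], intermediates_only=True)
    return [f.shape for f in feats]  # NCHW maps: stem, third dense stage, and final stage outputs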
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight)
elif isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
nn.init.constant_(module.bias, 0)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap NV checkpoints -> timm """
if 'stem.0.weight' in state_dict:
return state_dict # non-NV checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
out_dict = {}
for k, v in state_dict.items():
k = k.replace('stem.stem.', 'stem.')
out_dict[k] = v
return out_dict
def _create_rdnet(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
RDNet, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
"url": url,
"num_classes": 1000, "input_size": (3, 224, 224), "pool_size": (7, 7),
"crop_pct": 0.9, "interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN, "std": IMAGENET_DEFAULT_STD,
"first_conv": "stem.0", "classifier": "head.fc",
"paper_ids": "arXiv:2403.19588",
"paper_name": "DenseNets Reloaded: Paradigm Shift Beyond ResNets and ViTs",
"origin_url": "https://github.com/naver-ai/rdnet",
**kwargs,
}
default_cfgs = generate_default_cfgs({
'rdnet_tiny.nv_in1k': _cfg(
hf_hub_id='naver-ai/rdnet_tiny.nv_in1k'),
'rdnet_small.nv_in1k': _cfg(
hf_hub_id='naver-ai/rdnet_small.nv_in1k'),
'rdnet_base.nv_in1k': _cfg(
hf_hub_id='naver-ai/rdnet_base.nv_in1k'),
'rdnet_large.nv_in1k': _cfg(
hf_hub_id='naver-ai/rdnet_large.nv_in1k'),
'rdnet_large.nv_in1k_ft_in1k_384': _cfg(
hf_hub_id='naver-ai/rdnet_large.nv_in1k_ft_in1k_384',
input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
})
@register_model
def rdnet_tiny(pretrained=False, **kwargs):
n_layer = 7
model_args = {
"num_init_features": 64,
"growth_rates": [64] + [104] + [128] * 4 + [224],
"num_blocks_list": [3] * n_layer,
"is_downsample_block": (None, True, True, False, False, False, True),
"transition_compression_ratio": 0.5,
"block_type": ["Block"] + ["Block"] + ["BlockESE"] * 4 + ["BlockESE"],
}
model = _create_rdnet("rdnet_tiny", pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def rdnet_small(pretrained=False, **kwargs):
n_layer = 11
model_args = {
"num_init_features": 72,
"growth_rates": [64] + [128] + [128] * (n_layer - 4) + [240] * 2,
"num_blocks_list": [3] * n_layer,
"is_downsample_block": (None, True, True, False, False, False, False, False, False, True, False),
"transition_compression_ratio": 0.5,
"block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2,
}
model = _create_rdnet("rdnet_small", pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def rdnet_base(pretrained=False, **kwargs):
n_layer = 11
model_args = {
"num_init_features": 120,
"growth_rates": [96] + [128] + [168] * (n_layer - 4) + [336] * 2,
"num_blocks_list": [3] * n_layer,
"is_downsample_block": (None, True, True, False, False, False, False, False, False, True, False),
"transition_compression_ratio": 0.5,
"block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2,
}
model = _create_rdnet("rdnet_base", pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def rdnet_large(pretrained=False, **kwargs):
n_layer = 12
model_args = {
"num_init_features": 144,
"growth_rates": [128] + [192] + [256] * (n_layer - 4) + [360] * 2,
"num_blocks_list": [3] * n_layer,
"is_downsample_block": (None, True, True, False, False, False, False, False, False, False, True, False),
"transition_compression_ratio": 0.5,
"block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2,
}
model = _create_rdnet("rdnet_large", pretrained=pretrained, **dict(model_args, **kwargs))
return model
| pytorch-image-models/timm/models/rdnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/rdnet.py",
"repo_id": "pytorch-image-models",
"token_count": 9321
} | 209 |
""" Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
- https://arxiv.org/pdf/2111.09883
Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below
This implementation is experimental and subject to change in manners that will break weight compat:
* Size of the pos embed MLP is not spelled out in the paper in terms of dim; fixed for all models? vary with num_heads?
* currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head)
* The specifics of the memory saving 'sequential attention' are not detailed; Christoph Reich has an impl at the
    GitHub link above. It needs further investigation as the throughput vs mem tradeoff doesn't appear beneficial.
* num_heads per stage is not detailed for Huge and Giant model variants
* 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts
* experiments are ongoing w.r.t. 'main branch' norm layer use and weight init scheme
Noteworthy additions over official Swin v1:
* MLP relative position embedding is looking promising and adapts to different image/window sizes
* This impl has been designed to allow easy change of image size with matching window size changes
* Non-square image size and window size are supported
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer V2 reimplementation
# Copyright (c) 2021 Christoph Reich
# Licensed under The MIT License [see LICENSE for details]
# Written by Christoph Reich
# --------------------------------------------------------
import logging
import math
from typing import Tuple, Optional, List, Union, Any, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert, ndgrid
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._manipulate import named_apply
from ._registry import generate_default_cfgs, register_model
__all__ = ['SwinTransformerV2Cr'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor:
"""Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """
return x.permute(0, 2, 3, 1)
def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor:
"""Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). """
return x.permute(0, 3, 1, 2)
def window_partition(x, window_size: Tuple[int, int]):
"""
Args:
x: (B, H, W, C)
        window_size (Tuple[int, int]): Window size (height, width)
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
"""
Args:
windows: (num_windows * B, window_size[0], window_size[1], C)
window_size (Tuple[int, int]): Window size
img_size (Tuple[int, int]): Image size
Returns:
x: (B, H, W, C)
"""
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
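# Editor's note: a small round-trip sketch (hypothetical helper, not part of the original API) showing the
# shape contract of the two functions above: window_partition tiles a (B, H, W, C) map into
# (num_windows * B, wh, ww, C) windows and window_reverse inverts it exactly when H and W are divisible
# by the window size.
def _window_partition_roundtrip_sketch():
    x = torch.randn(2, 56, 56, 96)
    windows = window_partition(x, (7, 7))  # (2 * 8 * 8, 7, 7, 96)
    y = window_reverse(windows, (7, 7), (56, 56))
    assert windows.shape == (128, 7, 7, 96)
    assert torch.equal(x, y)
    return windows.shape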
class WindowMultiHeadAttention(nn.Module):
r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias.
Args:
dim (int): Number of input features
window_size (int): Window size
num_heads (int): Number of attention heads
drop_attn (float): Dropout rate of attention map
drop_proj (float): Dropout rate after projection
meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network
sequential_attn (bool): If true sequential self-attention is performed
"""
def __init__(
self,
dim: int,
num_heads: int,
window_size: Tuple[int, int],
drop_attn: float = 0.0,
drop_proj: float = 0.0,
meta_hidden_dim: int = 384, # FIXME what's the optimal value?
sequential_attn: bool = False,
) -> None:
super(WindowMultiHeadAttention, self).__init__()
        assert dim % num_heads == 0, \
            "The number of input features (in_features) is not divisible by the number of heads (num_heads)."
self.in_features: int = dim
self.window_size: Tuple[int, int] = to_2tuple(window_size)
self.num_heads: int = num_heads
self.sequential_attn: bool = sequential_attn
self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True)
self.attn_drop = nn.Dropout(drop_attn)
self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True)
self.proj_drop = nn.Dropout(drop_proj)
# meta network for positional encodings
self.meta_mlp = Mlp(
2, # x, y
hidden_features=meta_hidden_dim,
out_features=num_heads,
act_layer=nn.ReLU,
drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without?
)
# NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads)))
self._make_pair_wise_relative_positions()
def _make_pair_wise_relative_positions(self) -> None:
"""Method initializes the pair-wise relative positions to compute the positional biases."""
device = self.logit_scale.device
coordinates = torch.stack(ndgrid(
torch.arange(self.window_size[0], device=device),
torch.arange(self.window_size[1], device=device)
), dim=0).flatten(1)
relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :]
relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float()
relative_coordinates_log = torch.sign(relative_coordinates) * torch.log(
1.0 + relative_coordinates.abs())
self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False)
def set_window_size(self, window_size: Tuple[int, int]) -> None:
"""Update window size & interpolate position embeddings
Args:
window_size (int): New window size
"""
window_size = to_2tuple(window_size)
if window_size != self.window_size:
self.window_size = window_size
self._make_pair_wise_relative_positions()
def _relative_positional_encodings(self) -> torch.Tensor:
"""Method computes the relative positional encodings
Returns:
relative_position_bias (torch.Tensor): Relative positional encodings
(1, number of heads, window size ** 2, window size ** 2)
"""
window_area = self.window_size[0] * self.window_size[1]
relative_position_bias = self.meta_mlp(self.relative_coordinates_log)
relative_position_bias = relative_position_bias.transpose(1, 0).reshape(
self.num_heads, window_area, window_area
)
relative_position_bias = relative_position_bias.unsqueeze(0)
return relative_position_bias
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
""" Forward pass.
Args:
x (torch.Tensor): Input tensor of the shape (B * windows, N, C)
mask (Optional[torch.Tensor]): Attention mask for the shift case
Returns:
Output tensor of the shape [B * windows, N, C]
"""
Bw, L, C = x.shape
qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
query, key, value = qkv.unbind(0)
# compute attention map with scaled cosine attention
attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1))
logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp()
attn = attn * logit_scale
attn = attn + self._relative_positional_encodings()
if mask is not None:
# Apply mask if utilized
num_win: int = mask.shape[0]
attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L)
attn = attn + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, L, L)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
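# Editor's note: a minimal sketch of the module above (hypothetical helper, not part of the original API).
# The log-spaced relative coordinates are fed through the small 'meta' MLP to produce a
# (1, num_heads, window_area, window_area) bias that is added to the scaled-cosine attention map.
def _window_attention_sketch():
    attn = WindowMultiHeadAttention(dim=96, num_heads=3, window_size=(7, 7))
    x = torch.randn(4, 7 * 7, 96)  # (B * num_windows, tokens per window, C)
    out = attn(x)
    assert out.shape == x.shape
    assert attn._relative_positional_encodings().shape == (1, 3, 49, 49)
    return out.shape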
class SwinTransformerV2CrBlock(nn.Module):
r"""This class implements the Swin transformer block.
Args:
dim (int): Number of input channels
num_heads (int): Number of attention heads to be utilized
feat_size (Tuple[int, int]): Input resolution
window_size (Tuple[int, int]): Window size to be utilized
        shift_size (Tuple[int, int]): Shifting size to be used
        mlp_ratio (float): Ratio of the hidden dimension in the FFN to the input channels
proj_drop (float): Dropout in input mapping
drop_attn (float): Dropout rate of attention map
drop_path (float): Dropout in main path
extra_norm (bool): Insert extra norm on 'main' branch if True
sequential_attn (bool): If true sequential self-attention is performed
norm_layer (Type[nn.Module]): Type of normalization layer to be utilized
"""
def __init__(
self,
dim: int,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
shift_size: Tuple[int, int] = (0, 0),
always_partition: bool = False,
dynamic_mask: bool = False,
mlp_ratio: float = 4.0,
init_values: Optional[float] = 0,
proj_drop: float = 0.0,
drop_attn: float = 0.0,
drop_path: float = 0.0,
extra_norm: bool = False,
sequential_attn: bool = False,
norm_layer: Type[nn.Module] = nn.LayerNorm,
):
super(SwinTransformerV2CrBlock, self).__init__()
self.dim: int = dim
self.feat_size: Tuple[int, int] = feat_size
self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size)
self.always_partition = always_partition
self.dynamic_mask = dynamic_mask
self.window_size, self.shift_size = self._calc_window_shift(window_size)
self.window_area = self.window_size[0] * self.window_size[1]
self.init_values: Optional[float] = init_values
# attn branch
self.attn = WindowMultiHeadAttention(
dim=dim,
num_heads=num_heads,
window_size=self.window_size,
drop_attn=drop_attn,
drop_proj=proj_drop,
sequential_attn=sequential_attn,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
# mlp branch
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
drop=proj_drop,
out_features=dim,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
# Extra main branch norm layer mentioned for Huge/Giant models in V2 paper.
# Also being used as final network norm and optional stage ending norm while still in a C-last format.
self.norm3 = norm_layer(dim) if extra_norm else nn.Identity()
self.register_buffer(
"attn_mask",
None if self.dynamic_mask else self.get_attn_mask(),
persistent=False,
)
self.init_weights()
def _calc_window_shift(
self,
target_window_size: Tuple[int, int],
) -> Tuple[Tuple[int, int], Tuple[int, int]]:
target_window_size = to_2tuple(target_window_size)
target_shift_size = self.target_shift_size
if any(target_shift_size):
# if non-zero, recalculate shift from current window size in case window size has changed
target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2)
if self.always_partition:
return target_window_size, target_shift_size
window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)]
shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, target_shift_size)]
return tuple(window_size), tuple(shift_size)
def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]:
"""Method generates the attention mask used in shift case."""
# Make masks for shift case
if any(self.shift_size):
# calculate attention mask for SW-MSA
if x is None:
img_mask = torch.zeros((1, *self.feat_size, 1)) # 1 H W 1
else:
img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) # 1 H W 1
cnt = 0
for h in (
(0, -self.window_size[0]),
(-self.window_size[0], -self.shift_size[0]),
(-self.shift_size[0], None),
):
for w in (
(0, -self.window_size[1]),
(-self.window_size[1], -self.shift_size[1]),
(-self.shift_size[1], None),
):
img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_area)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
return attn_mask
def init_weights(self):
# extra, module specific weight init
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def set_input_size(self, feat_size: Tuple[int, int], window_size: Tuple[int, int]) -> None:
"""Method updates the image resolution to be processed and window size and so the pair-wise relative positions.
Args:
feat_size (Tuple[int, int]): New input resolution
window_size (int): New window size
"""
# Update input resolution
self.feat_size: Tuple[int, int] = feat_size
self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size))
self.window_area = self.window_size[0] * self.window_size[1]
self.attn.set_window_size(self.window_size)
self.register_buffer(
"attn_mask",
None if self.dynamic_mask else self.get_attn_mask(),
persistent=False,
)
def _shifted_window_attn(self, x):
B, H, W, C = x.shape
# cyclic shift
sh, sw = self.shift_size
do_shift: bool = any(self.shift_size)
if do_shift:
# FIXME PyTorch XLA needs cat impl, roll not lowered
# x = torch.cat([x[:, sh:], x[:, :sh]], dim=1)
# x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2)
x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2))
pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
x = torch.nn.functional.pad(x, (0, 0, 0, pad_w, 0, pad_h))
_, Hp, Wp, _ = x.shape
# partition windows
x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C)
# W-MSA/SW-MSA
if getattr(self, 'dynamic_mask', False):
attn_mask = self.get_attn_mask(x)
else:
attn_mask = self.attn_mask
attn_windows = self.attn(x_windows, mask=attn_mask) # num_windows * B, window_size * window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) # B H' W' C
x = x[:, :H, :W, :].contiguous()
# reverse cyclic shift
if do_shift:
# FIXME PyTorch XLA needs cat impl, roll not lowered
# x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1)
# x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2)
x = torch.roll(x, shifts=(sh, sw), dims=(1, 2))
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass.
Args:
            x (torch.Tensor): Input tensor of the shape [B, H, W, C]
        Returns:
            output (torch.Tensor): Output tensor of the shape [B, H, W, C]
"""
# post-norm branches (op -> norm -> drop)
x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x)))
B, H, W, C = x.shape
x = x.reshape(B, -1, C)
x = x + self.drop_path2(self.norm2(self.mlp(x)))
x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant)
x = x.reshape(B, H, W, C)
return x
class PatchMerging(nn.Module):
""" This class implements the patch merging as a strided convolution with a normalization before.
Args:
dim (int): Number of input channels
norm_layer (Type[nn.Module]): Type of normalization layer to be utilized.
"""
def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None:
super(PatchMerging, self).__init__()
self.norm = norm_layer(4 * dim)
self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
""" Forward pass.
Args:
            x (torch.Tensor): Input tensor of the shape [B, H, W, C]
        Returns:
            output (torch.Tensor): Output tensor of the shape [B, H // 2, W // 2, 2 * C] (ceil division if H or W is odd)
"""
B, H, W, C = x.shape
        pad_values = (0, 0, 0, W % 2, 0, H % 2)  # F.pad pads trailing dims first: (C, C), (W, W), (H, H)
x = nn.functional.pad(x, pad_values)
_, H, W, _ = x.shape
x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
x = self.norm(x)
x = self.reduction(x)
return x
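# Editor's note: a minimal sketch of PatchMerging (hypothetical helper, not part of the original API).
# Each 2x2 spatial neighbourhood is folded into the channel dim (4C), normalized, then projected to 2C,
# halving the resolution: (B, H, W, C) -> (B, H // 2, W // 2, 2 * C) for even H, W.
def _patch_merging_sketch():
    merge = PatchMerging(dim=96)
    x = torch.randn(1, 56, 56, 96)
    out = merge(x)
    assert out.shape == (1, 28, 28, 192)
    return out.shape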
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding """
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
norm_layer=None,
strict_img_size=True,
):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.strict_img_size = strict_img_size
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def set_input_size(self, img_size: Tuple[int, int]):
img_size = to_2tuple(img_size)
if img_size != self.img_size:
self.img_size = img_size
self.grid_size = (img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
def forward(self, x):
B, C, H, W = x.shape
if self.strict_img_size:
_assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
_assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
x = self.proj(x)
x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return x
class SwinTransformerV2CrStage(nn.Module):
r"""This class implements a stage of the Swin transformer including multiple layers.
Args:
embed_dim (int): Number of input channels
depth (int): Depth of the stage (number of layers)
        downscale (bool): If true input is downsampled (see Fig. 3 of the V1 paper)
feat_size (Tuple[int, int]): input feature map size (H, W)
num_heads (int): Number of attention heads to be utilized
        window_size (Tuple[int, int]): Window size to be utilized
        mlp_ratio (float): Ratio of the hidden dimension in the FFN to the input channels
proj_drop (float): Dropout in input mapping
drop_attn (float): Dropout rate of attention map
drop_path (float): Dropout in main path
norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. Default: nn.LayerNorm
extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks
extra_norm_stage (bool): End each stage with an extra norm layer in main branch
sequential_attn (bool): If true sequential self-attention is performed
"""
def __init__(
self,
embed_dim: int,
depth: int,
downscale: bool,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
always_partition: bool = False,
dynamic_mask: bool = False,
mlp_ratio: float = 4.0,
init_values: Optional[float] = 0.0,
proj_drop: float = 0.0,
drop_attn: float = 0.0,
drop_path: Union[List[float], float] = 0.0,
norm_layer: Type[nn.Module] = nn.LayerNorm,
extra_norm_period: int = 0,
extra_norm_stage: bool = False,
sequential_attn: bool = False,
):
super(SwinTransformerV2CrStage, self).__init__()
self.downscale: bool = downscale
self.grad_checkpointing: bool = False
self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size
if downscale:
self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer)
embed_dim = embed_dim * 2
else:
self.downsample = nn.Identity()
def _extra_norm(index):
i = index + 1
if extra_norm_period and i % extra_norm_period == 0:
return True
return i == depth if extra_norm_stage else False
self.blocks = nn.Sequential(*[
SwinTransformerV2CrBlock(
dim=embed_dim,
num_heads=num_heads,
feat_size=self.feat_size,
window_size=window_size,
always_partition=always_partition,
dynamic_mask=dynamic_mask,
shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]),
mlp_ratio=mlp_ratio,
init_values=init_values,
proj_drop=proj_drop,
drop_attn=drop_attn,
drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path,
extra_norm=_extra_norm(index),
sequential_attn=sequential_attn,
norm_layer=norm_layer,
)
for index in range(depth)]
)
def set_input_size(
self,
feat_size: Tuple[int, int],
window_size: int,
always_partition: Optional[bool] = None,
):
""" Updates the resolution to utilize and the window size and so the pair-wise relative positions.
Args:
window_size (int): New window size
feat_size (Tuple[int, int]): New input resolution
"""
self.feat_size = (feat_size[0] // 2, feat_size[1] // 2) if self.downscale else feat_size
for block in self.blocks:
block.set_input_size(
feat_size=self.feat_size,
window_size=window_size,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass.
Args:
            x (torch.Tensor): Input tensor of the shape [B, C, H, W]
        Returns:
            output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] when downscaling, else [B, C, H, W]
"""
x = bchw_to_bhwc(x)
x = self.downsample(x)
for block in self.blocks:
# Perform checkpointing if utilized
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(block, x)
else:
x = block(x)
x = bhwc_to_bchw(x)
return x
class SwinTransformerV2Cr(nn.Module):
r""" Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` -
https://arxiv.org/pdf/2111.09883
Args:
img_size: Input resolution.
        window_size: Window size. If None, grid_size // window_ratio
window_ratio: Window size to patch grid ratio.
patch_size: Patch size.
in_chans: Number of input channels.
depths: Depth of the stage (number of layers).
num_heads: Number of attention heads to be utilized.
embed_dim: Patch embedding dimension.
num_classes: Number of output classes.
mlp_ratio: Ratio of the hidden dimension in the FFN to the input channels.
drop_rate: Dropout rate.
proj_drop_rate: Projection dropout rate.
attn_drop_rate: Dropout rate of attention map.
drop_path_rate: Stochastic depth rate.
norm_layer: Type of normalization layer to be utilized.
extra_norm_period: Insert extra norm layer on main branch every N (period) blocks in stage
extra_norm_stage: End each stage with an extra norm layer in main branch
sequential_attn: If true sequential self-attention is performed.
"""
def __init__(
self,
img_size: Tuple[int, int] = (224, 224),
patch_size: int = 4,
window_size: Optional[int] = None,
window_ratio: int = 8,
always_partition: bool = False,
strict_img_size: bool = True,
in_chans: int = 3,
num_classes: int = 1000,
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
num_heads: Tuple[int, ...] = (3, 6, 12, 24),
mlp_ratio: float = 4.0,
init_values: Optional[float] = 0.,
drop_rate: float = 0.0,
proj_drop_rate: float = 0.0,
attn_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
norm_layer: Type[nn.Module] = nn.LayerNorm,
extra_norm_period: int = 0,
extra_norm_stage: bool = False,
sequential_attn: bool = False,
global_pool: str = 'avg',
weight_init='skip',
**kwargs: Any
) -> None:
super(SwinTransformerV2Cr, self).__init__()
img_size = to_2tuple(img_size)
self.num_classes: int = num_classes
self.patch_size: int = patch_size
self.img_size: Tuple[int, int] = img_size
self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.feature_info = []
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer,
strict_img_size=strict_img_size,
)
grid_size = self.patch_embed.grid_size
if window_size is None:
self.window_size = tuple([s // window_ratio for s in grid_size])
else:
self.window_size = to_2tuple(window_size)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
in_dim = embed_dim
in_scale = 1
for stage_idx, (depth, num_heads) in enumerate(zip(depths, num_heads)):
stages += [SwinTransformerV2CrStage(
embed_dim=in_dim,
depth=depth,
downscale=stage_idx != 0,
feat_size=(grid_size[0] // in_scale, grid_size[1] // in_scale),
num_heads=num_heads,
window_size=self.window_size,
always_partition=always_partition,
dynamic_mask=not strict_img_size,
mlp_ratio=mlp_ratio,
init_values=init_values,
proj_drop=proj_drop_rate,
drop_attn=attn_drop_rate,
drop_path=dpr[stage_idx],
extra_norm_period=extra_norm_period,
extra_norm_stage=extra_norm_stage or (stage_idx + 1) == len(depths), # last stage ends w/ norm
sequential_attn=sequential_attn,
norm_layer=norm_layer,
)]
if stage_idx != 0:
in_dim *= 2
in_scale *= 2
self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')]
self.stages = nn.Sequential(*stages)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
# current weight init skips custom init and uses pytorch layer defaults, seems to work well
# FIXME more experiments needed
if weight_init != 'skip':
named_apply(init_weights, self)
def set_input_size(
self,
img_size: Optional[Tuple[int, int]] = None,
window_size: Optional[Tuple[int, int]] = None,
window_ratio: int = 8,
always_partition: Optional[bool] = None,
) -> None:
"""Updates the image resolution, window size and so the pair-wise relative positions.
Args:
img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used
            window_size (Optional[int]): New window size, if None based on new patch grid size // window_ratio
window_ratio (int): divisor for calculating window size from patch grid size
always_partition: always partition / shift windows even if feat size is < window
"""
if img_size is not None:
self.patch_embed.set_input_size(img_size=img_size)
grid_size = self.patch_embed.grid_size
if window_size is None and window_ratio is not None:
window_size = tuple([s // window_ratio for s in grid_size])
for index, stage in enumerate(self.stages):
stage_scale = 2 ** max(index - 1, 0)
stage.set_input_size(
feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale),
window_size=window_size,
always_partition=always_partition,
)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore()
def get_classifier(self) -> nn.Module:
"""Method returns the classification head of the model.
Returns:
head (nn.Module): Current classification head
"""
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
"""Method results the classification head
Args:
num_classes (int): Number of classes to be predicted
global_pool (str): Unused
"""
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.patch_embed(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
for i, stage in enumerate(stages):
x = stage(x)
if i in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
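# Editor's note: a minimal sketch of the resolution-change workflow described in the module docstring
# (hypothetical helper, not part of the original API). set_input_size() re-derives the patch grid,
# per-stage feature sizes and window size (grid // window_ratio) and rebuilds the relative-position
# tables, so the same weights can run at a new input size. The 224 -> 384 sizes here are assumptions.
def _set_input_size_sketch():
    model = SwinTransformerV2Cr(img_size=(224, 224), embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
    model.set_input_size(img_size=(384, 384), window_ratio=8)  # window becomes (384 // 4) // 8 = 12
    x = torch.randn(1, 3, 384, 384)
    out = model(x)
    assert out.shape == (1, 1000)
    return out.shape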
def init_weights(module: nn.Module, name: str = ''):
# FIXME WIP determining if there's a better weight init
if isinstance(module, nn.Linear):
if 'qkv' in name:
# treat the weights of Q, K, V separately
val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
nn.init.uniform_(module.weight, -val, val)
elif 'head' in name:
nn.init.zeros_(module.weight)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
if 'head.fc.weight' in state_dict:
return state_dict
out_dict = {}
for k, v in state_dict.items():
if 'tau' in k:
# convert old tau based checkpoints -> logit_scale (inverse)
v = torch.log(1 / v)
k = k.replace('tau', 'logit_scale')
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
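# Editor's note: a minimal sketch of the remapping above (hypothetical helper, not part of the original
# API, using made-up keys). Old checkpoints stored the inverse temperature 'tau'; the filter converts it
# to 'logit_scale' via log(1 / tau) and renames 'head.' keys to 'head.fc.'.
def _checkpoint_remap_sketch():
    sd = {
        'stages.0.blocks.0.attn.tau': torch.full((3,), 0.1),
        'head.weight': torch.zeros(1000, 768),
    }
    out = checkpoint_filter_fn(sd, model=None)
    assert 'stages.0.blocks.0.attn.logit_scale' in out
    assert 'head.fc.weight' in out
    return sorted(out)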
def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
SwinTransformerV2Cr, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': (7, 7),
'crop_pct': 0.9,
'interpolation': 'bicubic',
'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj',
'classifier': 'head.fc',
**kwargs,
}
default_cfgs = generate_default_cfgs({
'swinv2_cr_tiny_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_tiny_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_tiny_ns_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth",
input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_small_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_small_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth",
input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_small_ns_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth",
input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_small_ns_256.untrained': _cfg(
url="", input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)),
'swinv2_cr_base_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_base_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_base_ns_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_large_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_large_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_huge_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_huge_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_giant_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_giant_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
})
@register_model
def swinv2_cr_tiny_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-T V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_tiny_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-T V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms.
** Experimental, may make default if results are improved. **
"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_ns_256(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 256x256, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_base_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-B V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
)
return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_base_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-B V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
)
return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_base_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-B V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_large_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-L V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
)
return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_large_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-L V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
)
return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_huge_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-H V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=352,
depths=(2, 2, 18, 2),
        num_heads=(11, 22, 44, 88),  # head count for Huge is not certain; the 384 & 224 variants try different values
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_huge_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-H V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=352,
depths=(2, 2, 18, 2),
        num_heads=(8, 16, 32, 64),  # head count for Huge is not certain; the 384 & 224 variants try different values
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_giant_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-G V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=512,
depths=(2, 2, 42, 2),
num_heads=(16, 32, 64, 128),
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_giant_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-G V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=512,
depths=(2, 2, 42, 2),
num_heads=(16, 32, 64, 128),
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs))
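def _swinv2_cr_demo():
    """Minimal usage sketch for the entrypoints registered above: build a variant with random
    weights and run a dummy forward pass. The chosen variant and `num_classes` are arbitrary
    illustration values, not a recommended configuration.
    """
    import torch
    model = swinv2_cr_tiny_224(pretrained=False, num_classes=10)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    assert logits.shape == (1, 10)
    return logits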
| pytorch-image-models/timm/models/swin_transformer_v2_cr.py/0 | {
"file_path": "pytorch-image-models/timm/models/swin_transformer_v2_cr.py",
"repo_id": "pytorch-image-models",
"token_count": 21270
} | 210 |
""" Cross-Covariance Image Transformer (XCiT) in PyTorch
Paper:
- https://arxiv.org/abs/2106.09681
Same as the official implementation, with some minor adaptations, original copyright below
- https://github.com/facebookresearch/xcit/blob/master/xcit.py
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import math
from functools import partial
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, to_2tuple, use_fused_attn
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
from .cait import ClassAttn
from .vision_transformer import Mlp
__all__ = ['Xcit'] # model_registry will add each entrypoint fn to this
@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method
class PositionalEncodingFourier(nn.Module):
"""
    Positional encoding relying on a Fourier kernel matching the one used in the "Attention Is All You Need" paper.
Based on the official XCiT code
- https://github.com/facebookresearch/xcit/blob/master/xcit.py
"""
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
self.eps = 1e-6
def forward(self, B: int, H: int, W: int):
device = self.token_projection.weight.device
dtype = self.token_projection.weight.dtype
y_embed = torch.arange(1, H + 1, device=device).to(torch.float32).unsqueeze(1).repeat(1, 1, W)
x_embed = torch.arange(1, W + 1, device=device).to(torch.float32).repeat(1, H, 1)
y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(self.hidden_dim, device=device).to(torch.float32)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos.to(dtype))
return pos.repeat(B, 1, 1, 1) # (B, C, H, W)
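def _positional_encoding_fourier_demo():
    """Minimal sketch (illustrative values only): shows the output of the Fourier positional
    encoding above, a `dim`-channel map per spatial position that is later flattened and added
    to the patch tokens.
    """
    pe = PositionalEncodingFourier(hidden_dim=32, dim=128)
    pos = pe(B=2, H=14, W=14)  # (B, dim, H, W)
    assert pos.shape == (2, 128, 14, 14)
    return pos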
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution + batch norm"""
return torch.nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_planes)
)
class ConvPatchEmbed(nn.Module):
"""Image to Patch Embedding using multiple convolutional layers"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU):
super().__init__()
img_size = to_2tuple(img_size)
num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if patch_size == 16:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 8, 2),
act_layer(),
conv3x3(embed_dim // 8, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
elif patch_size == 8:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
else:
            raise ValueError('For convolutional projection, patch size has to be in [8, 16]')
def forward(self, x):
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
x = x.flatten(2).transpose(1, 2) # (B, N, C)
return x, (Hp, Wp)
class LPI(nn.Module):
"""
Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the
implicit communication performed by the block diagonal scatter attention. Implemented using 2 layers of separable
3x3 convolutions with GeLU and BatchNorm2d
"""
def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(
in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)
self.act = act_layer()
self.bn = nn.BatchNorm2d(in_features)
self.conv2 = torch.nn.Conv2d(
in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)
def forward(self, x, H: int, W: int):
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
class ClassAttentionBlock(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
eta=1.,
tokens_norm=False,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = ClassAttn(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
if eta is not None: # LayerScale Initialization (no layerscale when None)
self.gamma1 = nn.Parameter(eta * torch.ones(dim))
self.gamma2 = nn.Parameter(eta * torch.ones(dim))
else:
self.gamma1, self.gamma2 = 1.0, 1.0
# See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721
self.tokens_norm = tokens_norm
def forward(self, x):
x_norm1 = self.norm1(x)
x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1)
x = x + self.drop_path(self.gamma1 * x_attn)
if self.tokens_norm:
x = self.norm2(x)
else:
x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1)
x_res = x
cls_token = x[:, 0:1]
cls_token = self.gamma2 * self.mlp(cls_token)
x = torch.cat([cls_token, x[:, 1:]], dim=1)
x = x_res + self.drop_path(x)
return x
class XCA(nn.Module):
    """ Cross-Covariance Attention (XCA)
    Operation where the channels are updated using a weighted sum. The weights are obtained from the (softmax
    normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h)
    """
    fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
self.fused_attn = use_fused_attn(experimental=True)
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
# Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N)
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
if self.fused_attn:
q = torch.nn.functional.normalize(q, dim=-1) * self.temperature
k = torch.nn.functional.normalize(k, dim=-1)
x = torch.nn.functional.scaled_dot_product_attention(q, k, v, scale=1.0)
else:
# Paper section 3.2 l2-Normalization and temperature scaling
q = torch.nn.functional.normalize(q, dim=-1)
k = torch.nn.functional.normalize(k, dim=-1)
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.permute(0, 3, 1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@torch.jit.ignore
def no_weight_decay(self):
return {'temperature'}
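def _xca_shape_demo():
    """Minimal sketch (illustrative dimensions only): unlike token self-attention, XCA forms its
    attention map over channels, i.e. (B, num_heads, C // num_heads, C // num_heads), so its cost
    grows linearly with the token count N while token and channel counts are preserved.
    """
    xca = XCA(dim=64, num_heads=8)
    x = torch.randn(2, 196, 64)  # (B, N, C) tokens, e.g. 14x14 patches
    out = xca(x)
    assert out.shape == x.shape  # token count and channels are preserved
    return out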
class XCABlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
eta=1.,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm3 = norm_layer(dim)
self.local_mp = LPI(in_features=dim, act_layer=act_layer)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
self.gamma1 = nn.Parameter(eta * torch.ones(dim))
self.gamma3 = nn.Parameter(eta * torch.ones(dim))
self.gamma2 = nn.Parameter(eta * torch.ones(dim))
def forward(self, x, H: int, W: int):
x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
# NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights
# See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721
x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W))
x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
return x
class Xcit(nn.Module):
"""
Based on timm and DeiT code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
act_layer=None,
norm_layer=None,
cls_attn_layers=2,
use_pos_embed=True,
eta=1.,
tokens_norm=False,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int): patch size
in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            global_pool (str): type of global pooling for final sequence ('', 'avg' or 'token')
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP
pos_drop_rate: position embedding dropout rate
proj_drop_rate (float): projection dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate (constant across all layers)
            act_layer: (nn.Module) activation layer
            norm_layer: (nn.Module) normalization layer
cls_attn_layers: (int) Depth of Class attention layers
use_pos_embed: (bool) whether to use positional encoding
eta: (float) layerscale initialization value
tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
Notes:
            - Although `norm_layer` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch
              interaction (class LPI) and the patch embedding (class ConvPatchEmbed)
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
img_size = to_2tuple(img_size)
        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \
            '`patch_size` should divide image dimensions evenly'
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim
self.global_pool = global_pool
self.grad_checkpointing = False
self.patch_embed = ConvPatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
act_layer=act_layer,
)
r = patch_size
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_pos_embed:
self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
self.blocks = nn.ModuleList([
XCABlock(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=drop_path_rate,
act_layer=act_layer,
norm_layer=norm_layer,
eta=eta,
)
for _ in range(depth)])
self.feature_info = [dict(num_chs=embed_dim, reduction=r, module=f'blocks.{i}') for i in range(depth)]
self.cls_attn_blocks = nn.ModuleList([
ClassAttentionBlock(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=drop_rate,
attn_drop=attn_drop_rate,
act_layer=act_layer,
norm_layer=norm_layer,
eta=eta,
tokens_norm=tokens_norm,
)
for _ in range(cls_attn_layers)])
# Classifier head
self.norm = norm_layer(embed_dim)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Init weights
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=r'^blocks\.(\d+)',
cls_attn_blocks=[(r'^cls_attn_blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            A list of intermediate features if `intermediates_only`, otherwise a tuple of
            (final features, list of intermediate features).
        """
assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
reshape = output_fmt == 'NCHW'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# forward pass
B, _, height, width = x.shape
x, (Hp, Wp) = self.patch_embed(x)
if self.pos_embed is not None:
# `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C)
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
for i, blk in enumerate(blocks):
x = blk(x, Hp, Wp)
if i in take_indices:
# normalize intermediates with final norm layer if enabled
intermediates.append(self.norm(x) if norm else x)
# process intermediates
if reshape:
# reshape to BCHW output format
intermediates = [y.reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]
if intermediates_only:
return intermediates
# NOTE not supporting return of class tokens
x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
self.blocks = self.blocks[:max_index + 1] # truncate blocks
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.cls_attn_blocks = nn.ModuleList() # prune token blocks with head
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
B = x.shape[0]
# x is (B, N, C). (Hp, Hw) is (height in units of patches, width in units of patches)
x, (Hp, Wp) = self.patch_embed(x)
if self.pos_embed is not None:
# `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C)
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, Hp, Wp)
else:
x = blk(x, Hp, Wp)
x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1)
for blk in self.cls_attn_blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x)
else:
x = blk(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
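def _xcit_constructor_demo():
    """Minimal sketch: constructs `Xcit` directly with a small, arbitrary config to make the
    constructor arguments documented above concrete. Real configurations come from the registered
    entrypoints further below; the values here are illustration only.
    """
    model = Xcit(
        img_size=224,
        patch_size=16,
        embed_dim=128,
        depth=2,  # only 2 XCA blocks to keep the demo light
        num_heads=4,
        eta=1.0,
        tokens_norm=False,
    )
    out = model(torch.randn(1, 3, 224, 224))
    assert out.shape == (1, 1000)  # default num_classes
    return out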
def checkpoint_filter_fn(state_dict, model):
if 'model' in state_dict:
state_dict = state_dict['model']
# For consistency with timm's transformer models while being compatible with official weights source we rename
# pos_embeder to pos_embed. Also account for use_pos_embed == False
use_pos_embed = getattr(model, 'pos_embed', None) is not None
pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')]
for k in pos_embed_keys:
if use_pos_embed:
state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k)
else:
del state_dict[k]
# timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors
# for all tokens, just the class token. To use official weights source we must split qkv into q, k, v
if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict():
num_ca_blocks = len(model.cls_attn_blocks)
for i in range(num_ca_blocks):
qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight')
qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1])
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j]
qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None)
if qkv_bias is not None:
qkv_bias = qkv_bias.reshape(3, -1)
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j]
return state_dict
def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
model = build_model_with_cfg(
Xcit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Patch size 16
'xcit_nano_12_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'),
'xcit_nano_12_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'),
'xcit_nano_12_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_12_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'),
'xcit_tiny_12_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'),
'xcit_tiny_12_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'),
'xcit_tiny_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'),
'xcit_tiny_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_12_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'),
'xcit_small_12_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'),
'xcit_small_12_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'),
'xcit_small_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'),
'xcit_small_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_medium_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'),
'xcit_medium_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'),
'xcit_medium_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_large_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'),
'xcit_large_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'),
'xcit_large_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)),
# Patch size 8
'xcit_nano_12_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'),
'xcit_nano_12_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'),
'xcit_nano_12_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_12_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'),
'xcit_tiny_12_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'),
'xcit_tiny_12_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'),
'xcit_tiny_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'),
'xcit_tiny_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_12_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'),
'xcit_small_12_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'),
'xcit_small_12_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'),
'xcit_small_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'),
'xcit_small_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_medium_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'),
'xcit_medium_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'),
'xcit_medium_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_large_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'),
'xcit_large_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'),
'xcit_large_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)),
})
@register_model
def xcit_nano_12_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False)
model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_nano_12_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384)
model = _create_xcit('xcit_nano_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
# Patch size 8x8 models
@register_model
def xcit_nano_12_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False)
model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_nano_12_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False)
model = _create_xcit('xcit_nano_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
# Patch size 16
'xcit_nano_12_p16_224_dist': 'xcit_nano_12_p16_224.fb_dist_in1k',
'xcit_nano_12_p16_384_dist': 'xcit_nano_12_p16_384.fb_dist_in1k',
'xcit_tiny_12_p16_224_dist': 'xcit_tiny_12_p16_224.fb_dist_in1k',
'xcit_tiny_12_p16_384_dist': 'xcit_tiny_12_p16_384.fb_dist_in1k',
'xcit_tiny_24_p16_224_dist': 'xcit_tiny_24_p16_224.fb_dist_in1k',
'xcit_tiny_24_p16_384_dist': 'xcit_tiny_24_p16_384.fb_dist_in1k',
'xcit_small_12_p16_224_dist': 'xcit_small_12_p16_224.fb_dist_in1k',
'xcit_small_12_p16_384_dist': 'xcit_small_12_p16_384.fb_dist_in1k',
'xcit_small_24_p16_224_dist': 'xcit_small_24_p16_224.fb_dist_in1k',
'xcit_small_24_p16_384_dist': 'xcit_small_24_p16_384.fb_dist_in1k',
'xcit_medium_24_p16_224_dist': 'xcit_medium_24_p16_224.fb_dist_in1k',
'xcit_medium_24_p16_384_dist': 'xcit_medium_24_p16_384.fb_dist_in1k',
'xcit_large_24_p16_224_dist': 'xcit_large_24_p16_224.fb_dist_in1k',
'xcit_large_24_p16_384_dist': 'xcit_large_24_p16_384.fb_dist_in1k',
# Patch size 8
'xcit_nano_12_p8_224_dist': 'xcit_nano_12_p8_224.fb_dist_in1k',
'xcit_nano_12_p8_384_dist': 'xcit_nano_12_p8_384.fb_dist_in1k',
'xcit_tiny_12_p8_224_dist': 'xcit_tiny_12_p8_224.fb_dist_in1k',
'xcit_tiny_12_p8_384_dist': 'xcit_tiny_12_p8_384.fb_dist_in1k',
'xcit_tiny_24_p8_224_dist': 'xcit_tiny_24_p8_224.fb_dist_in1k',
'xcit_tiny_24_p8_384_dist': 'xcit_tiny_24_p8_384.fb_dist_in1k',
'xcit_small_12_p8_224_dist': 'xcit_small_12_p8_224.fb_dist_in1k',
'xcit_small_12_p8_384_dist': 'xcit_small_12_p8_384.fb_dist_in1k',
'xcit_small_24_p8_224_dist': 'xcit_small_24_p8_224.fb_dist_in1k',
'xcit_small_24_p8_384_dist': 'xcit_small_24_p8_384.fb_dist_in1k',
'xcit_medium_24_p8_224_dist': 'xcit_medium_24_p8_224.fb_dist_in1k',
'xcit_medium_24_p8_384_dist': 'xcit_medium_24_p8_384.fb_dist_in1k',
'xcit_large_24_p8_224_dist': 'xcit_large_24_p8_224.fb_dist_in1k',
'xcit_large_24_p8_384_dist': 'xcit_large_24_p8_384.fb_dist_in1k',
})
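def _xcit_features_demo():
    """Minimal sketch: builds the smallest registered variant above with random weights and shows
    both the classification path and intermediate feature extraction via `forward_intermediates`.
    Shapes noted in the comments hold for this particular config and input size only.
    """
    model = xcit_nano_12_p16_224(pretrained=False, num_classes=10)
    model.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)  # (1, 10)
        feats = model.forward_intermediates(x, indices=2, intermediates_only=True)
    # two NCHW maps from the last two XCA blocks, each (1, 128, 14, 14) at 1/16 resolution
    return logits, feats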
| pytorch-image-models/timm/models/xcit.py/0 | {
"file_path": "pytorch-image-models/timm/models/xcit.py",
"repo_id": "pytorch-image-models",
"token_count": 20451
} | 211 |
""" Optimizer Factory w/ Custom Weight Decay
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
from itertools import islice
from typing import Optional, Callable, Tuple
import torch
import torch.nn as nn
import torch.optim as optim
from timm.models import group_parameters
from .adabelief import AdaBelief
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .adan import Adan
from .lamb import Lamb
from .lars import Lars
from .lion import Lion
from .lookahead import Lookahead
from .madgrad import MADGRAD
from .nadam import Nadam
from .nadamw import NAdamW
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
from .sgdw import SGDW
_logger = logging.getLogger(__name__)
# optimizers to default to multi-tensor
_DEFAULT_FOREACH = {
'lion',
}
def param_groups_weight_decay(
model: nn.Module,
weight_decay=1e-5,
no_weight_decay_list=()
):
no_weight_decay_list = set(no_weight_decay_list)
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
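def _param_groups_weight_decay_demo():
    """Minimal sketch of the grouping rule above on a toy model: 1D parameters (biases, norm
    weights) land in the zero weight-decay group, while weight matrices / conv kernels keep the
    requested decay. Layer sizes are arbitrary.
    """
    model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 10))
    groups = param_groups_weight_decay(model, weight_decay=0.05)
    assert groups[0]['weight_decay'] == 0. and len(groups[0]['params']) == 4   # 2 biases + LN weight/bias
    assert groups[1]['weight_decay'] == 0.05 and len(groups[1]['params']) == 2  # 2 Linear weight matrices
    return groups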
def _group(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
def _layer_map(model, layers_per_group=12, num_groups=None):
def _in_head(n, hp):
if not hp:
return True
elif isinstance(hp, (tuple, list)):
return any([n.startswith(hpi) for hpi in hp])
else:
return n.startswith(hp)
head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None)
names_trunk = []
names_head = []
for n, _ in model.named_parameters():
names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n)
# group non-head layers
num_trunk_layers = len(names_trunk)
if num_groups is not None:
layers_per_group = -(num_trunk_layers // -num_groups)
names_trunk = list(_group(names_trunk, layers_per_group))
num_trunk_groups = len(names_trunk)
layer_map = {n: i for i, l in enumerate(names_trunk) for n in l}
layer_map.update({n: num_trunk_groups for n in names_head})
return layer_map
def param_groups_layer_decay(
model: nn.Module,
weight_decay: float = 0.05,
        no_weight_decay_list: Tuple[str, ...] = (),
layer_decay: float = .75,
end_layer_decay: Optional[float] = None,
verbose: bool = False,
):
"""
Parameter groups for layer-wise lr decay & weight decay
Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
"""
no_weight_decay_list = set(no_weight_decay_list)
param_group_names = {} # NOTE for debugging
param_groups = {}
if hasattr(model, 'group_matcher'):
# FIXME interface needs more work
layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True)
else:
# fallback
layer_map = _layer_map(model)
num_layers = max(layer_map.values()) + 1
layer_max = num_layers - 1
layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers))
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# no decay: all 1D parameters and model specific ones
if param.ndim == 1 or name in no_weight_decay_list:
g_decay = "no_decay"
this_decay = 0.
else:
g_decay = "decay"
this_decay = weight_decay
layer_id = layer_map.get(name, layer_max)
group_name = "layer_%d_%s" % (layer_id, g_decay)
if group_name not in param_groups:
this_scale = layer_scales[layer_id]
param_group_names[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"param_names": [],
}
param_groups[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"params": [],
}
param_group_names[group_name]["param_names"].append(name)
param_groups[group_name]["params"].append(param)
if verbose:
import json
_logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
return list(param_groups.values())
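def _layer_decay_schedule_demo(num_layers: int = 4, layer_decay: float = 0.75):
    """Minimal sketch reproducing the per-layer lr scale computed above so the schedule is easy
    to inspect: the earliest layer group gets the smallest multiplier and the final (head) group
    gets 1.0. Default arguments are illustrative only.
    """
    layer_max = num_layers - 1
    layer_scales = [layer_decay ** (layer_max - i) for i in range(num_layers)]
    # e.g. num_layers=4, layer_decay=0.75 -> [0.421875, 0.5625, 0.75, 1.0]
    return layer_scales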
def optimizer_kwargs(cfg):
""" cfg/argparse to kwargs helper
Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
"""
kwargs = dict(
opt=cfg.opt,
lr=cfg.lr,
weight_decay=cfg.weight_decay,
momentum=cfg.momentum,
)
if getattr(cfg, 'opt_eps', None) is not None:
kwargs['eps'] = cfg.opt_eps
if getattr(cfg, 'opt_betas', None) is not None:
kwargs['betas'] = cfg.opt_betas
if getattr(cfg, 'layer_decay', None) is not None:
kwargs['layer_decay'] = cfg.layer_decay
if getattr(cfg, 'opt_args', None) is not None:
kwargs.update(cfg.opt_args)
if getattr(cfg, 'opt_foreach', None) is not None:
kwargs['foreach'] = cfg.opt_foreach
return kwargs
def create_optimizer(args, model, filter_bias_and_bn=True):
""" Legacy optimizer factory for backwards compatibility.
NOTE: Use create_optimizer_v2 for new code.
"""
return create_optimizer_v2(
model,
**optimizer_kwargs(cfg=args),
filter_bias_and_bn=filter_bias_and_bn,
)
def create_optimizer_v2(
model_or_params,
opt: str = 'sgd',
lr: Optional[float] = None,
weight_decay: float = 0.,
momentum: float = 0.9,
foreach: Optional[bool] = None,
filter_bias_and_bn: bool = True,
layer_decay: Optional[float] = None,
param_group_fn: Optional[Callable] = None,
**kwargs,
):
""" Create an optimizer.
TODO currently the model is passed in and all parameters are selected for optimization.
For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
* a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
* expose the parameters interface and leave it up to caller
Args:
        model_or_params (nn.Module or iterable): model containing parameters to optimize, or an
            iterable of parameters / parameter groups
opt: name of optimizer to create
lr: initial learning rate
weight_decay: weight decay to apply in optimizer
momentum: momentum for momentum based optimizers (others may use betas via kwargs)
foreach: Enable / disable foreach (multi-tensor) operation if True / False. Choose safe default if None
        filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
        layer_decay: layer-wise learning rate decay factor, builds per-layer parameter groups when set
        param_group_fn: optional callable that takes the model and returns custom parameter groups
        **kwargs: extra optimizer specific kwargs to pass through
Returns:
Optimizer
"""
if isinstance(model_or_params, nn.Module):
# a model was passed in, extract parameters and add weight decays to appropriate layers
no_weight_decay = {}
if hasattr(model_or_params, 'no_weight_decay'):
no_weight_decay = model_or_params.no_weight_decay()
if param_group_fn:
parameters = param_group_fn(model_or_params)
elif layer_decay is not None:
parameters = param_groups_layer_decay(
model_or_params,
weight_decay=weight_decay,
layer_decay=layer_decay,
no_weight_decay_list=no_weight_decay,
)
weight_decay = 0.
elif weight_decay and filter_bias_and_bn:
parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay)
weight_decay = 0.
else:
parameters = model_or_params.parameters()
else:
# iterable of parameters or param groups passed in
parameters = model_or_params
opt_lower = opt.lower()
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower.startswith('fused'):
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
if opt_lower.startswith('bnb'):
try:
import bitsandbytes as bnb
has_bnb = True
except ImportError:
has_bnb = False
assert has_bnb and torch.cuda.is_available(), 'bitsandbytes and CUDA required for bnb optimizers'
opt_args = dict(weight_decay=weight_decay, **kwargs)
if lr is not None:
opt_args.setdefault('lr', lr)
if foreach is None:
        if opt_lower in _DEFAULT_FOREACH:
opt_args.setdefault('foreach', True)
else:
opt_args['foreach'] = foreach
# basic SGD & related
if opt_lower == 'sgd' or opt_lower == 'nesterov':
# NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'sgdw' or opt_lower == 'nesterovw':
        # NOTE 'sgdw' refers to SGDW + nesterov momentum, mirroring the legacy 'sgd' naming above
opt_args.pop('eps', None)
optimizer = SGDW(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentumw':
opt_args.pop('eps', None)
optimizer = SGDW(parameters, momentum=momentum, nesterov=False, **opt_args)
# adaptive
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'nadam':
try:
            # NOTE PyTorch >= 1.10 has a native NAdam implementation
            optimizer = optim.NAdam(parameters, **opt_args)
except AttributeError:
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'nadamw':
optimizer = NAdamW(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamax':
optimizer = optim.Adamax(parameters, **opt_args)
elif opt_lower == 'adabelief':
optimizer = AdaBelief(parameters, rectify=False, **opt_args)
elif opt_lower == 'radabelief':
optimizer = AdaBelief(parameters, rectify=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adagrad':
opt_args.setdefault('eps', 1e-8)
optimizer = optim.Adagrad(parameters, **opt_args)
elif opt_lower == 'adafactor':
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adanp':
optimizer = Adan(parameters, no_prox=False, **opt_args)
elif opt_lower == 'adanw':
optimizer = Adan(parameters, no_prox=True, **opt_args)
elif opt_lower == 'lamb':
optimizer = Lamb(parameters, **opt_args)
elif opt_lower == 'lambc':
optimizer = Lamb(parameters, trust_clip=True, **opt_args)
elif opt_lower == 'larc':
optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
elif opt_lower == 'lars':
optimizer = Lars(parameters, momentum=momentum, **opt_args)
elif opt_lower == 'nlarc':
optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
elif opt_lower == 'nlars':
optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'madgrad':
optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
elif opt_lower == 'madgradw':
optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
elif opt_lower == 'lion':
opt_args.pop('eps', None)
optimizer = Lion(parameters, **opt_args)
# second order
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
# NVIDIA fused optimizers, require APEX to be installed
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
# bitsandbytes optimizers, require bitsandbytes to be installed
elif opt_lower == 'bnbsgd':
opt_args.pop('eps', None)
optimizer = bnb.optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'bnbsgd8bit':
opt_args.pop('eps', None)
optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'bnbmomentum':
opt_args.pop('eps', None)
optimizer = bnb.optim.SGD(parameters, momentum=momentum, **opt_args)
elif opt_lower == 'bnbmomentum8bit':
opt_args.pop('eps', None)
optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, **opt_args)
elif opt_lower == 'bnbadam':
optimizer = bnb.optim.Adam(parameters, **opt_args)
elif opt_lower == 'bnbadam8bit':
optimizer = bnb.optim.Adam8bit(parameters, **opt_args)
elif opt_lower == 'bnbadamw':
optimizer = bnb.optim.AdamW(parameters, **opt_args)
elif opt_lower == 'bnbadamw8bit':
optimizer = bnb.optim.AdamW8bit(parameters, **opt_args)
elif opt_lower == 'bnblamb':
optimizer = bnb.optim.LAMB(parameters, **opt_args)
elif opt_lower == 'bnblamb8bit':
optimizer = bnb.optim.LAMB8bit(parameters, **opt_args)
elif opt_lower == 'bnblars':
optimizer = bnb.optim.LARS(parameters, **opt_args)
elif opt_lower == 'bnblarsb8bit':
optimizer = bnb.optim.LAMB8bit(parameters, **opt_args)
elif opt_lower == 'bnblion':
optimizer = bnb.optim.Lion(parameters, **opt_args)
elif opt_lower == 'bnblion8bit':
optimizer = bnb.optim.Lion8bit(parameters, **opt_args)
    else:
        raise ValueError(f'Invalid optimizer: {opt_lower}')
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
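def _create_optimizer_v2_demo():
    """Minimal sketch of typical factory use with a plain nn.Module: with a non-zero weight_decay
    and the default filter_bias_and_bn=True, two parameter groups are built (decayed and
    non-decayed). Optimizer name and hyper-parameters are arbitrary illustration values.
    """
    model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 10))
    optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
    assert len(optimizer.param_groups) == 2
    return optimizer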
| pytorch-image-models/timm/optim/optim_factory.py/0 | {
"file_path": "pytorch-image-models/timm/optim/optim_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 6927
} | 212 |
import fnmatch
import re
from collections import OrderedDict
from typing import Optional, List
import torch
class AttentionExtract(torch.nn.Module):
# defaults should cover a significant number of timm models with attention maps.
default_node_names = ['*attn.softmax']
default_module_names = ['*attn_drop']
def __init__(
self,
            model: torch.nn.Module,
names: Optional[List[str]] = None,
mode: str = 'eval',
method: str = 'fx',
hook_type: str = 'forward',
use_regex: bool = False,
):
""" Extract attention maps (or other activations) from a model by name.
Args:
model: Instantiated model to extract from.
names: List of concrete or wildcard names to extract. Names are nodes for fx and modules for hooks.
mode: 'train' or 'eval' model mode.
method: 'fx' or 'hook' extraction method.
hook_type: 'forward' or 'forward_pre' hooks used.
use_regex: Use regex instead of fnmatch
"""
super().__init__()
assert mode in ('train', 'eval')
if mode == 'train':
model = model.train()
else:
model = model.eval()
assert method in ('fx', 'hook')
if method == 'fx':
# names are activation node names
from timm.models._features_fx import get_graph_node_names, GraphExtractNet
node_names = get_graph_node_names(model)[0 if mode == 'train' else 1]
names = names or self.default_node_names
if use_regex:
regexes = [re.compile(r) for r in names]
matched = [g for g in node_names if any([r.match(g) for r in regexes])]
else:
matched = [g for g in node_names if any([fnmatch.fnmatch(g, n) for n in names])]
if not matched:
raise RuntimeError(f'No node names found matching {names}.')
self.model = GraphExtractNet(model, matched, return_dict=True)
self.hooks = None
else:
# names are module names
assert hook_type in ('forward', 'forward_pre')
from timm.models._features import FeatureHooks
module_names = [n for n, m in model.named_modules()]
names = names or self.default_module_names
if use_regex:
regexes = [re.compile(r) for r in names]
matched = [m for m in module_names if any([r.match(m) for r in regexes])]
else:
matched = [m for m in module_names if any([fnmatch.fnmatch(m, n) for n in names])]
if not matched:
raise RuntimeError(f'No module names found matching {names}.')
self.model = model
self.hooks = FeatureHooks(matched, model.named_modules(), default_hook_type=hook_type)
self.names = matched
self.mode = mode
self.method = method
def forward(self, x):
if self.hooks is not None:
self.model(x)
output = self.hooks.get_output(device=x.device)
else:
output = self.model(x)
return output
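def _attention_extract_demo():
    """Minimal sketch: pulls per-block attention maps out of a timm ViT with the default
    '*attn.softmax' node pattern. The entrypoint name 'vit_tiny_patch16_224' and the
    `timm.layers.set_fused_attn` toggle are assumptions about the surrounding timm install;
    fused SDPA kernels do not expose a softmax node for FX to match, hence the toggle.
    """
    import timm
    from timm.layers import set_fused_attn  # assumed available in recent timm versions
    set_fused_attn(False)  # ensure explicit softmax nodes exist in the traced graph
    model = timm.create_model('vit_tiny_patch16_224', pretrained=False)
    extractor = AttentionExtract(model, method='fx')
    maps = extractor(torch.randn(1, 3, 224, 224))
    # dict keyed by matched node names, e.g. 'blocks.0.attn.softmax' -> (1, heads, N, N)
    return {name: t.shape for name, t in maps.items()}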
| pytorch-image-models/timm/utils/attention_extract.py/0 | {
"file_path": "pytorch-image-models/timm/utils/attention_extract.py",
"repo_id": "pytorch-image-models",
"token_count": 1467
} | 213 |
#!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import importlib
import json
import logging
import os
import time
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
from functools import partial
import torch
import torch.nn as nn
import torchvision.utils
import yaml
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm import utils
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.layers import convert_splitbn_model, convert_sync_batchnorm, set_fast_norm
from timm.loss import JsdCrossEntropy, SoftTargetCrossEntropy, BinaryCrossEntropy, LabelSmoothingCrossEntropy
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, model_parameters
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler_v2, scheduler_kwargs
from timm.utils import ApexScaler, NativeScaler
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
group = parser.add_argument_group('Dataset parameters')
# Keep this argument outside the dataset group because it is positional.
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (positional is *deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
group.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
group.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--train-num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in train split, for IterableDatasets.')
parser.add_argument('--val-num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in validation split, for IterableDatasets.')
group.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
group.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
group.add_argument('--input-img-mode', default=None, type=str,
help='Dataset image conversion mode for input images.')
group.add_argument('--input-key', default=None, type=str,
help='Dataset key for input images.')
group.add_argument('--target-key', default=None, type=str,
help='Dataset key for target labels.')
# Model parameters
group = parser.add_argument_group('Model parameters')
group.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
help='Name of model to train (default: "resnet50")')
group.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
group.add_argument('--pretrained-path', default=None, type=str,
help='Load this checkpoint as if they were the pretrained weights (with adaptation).')
group.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Load this checkpoint into model after initialization (default: none)')
group.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
group.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
group.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
group.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
group.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image size (default: None => model default)')
group.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
group.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N',
help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
group.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
group.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
group.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
group.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
group.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
help='Input batch size for training (default: 128)')
group.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
help='Validation batch size override (default: None)')
group.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
group.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
group.add_argument('--grad-accum-steps', type=int, default=1, metavar='N',
help='The number of steps to accumulate gradients (default: 1)')
group.add_argument('--grad-checkpointing', action='store_true', default=False,
help='Enable gradient checkpointing through model blocks/stages')
group.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
group.add_argument('--model-kwargs', nargs='*', default={}, action=utils.ParseKwargs)
group.add_argument('--head-init-scale', default=None, type=float,
help='Head initialization scale')
group.add_argument('--head-init-bias', default=None, type=float,
help='Head initialization bias value')
# scripting / codegen
scripting_group = group.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
# Device & distributed
group = parser.add_argument_group('Device parameters')
group.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
group.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
group.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
group.add_argument('--amp-impl', default='native', type=str,
help='AMP impl to use, "native" or "apex" (default: native)')
group.add_argument('--no-ddp-bb', action='store_true', default=False,
help='Force broadcast buffers for native DDP to off.')
group.add_argument('--synchronize-step', action='store_true', default=False,
help='torch.cuda.synchronize() end of each step')
group.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--device-modules', default=None, type=str, nargs='+',
help="Python imports for device backend modules.")
# Optimizer parameters
group = parser.add_argument_group('Optimizer parameters')
group.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "sgd")')
group.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
group.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
group.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
group.add_argument('--weight-decay', type=float, default=2e-5,
help='weight decay (default: 2e-5)')
group.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
group.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
group.add_argument('--layer-decay', type=float, default=None,
help='layer-wise learning rate decay (default: None)')
group.add_argument('--opt-kwargs', nargs='*', default={}, action=utils.ParseKwargs)
# Learning rate schedule parameters
group = parser.add_argument_group('Learning rate schedule parameters')
group.add_argument('--sched', type=str, default='cosine', metavar='SCHEDULER',
                   help='LR scheduler (default: "cosine")')
group.add_argument('--sched-on-updates', action='store_true', default=False,
help='Apply LR scheduler step on update instead of epoch end.')
group.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate, overrides lr-base if set (default: None)')
group.add_argument('--lr-base', type=float, default=0.1, metavar='LR',
help='base learning rate: lr = lr_base * global_batch_size / base_size')
group.add_argument('--lr-base-size', type=int, default=256, metavar='DIV',
help='base learning rate batch size (divisor, default: 256).')
group.add_argument('--lr-base-scale', type=str, default='', metavar='SCALE',
help='base learning rate vs batch_size scaling ("linear", "sqrt", based on opt if empty)')
group.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
group.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
group.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
group.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
group.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
help='amount to decay each learning rate cycle (default: 0.5)')
group.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit, cycles enabled if > 1')
group.add_argument('--lr-k-decay', type=float, default=1.0,
help='learning rate k-decay for cosine/poly (default: 1.0)')
group.add_argument('--warmup-lr', type=float, default=1e-5, metavar='LR',
help='warmup learning rate (default: 1e-5)')
group.add_argument('--min-lr', type=float, default=0, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (default: 0)')
group.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
group.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
group.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
group.add_argument('--decay-milestones', default=[90, 180, 270], type=int, nargs='+', metavar="MILESTONES",
help='list of decay epoch indices for multistep lr. must be increasing')
group.add_argument('--decay-epochs', type=float, default=90, metavar='N',
help='epoch interval to decay LR')
group.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
group.add_argument('--warmup-prefix', action='store_true', default=False,
                   help='Exclude warmup period from decay schedule.')
group.add_argument('--cooldown-epochs', type=int, default=0, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
group.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10)')
group.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
group = parser.add_argument_group('Augmentation and regularization parameters')
group.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
group.add_argument('--train-crop-mode', type=str, default=None,
                   help='Crop mode used during training.')
group.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
group.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
group.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
group.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
group.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
group.add_argument('--color-jitter-prob', type=float, default=None, metavar='PCT',
help='Probability of applying any color jitter.')
group.add_argument('--grayscale-prob', type=float, default=None, metavar='PCT',
help='Probability of applying random grayscale conversion.')
group.add_argument('--gaussian-blur-prob', type=float, default=None, metavar='PCT',
help='Probability of applying gaussian blur.')
group.add_argument('--aa', type=str, default=None, metavar='NAME',
                   help='Use AutoAugment policy. "v0" or "original". (default: None)')
group.add_argument('--aug-repeats', type=float, default=0,
help='Number of augmentation repetitions (distributed training only) (default: 0)')
group.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
group.add_argument('--jsd-loss', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
group.add_argument('--bce-loss', action='store_true', default=False,
help='Enable BCE loss w/ Mixup/CutMix use.')
group.add_argument('--bce-sum', action='store_true', default=False,
help='Sum over classes when using BCE loss.')
group.add_argument('--bce-target-thresh', type=float, default=None,
help='Threshold for binarizing softened BCE targets (default: None, disabled).')
group.add_argument('--bce-pos-weight', type=float, default=None,
help='Positive weighting for BCE loss.')
group.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
group.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
group.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
group.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
group.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
group.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
group.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
group.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
group.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
group.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
group.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
group.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
group.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
group.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
group.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
group.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
group.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
group = parser.add_argument_group('Batch norm parameters', 'Only works with gen_efficientnet based models currently.')
group.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
group.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
group.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
group.add_argument('--dist-bn', type=str, default='reduce',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
group.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
group = parser.add_argument_group('Model exponential moving average parameters')
group.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights.')
group.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
group.add_argument('--model-ema-decay', type=float, default=0.9998,
help='Decay factor for model weights moving average (default: 0.9998)')
group.add_argument('--model-ema-warmup', action='store_true',
help='Enable warmup for model EMA decay.')
# Misc
group = parser.add_argument_group('Miscellaneous parameters')
group.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
group.add_argument('--worker-seeding', type=str, default='all',
help='worker seed mode (default: all)')
group.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
group.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
group.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
help='number of checkpoints to keep (default: 10)')
group.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
group.add_argument('--save-images', action='store_true', default=False,
                   help='save images of input batches every log interval for debugging')
group.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
group.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
group.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
group.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
group.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                   help='Best metric (default: "top1")')
group.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
group.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
group.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
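# Example of the two-stage config handling above (illustrative; the file name
# and values below are arbitrary):
#
#   # my_config.yaml
#   model: resnet50
#   batch_size: 256
#   amp: true
#
#   python train.py --config my_config.yaml --data-dir /path/to/imagenet --lr 0.5
#
# Values from the YAML file replace the argparse defaults, and anything passed
# explicitly on the command line still takes precedence over those YAML values.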
def main():
utils.setup_default_logging()
args, args_text = _parse_args()
if args.device_modules:
for module in args.device_modules:
importlib.import_module(module)
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
args.prefetcher = not args.no_prefetcher
args.grad_accum_steps = max(1, args.grad_accum_steps)
device = utils.init_distributed_device(args)
if args.distributed:
_logger.info(
            'Training in distributed mode with multiple processes, 1 device per process. '
f'Process {args.rank}, total {args.world_size}, device {args.device}.')
else:
_logger.info(f'Training with a single process on 1 device ({args.device}).')
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
amp_dtype = torch.float16
if args.amp:
if args.amp_impl == 'apex':
assert has_apex, 'AMP impl specified as APEX but APEX is not installed.'
use_amp = 'apex'
assert args.amp_dtype == 'float16'
else:
assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
use_amp = 'native'
assert args.amp_dtype in ('float16', 'bfloat16')
if args.amp_dtype == 'bfloat16':
amp_dtype = torch.bfloat16
utils.random_seed(args.seed, args.rank)
if args.fuser:
utils.set_jit_fuser(args.fuser)
if args.fast_norm:
set_fast_norm()
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
factory_kwargs = {}
if args.pretrained_path:
# merge with pretrained_cfg of model, 'file' has priority over 'url' and 'hf_hub'.
factory_kwargs['pretrained_cfg_overlay'] = dict(
file=args.pretrained_path,
num_classes=-1, # force head adaptation
)
model = create_model(
args.model,
pretrained=args.pretrained,
in_chans=in_chans,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint,
**factory_kwargs,
**args.model_kwargs,
)
if args.head_init_scale is not None:
with torch.no_grad():
model.get_classifier().weight.mul_(args.head_init_scale)
model.get_classifier().bias.mul_(args.head_init_scale)
if args.head_init_bias is not None:
nn.init.constant_(model.get_classifier().bias, args.head_init_bias)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly
if args.grad_checkpointing:
model.set_grad_checkpointing(enable=True)
if utils.is_primary(args):
_logger.info(
f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model, verbose=utils.is_primary(args))
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.to(device=device)
if args.channels_last:
model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
args.dist_bn = '' # disable dist_bn when sync BN active
assert not args.split_bn
if has_apex and use_amp == 'apex':
# Apex SyncBN used with Apex AMP
# WARNING this won't currently work with models using BatchNormAct2d
model = convert_syncbn_model(model)
else:
model = convert_sync_batchnorm(model)
if utils.is_primary(args):
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
if args.torchscript:
assert not args.torchcompile
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
model = torch.jit.script(model)
if not args.lr:
global_batch_size = args.batch_size * args.world_size * args.grad_accum_steps
batch_ratio = global_batch_size / args.lr_base_size
if not args.lr_base_scale:
on = args.opt.lower()
args.lr_base_scale = 'sqrt' if any([o in on for o in ('ada', 'lamb')]) else 'linear'
if args.lr_base_scale == 'sqrt':
batch_ratio = batch_ratio ** 0.5
args.lr = args.lr_base * batch_ratio
if utils.is_primary(args):
_logger.info(
f'Learning rate ({args.lr}) calculated from base learning rate ({args.lr_base}) '
f'and effective global batch size ({global_batch_size}) with {args.lr_base_scale} scaling.')
optimizer = create_optimizer_v2(
model,
**optimizer_kwargs(cfg=args),
**args.opt_kwargs,
)
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
assert device.type == 'cuda'
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if utils.is_primary(args):
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
try:
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
except (AttributeError, TypeError):
# fallback to CUDA only AMP for PyTorch < 1.10
assert device.type == 'cuda'
amp_autocast = torch.cuda.amp.autocast
if device.type == 'cuda' and amp_dtype == torch.float16:
# loss scaler only used for float16 (half) dtype, bfloat16 does not need it
loss_scaler = NativeScaler()
if utils.is_primary(args):
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if utils.is_primary(args):
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model,
args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=utils.is_primary(args),
)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before DDP wrapper
model_ema = utils.ModelEmaV3(
model,
decay=args.model_ema_decay,
use_warmup=args.model_ema_warmup,
device='cpu' if args.model_ema_force_cpu else None,
)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
if args.torchcompile:
model_ema = torch.compile(model_ema, backend=args.torchcompile)
# setup distributed training
if args.distributed:
if has_apex and use_amp == 'apex':
# Apex DDP preferred unless native amp is activated
if utils.is_primary(args):
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if utils.is_primary(args):
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[device], broadcast_buffers=not args.no_ddp_bb)
# NOTE: EMA model does not need to be wrapped by DDP
if args.torchcompile:
# torch compile should be done after DDP
assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
model = torch.compile(model, backend=args.torchcompile)
# create the train and eval datasets
if args.data and not args.data_dir:
args.data_dir = args.data
if args.input_img_mode is None:
input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L'
else:
input_img_mode = args.input_img_mode
dataset_train = create_dataset(
args.dataset,
root=args.data_dir,
split=args.train_split,
is_training=True,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size,
seed=args.seed,
repeats=args.epoch_repeats,
input_img_mode=input_img_mode,
input_key=args.input_key,
target_key=args.target_key,
num_samples=args.train_num_samples,
)
if args.val_split:
dataset_eval = create_dataset(
args.dataset,
root=args.data_dir,
split=args.val_split,
is_training=False,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size,
input_img_mode=input_img_mode,
input_key=args.input_key,
target_key=args.target_key,
num_samples=args.val_num_samples,
)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.num_classes
)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support de-interleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
# create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
train_crop_mode=args.train_crop_mode,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
color_jitter_prob=args.color_jitter_prob,
grayscale_prob=args.grayscale_prob,
gaussian_blur_prob=args.gaussian_blur_prob,
auto_augment=args.aa,
num_aug_repeats=args.aug_repeats,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
device=device,
use_prefetcher=args.prefetcher,
use_multi_epochs_loader=args.use_multi_epochs_loader,
worker_seeding=args.worker_seeding,
)
loader_eval = None
if args.val_split:
eval_workers = args.workers
if args.distributed and ('tfds' in args.dataset or 'wds' in args.dataset):
# FIXME reduces validation padding issues when using TFDS, WDS w/ workers and distributed training
eval_workers = min(2, args.workers)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size or args.batch_size,
is_training=False,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=eval_workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
device=device,
use_prefetcher=args.prefetcher,
)
# setup loss function
if args.jsd_loss:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
elif mixup_active:
# smoothing is handled with mixup target transform which outputs sparse, soft targets
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(
target_threshold=args.bce_target_thresh,
sum_classes=args.bce_sum,
pos_weight=args.bce_pos_weight,
)
else:
train_loss_fn = SoftTargetCrossEntropy()
elif args.smoothing:
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(
smoothing=args.smoothing,
target_threshold=args.bce_target_thresh,
sum_classes=args.bce_sum,
pos_weight=args.bce_pos_weight,
)
else:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
train_loss_fn = nn.CrossEntropyLoss()
train_loss_fn = train_loss_fn.to(device=device)
validate_loss_fn = nn.CrossEntropyLoss().to(device=device)
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric if loader_eval is not None else 'loss'
decreasing_metric = eval_metric == 'loss'
best_metric = None
best_epoch = None
saver = None
output_dir = None
if utils.is_primary(args):
if args.experiment:
exp_name = args.experiment
else:
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config['input_size'][-1])
])
output_dir = utils.get_outdir(args.output if args.output else './output/train', exp_name)
saver = utils.CheckpointSaver(
model=model,
optimizer=optimizer,
args=args,
model_ema=model_ema,
amp_scaler=loss_scaler,
checkpoint_dir=output_dir,
recovery_dir=output_dir,
decreasing=decreasing_metric,
max_history=args.checkpoint_hist
)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
if utils.is_primary(args) and args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning(
"You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`")
# setup learning rate schedule and starting epoch
updates_per_epoch = (len(loader_train) + args.grad_accum_steps - 1) // args.grad_accum_steps
lr_scheduler, num_epochs = create_scheduler_v2(
optimizer,
**scheduler_kwargs(args, decreasing_metric=decreasing_metric),
updates_per_epoch=updates_per_epoch,
)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
if args.sched_on_updates:
lr_scheduler.step_update(start_epoch * updates_per_epoch)
else:
lr_scheduler.step(start_epoch)
if utils.is_primary(args):
_logger.info(
f'Scheduled epochs: {num_epochs}. LR stepped per {"epoch" if lr_scheduler.t_in_epochs else "update"}.')
results = []
try:
for epoch in range(start_epoch, num_epochs):
if hasattr(dataset_train, 'set_epoch'):
dataset_train.set_epoch(epoch)
elif args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch,
model,
loader_train,
optimizer,
train_loss_fn,
args,
lr_scheduler=lr_scheduler,
saver=saver,
output_dir=output_dir,
amp_autocast=amp_autocast,
loss_scaler=loss_scaler,
model_ema=model_ema,
mixup_fn=mixup_fn,
num_updates_total=num_epochs * updates_per_epoch,
)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if utils.is_primary(args):
_logger.info("Distributing BatchNorm running means and vars")
utils.distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
if loader_eval is not None:
eval_metrics = validate(
model,
loader_eval,
validate_loss_fn,
args,
device=device,
amp_autocast=amp_autocast,
)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
utils.distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema,
loader_eval,
validate_loss_fn,
args,
device=device,
amp_autocast=amp_autocast,
log_suffix=' (EMA)',
)
eval_metrics = ema_eval_metrics
else:
eval_metrics = None
if output_dir is not None:
lrs = [param_group['lr'] for param_group in optimizer.param_groups]
utils.update_summary(
epoch,
train_metrics,
eval_metrics,
filename=os.path.join(output_dir, 'summary.csv'),
lr=sum(lrs) / len(lrs),
write_header=best_metric is None,
log_wandb=args.log_wandb and has_wandb,
)
if eval_metrics is not None:
latest_metric = eval_metrics[eval_metric]
else:
latest_metric = train_metrics[eval_metric]
if saver is not None:
# save proper checkpoint with eval metric
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=latest_metric)
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, latest_metric)
results.append({
'epoch': epoch,
'train': train_metrics,
'validation': eval_metrics,
})
except KeyboardInterrupt:
pass
results = {'all': results}
if best_metric is not None:
results['best'] = results['all'][best_epoch - start_epoch]
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
print(f'--result\n{json.dumps(results, indent=4)}')
def train_one_epoch(
epoch,
model,
loader,
optimizer,
loss_fn,
args,
device=torch.device('cuda'),
lr_scheduler=None,
saver=None,
output_dir=None,
amp_autocast=suppress,
loss_scaler=None,
model_ema=None,
mixup_fn=None,
num_updates_total=None,
):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
has_no_sync = hasattr(model, "no_sync")
update_time_m = utils.AverageMeter()
data_time_m = utils.AverageMeter()
losses_m = utils.AverageMeter()
model.train()
accum_steps = args.grad_accum_steps
last_accum_steps = len(loader) % accum_steps
updates_per_epoch = (len(loader) + accum_steps - 1) // accum_steps
num_updates = epoch * updates_per_epoch
last_batch_idx = len(loader) - 1
last_batch_idx_to_accum = len(loader) - last_accum_steps
data_start_time = update_start_time = time.time()
optimizer.zero_grad()
update_sample_count = 0
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_batch_idx
need_update = last_batch or (batch_idx + 1) % accum_steps == 0
update_idx = batch_idx // accum_steps
if batch_idx >= last_batch_idx_to_accum:
accum_steps = last_accum_steps
if not args.prefetcher:
input, target = input.to(device), target.to(device)
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# multiply by accum steps to get equivalent for full update
data_time_m.update(accum_steps * (time.time() - data_start_time))
def _forward():
with amp_autocast():
output = model(input)
loss = loss_fn(output, target)
if accum_steps > 1:
loss /= accum_steps
return loss
def _backward(_loss):
if loss_scaler is not None:
loss_scaler(
_loss,
optimizer,
clip_grad=args.clip_grad,
clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order,
need_update=need_update,
)
else:
_loss.backward(create_graph=second_order)
if need_update:
if args.clip_grad is not None:
utils.dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad,
mode=args.clip_mode,
)
optimizer.step()
if has_no_sync and not need_update:
with model.no_sync():
loss = _forward()
_backward(loss)
else:
loss = _forward()
_backward(loss)
if not args.distributed:
losses_m.update(loss.item() * accum_steps, input.size(0))
update_sample_count += input.size(0)
if not need_update:
data_start_time = time.time()
continue
num_updates += 1
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model, step=num_updates)
if args.synchronize_step and device.type == 'cuda':
torch.cuda.synchronize()
time_now = time.time()
update_time_m.update(time.time() - update_start_time)
update_start_time = time_now
if update_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = utils.reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item() * accum_steps, input.size(0))
update_sample_count *= args.world_size
if utils.is_primary(args):
_logger.info(
f'Train: {epoch} [{update_idx:>4d}/{updates_per_epoch} '
f'({100. * (update_idx + 1) / updates_per_epoch:>3.0f}%)] '
f'Loss: {losses_m.val:#.3g} ({losses_m.avg:#.3g}) '
f'Time: {update_time_m.val:.3f}s, {update_sample_count / update_time_m.val:>7.2f}/s '
f'({update_time_m.avg:.3f}s, {update_sample_count / update_time_m.avg:>7.2f}/s) '
f'LR: {lr:.3e} '
f'Data: {data_time_m.val:.3f} ({data_time_m.avg:.3f})'
)
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True
)
if saver is not None and args.recovery_interval and (
(update_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=update_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
update_sample_count = 0
data_start_time = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(
model,
loader,
loss_fn,
args,
device=torch.device('cuda'),
amp_autocast=suppress,
log_suffix=''
):
batch_time_m = utils.AverageMeter()
losses_m = utils.AverageMeter()
top1_m = utils.AverageMeter()
top5_m = utils.AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.to(device)
target = target.to(device)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = utils.reduce_tensor(loss.data, args.world_size)
acc1 = utils.reduce_tensor(acc1, args.world_size)
acc5 = utils.reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
if device.type == 'cuda':
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if utils.is_primary(args) and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
f'{log_name}: [{batch_idx:>4d}/{last_idx}] '
f'Time: {batch_time_m.val:.3f} ({batch_time_m.avg:.3f}) '
f'Loss: {losses_m.val:>7.3f} ({losses_m.avg:>6.3f}) '
f'Acc@1: {top1_m.val:>7.3f} ({top1_m.avg:>7.3f}) '
f'Acc@5: {top5_m.val:>7.3f} ({top5_m.avg:>7.3f})'
)
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
if __name__ == '__main__':
main()
| pytorch-image-models/train.py/0 | {
"file_path": "pytorch-image-models/train.py",
"repo_id": "pytorch-image-models",
"token_count": 24460
} | 214 |
# Text Generation Inference - TensorRT-LLM Backend Implementation
## Description
This folder contains the sources of the TensorRT-LLM backend implementation, powered by the new TensorRT-LLM Executor API.
## Simplified Request Sequence
```mermaid
sequenceDiagram
actor User
participant TextGenerationInference.HttpServer
participant TextGenerationInference.TensorRtLlmBackend
participant TextGenerationInference.TensorRtLlmWorkerThread
participant TensorRtLlm.Executor
participant Nvidia.Gpu
User ->> TextGenerationInference.HttpServer: POST /generate
TextGenerationInference.HttpServer ->> TextGenerationInference.TensorRtLlmBackend: Validate and forward inputs & parameters
TextGenerationInference.TensorRtLlmBackend ->> TextGenerationInference.TensorRtLlmWorkerThread: Allocate a new context and spawn a new thread to handle the request
TextGenerationInference.TensorRtLlmWorkerThread ->> TensorRtLlm.Executor: Submit the request to the In-Flight Batcher
activate Nvidia.Gpu
    TensorRtLlm.Executor ->> Nvidia.Gpu: Add the request to the execution pool
TensorRtLlm.Executor -->> TextGenerationInference.TensorRtLlmWorkerThread: Response with an unique request identifier
rect rgb(10, 92, 54)
loop every 100us
rect rgb(15, 81, 50)
alt Acquire lock to query executor
                TextGenerationInference.TensorRtLlmWorkerThread ->> TensorRtLlm.Executor: Poll the number of new token(s) generated for the request
else There are new generated tokens
TextGenerationInference.TensorRtLlmWorkerThread ->> TensorRtLlm.Executor: Retrieve newly generated tokens
TensorRtLlm.Executor -->> TextGenerationInference.TensorRtLlmWorkerThread: Return decoded token information and potential error (omitted)
rect rgb(11, 110, 79)
alt Generated token is final
TensorRtLlm.Executor ->> Nvidia.Gpu: Remove request from the scheduler and from the GPU
TextGenerationInference.TensorRtLlmWorkerThread -->> User: Stream the remaining decoded tokens and flush the connection
else Generated token is not final
TextGenerationInference.TensorRtLlmWorkerThread -->> User: Stream token back to the user as they get decoded
end
end
end
end
deactivate Nvidia.Gpu
end
end
```
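The sketch below is illustrative pseudocode of the worker-thread loop shown in the diagram (written in Python purely for brevity). Names such as `executor.submit` and `poll_new_tokens` are placeholders for this sketch, not the actual TensorRT-LLM Executor API; the real implementation lives in the Rust and C++ sources of this folder.

```python
import time

def handle_request(executor, request, stream):
    # Submit the request to the in-flight batcher and keep its identifier.
    request_id = executor.submit(request)
    while True:
        time.sleep(100e-6)  # poll roughly every 100us, as in the diagram
        for token in executor.poll_new_tokens(request_id):
            stream.send(token)      # stream each decoded token back to the user
            if token.is_final:
                stream.flush()      # final token: flush and close the connection
                return
```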
| text-generation-inference/backends/trtllm/README.md/0 | {
"file_path": "text-generation-inference/backends/trtllm/README.md",
"repo_id": "text-generation-inference",
"token_count": 1019
} | 215 |
use clap::Parser;
use std::collections::HashMap;
use std::path::PathBuf;
use text_generation_backends_trtllm::errors::TensorRtLlmBackendError;
use text_generation_backends_trtllm::TensorRtLlmBackend;
use text_generation_router::server;
use tokenizers::{FromPretrainedParameters, Tokenizer};
/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
#[clap(default_value = "128", long, env)]
max_concurrent_requests: usize,
#[clap(default_value = "2", long, env)]
max_best_of: usize,
#[clap(default_value = "4", long, env)]
max_stop_sequences: usize,
#[clap(default_value = "5", long, env)]
max_top_n_tokens: u32,
#[clap(default_value = "1024", long, env)]
max_input_tokens: usize,
#[clap(default_value = "2048", long, env)]
max_total_tokens: usize,
#[clap(default_value = "4096", long, env)]
max_batch_prefill_tokens: u32,
#[clap(long, env)]
max_batch_total_tokens: Option<u32>,
#[clap(default_value = "0.0.0.0", long, env)]
hostname: String,
#[clap(default_value = "3000", long, short, env)]
port: u16,
#[clap(long, env, required = true)]
tokenizer_name: String,
#[clap(long, env)]
tokenizer_config_path: Option<String>,
#[clap(long, env)]
revision: Option<String>,
#[clap(long, env)]
model_id: String,
#[clap(default_value = "2", long, env)]
validation_workers: usize,
#[clap(long, env)]
json_output: bool,
#[clap(long, env)]
otlp_endpoint: Option<String>,
#[clap(default_value = "text-generation-inference.router", long, env)]
otlp_service_name: String,
#[clap(long, env)]
cors_allow_origin: Option<Vec<String>>,
#[clap(long, env, default_value_t = false)]
messages_api_enabled: bool,
#[clap(default_value = "4", long, env)]
max_client_batch_size: usize,
#[clap(long, env)]
auth_token: Option<String>,
#[clap(long, env, help = "Path to the TensorRT-LLM Orchestrator worker")]
executor_worker: PathBuf,
}
#[tokio::main]
async fn main() -> Result<(), TensorRtLlmBackendError> {
// Get args
let args = Args::parse();
// Pattern match configuration
let Args {
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
max_batch_prefill_tokens,
max_batch_total_tokens,
hostname,
port,
tokenizer_name,
tokenizer_config_path,
revision,
model_id,
validation_workers,
json_output,
otlp_endpoint,
otlp_service_name,
cors_allow_origin,
messages_api_enabled,
max_client_batch_size,
auth_token,
executor_worker,
} = args;
// Launch Tokio runtime
text_generation_router::logging::init_logging(otlp_endpoint, otlp_service_name, json_output);
// Validate args
if max_input_tokens >= max_total_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(
"`max_input_tokens` must be < `max_total_tokens`".to_string(),
));
}
if max_input_tokens as u32 > max_batch_prefill_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_tokens`. Given: {max_batch_prefill_tokens} and {max_input_tokens}")));
}
if validation_workers == 0 {
return Err(TensorRtLlmBackendError::ArgumentValidation(
"`validation_workers` must be > 0".to_string(),
));
}
if let Some(ref max_batch_total_tokens) = max_batch_total_tokens {
if max_batch_prefill_tokens > *max_batch_total_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}")));
}
if max_total_tokens as u32 > *max_batch_total_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}")));
}
}
if !executor_worker.exists() {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!(
"`executor_work` specified path doesn't exists: {}",
executor_worker.display()
)));
}
// Run server
let tokenizer = Tokenizer::from_pretrained(
tokenizer_name.clone(),
Some(FromPretrainedParameters {
revision: revision.clone().unwrap_or(String::from("main")),
user_agent: HashMap::new(),
auth_token,
}),
)
.map_err(|e| TensorRtLlmBackendError::Tokenizer(e.to_string()))?;
let backend = TensorRtLlmBackend::new(tokenizer, model_id, executor_worker)?;
server::run(
backend,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
validation_workers,
None,
tokenizer_name,
tokenizer_config_path,
revision,
hostname,
port,
cors_allow_origin,
false,
None,
None,
messages_api_enabled,
true,
max_client_batch_size,
false,
false,
)
.await?;
Ok(())
}
| text-generation-inference/backends/trtllm/src/main.rs/0 | {
"file_path": "text-generation-inference/backends/trtllm/src/main.rs",
"repo_id": "text-generation-inference",
"token_count": 2552
} | 216 |
/// Inspired by https://github.com/hatoo/oha/blob/bb989ea3cd77727e7743e7daa60a19894bb5e901/src/monitor.rs
use crate::generation::{Decode, Message, Prefill};
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers};
use text_generation_client::ClientError;
use tokio::sync::mpsc;
use tui::backend::Backend;
use tui::layout::{Alignment, Constraint, Direction, Layout};
use tui::style::{Color, Modifier, Style};
use tui::text::{Line, Span};
use tui::widgets::{
Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, GraphType, Paragraph, Tabs,
};
use tui::{symbols, Frame};
/// TUI powered App
pub(crate) struct App {
pub(crate) running: bool,
pub(crate) data: Data,
completed_runs: Vec<usize>,
completed_batch: usize,
current_batch: usize,
current_tab: usize,
touched_tab: bool,
zoom: bool,
is_error: bool,
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
n_run: usize,
receiver: mpsc::Receiver<Result<Message, ClientError>>,
}
impl App {
pub(crate) fn new(
receiver: mpsc::Receiver<Result<Message, ClientError>>,
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
n_run: usize,
batch_size: Vec<u32>,
) -> Self {
let current_tab = 0;
let completed_runs: Vec<usize> = (0..batch_size.len()).map(|_| 0).collect();
let completed_batch = 0;
let current_batch = 0;
let is_error = false;
let data = Data::new(n_run, batch_size);
Self {
running: true,
data,
completed_runs,
completed_batch,
current_batch,
current_tab,
touched_tab: false,
zoom: false,
is_error,
tokenizer_name,
sequence_length,
decode_length,
n_run,
receiver,
}
}
/// Handle crossterm key events
pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) {
match key_event {
// Increase and wrap tab
KeyEvent {
code: KeyCode::Right,
..
}
| KeyEvent {
code: KeyCode::Tab, ..
} => {
self.touched_tab = true;
self.current_tab = (self.current_tab + 1) % self.data.batch_size.len();
}
// Decrease and wrap tab
KeyEvent {
code: KeyCode::Left,
..
} => {
self.touched_tab = true;
if self.current_tab > 0 {
self.current_tab -= 1;
} else {
self.current_tab = self.data.batch_size.len() - 1;
}
}
// Zoom on throughput/latency fig
KeyEvent {
code: KeyCode::Char('+'),
..
} => {
self.zoom = true;
}
// Unzoom on throughput/latency fig
KeyEvent {
code: KeyCode::Char('-'),
..
} => {
self.zoom = false;
}
// Quit
KeyEvent {
code: KeyCode::Char('q'),
..
}
| KeyEvent {
code: KeyCode::Char('c'),
modifiers: KeyModifiers::CONTROL,
..
} => {
self.running = false;
}
_ => (),
}
}
/// Get all pending messages from generation task
pub(crate) fn tick(&mut self) {
while let Ok(message) = self.receiver.try_recv() {
match message {
Ok(message) => match message {
Message::Prefill(step) => self.data.push_prefill(step, self.current_batch),
Message::Decode(step) => self.data.push_decode(step, self.current_batch),
Message::EndRun => {
self.completed_runs[self.current_batch] += 1;
}
Message::EndBatch => {
self.data.end_batch(self.current_batch);
self.completed_batch += 1;
if self.current_batch < self.data.batch_size.len() - 1 {
// Only go to next tab if the user never touched the tab keys
if !self.touched_tab {
self.current_tab += 1;
}
self.current_batch += 1;
}
}
Message::Warmup => {}
},
Err(_) => self.is_error = true,
}
}
}
/// Render frame
pub fn render<B: Backend>(&mut self, f: &mut Frame<'_, B>) {
let batch_progress =
(self.completed_batch as f64 / self.data.batch_size.len() as f64).clamp(0.0, 1.0);
let run_progress =
(self.completed_runs[self.current_batch] as f64 / self.n_run as f64).clamp(0.0, 1.0);
// Vertical layout
let row5 = Layout::default()
.direction(Direction::Vertical)
.constraints(
[
Constraint::Length(1),
Constraint::Length(3),
Constraint::Length(3),
Constraint::Length(13),
Constraint::Min(10),
]
.as_ref(),
)
.split(f.size());
// Top row horizontal layout
let top = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row5[2]);
// Mid row horizontal layout
let mid = Layout::default()
.direction(Direction::Horizontal)
.constraints(
[
Constraint::Percentage(25),
Constraint::Percentage(25),
Constraint::Percentage(25),
Constraint::Percentage(25),
]
.as_ref(),
)
.split(row5[3]);
// Left mid row vertical layout
let prefill_text = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(8), Constraint::Length(5)].as_ref())
.split(mid[0]);
// Right mid row vertical layout
let decode_text = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(8), Constraint::Length(5)].as_ref())
.split(mid[2]);
let decode_text_latency = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(decode_text[0]);
// Bottom row horizontal layout
let bottom = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row5[4]);
// Title
let title = Block::default()
.borders(Borders::NONE)
.title(format!(
"Model: {} | Sequence Length: {} | Decode Length: {}",
self.tokenizer_name, self.sequence_length, self.decode_length
))
.style(
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::White),
);
f.render_widget(title, row5[0]);
// Helper
let helper = Block::default()
.borders(Borders::NONE)
.title("<- | tab | ->: change batch tab | q / CTRL + c: quit | +/-: zoom")
.title_alignment(Alignment::Right)
.style(Style::default().fg(Color::White));
f.render_widget(helper, row5[0]);
// Batch tabs
let titles = self
.data
.batch_size
.iter()
.map(|b| {
Line::from(vec![Span::styled(
format!("Batch: {b}"),
Style::default().fg(Color::White),
)])
})
.collect();
let tabs = Tabs::new(titles)
.block(Block::default().borders(Borders::ALL).title("Tabs"))
.select(self.current_tab)
.style(Style::default().fg(Color::LightCyan))
.highlight_style(
Style::default()
.add_modifier(Modifier::BOLD)
.bg(Color::Black),
);
f.render_widget(tabs, row5[1]);
// Total progress bar
let color = if self.is_error {
Color::Red
} else {
Color::LightGreen
};
let batch_gauge = progress_gauge(
"Total Progress",
format!("{} / {}", self.completed_batch, self.data.batch_size.len()),
batch_progress,
color,
);
f.render_widget(batch_gauge, top[0]);
// Batch progress Bar
let color = if self.is_error {
Color::Red
} else {
Color::LightBlue
};
let run_gauge = progress_gauge(
"Batch Progress",
format!(
"{} / {}",
self.completed_runs[self.current_batch], self.n_run
),
run_progress,
color,
);
f.render_widget(run_gauge, top[1]);
        // Prefill text info
let prefill_latency_block = latency_paragraph(
&mut self.data.prefill_latencies[self.current_tab],
"Prefill",
);
let prefill_throughput_block =
throughput_paragraph(&self.data.prefill_throughputs[self.current_tab], "Prefill");
f.render_widget(prefill_latency_block, prefill_text[0]);
f.render_widget(prefill_throughput_block, prefill_text[1]);
// Prefill latency histogram
let histo_width = 7;
let bins = if mid[1].width < 2 {
0
} else {
(mid[1].width as usize - 2) / (histo_width + 1)
}
.max(2);
let histo_data =
latency_histogram_data(&self.data.prefill_latencies[self.current_tab], bins);
let histo_data_str: Vec<(&str, u64)> =
histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect();
let prefill_histogram =
latency_histogram(&histo_data_str, "Prefill").bar_width(histo_width as u16);
f.render_widget(prefill_histogram, mid[1]);
// Decode text info
let decode_latency_block = latency_paragraph(
&mut self.data.decode_latencies[self.current_tab],
"Decode Total",
);
let decode_token_latency_block = latency_paragraph(
&mut self.data.decode_token_latencies[self.current_tab],
"Decode Token",
);
let decode_throughput_block =
throughput_paragraph(&self.data.decode_throughputs[self.current_tab], "Decode");
f.render_widget(decode_latency_block, decode_text_latency[0]);
f.render_widget(decode_token_latency_block, decode_text_latency[1]);
f.render_widget(decode_throughput_block, decode_text[1]);
// Decode latency histogram
let histo_data =
latency_histogram_data(&self.data.decode_latencies[self.current_tab], bins);
let histo_data_str: Vec<(&str, u64)> =
histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect();
let decode_histogram =
latency_histogram(&histo_data_str, "Decode").bar_width(histo_width as u16);
f.render_widget(decode_histogram, mid[3]);
// Prefill latency/throughput chart
let prefill_latency_throughput_chart = latency_throughput_chart(
&self.data.prefill_batch_latency_throughput,
&self.data.batch_size,
self.zoom,
"Prefill",
);
f.render_widget(prefill_latency_throughput_chart, bottom[0]);
// Decode latency/throughput chart
let decode_latency_throughput_chart = latency_throughput_chart(
&self.data.decode_batch_latency_throughput,
&self.data.batch_size,
self.zoom,
"Decode",
);
f.render_widget(decode_latency_throughput_chart, bottom[1]);
}
}
/// App internal data struct
pub(crate) struct Data {
pub(crate) batch_size: Vec<u32>,
pub(crate) prefill_latencies: Vec<Vec<f64>>,
pub(crate) prefill_throughputs: Vec<Vec<f64>>,
pub(crate) decode_latencies: Vec<Vec<f64>>,
pub(crate) decode_token_latencies: Vec<Vec<f64>>,
pub(crate) decode_throughputs: Vec<Vec<f64>>,
pub(crate) prefill_batch_latency_throughput: Vec<(f64, f64)>,
pub(crate) decode_batch_latency_throughput: Vec<(f64, f64)>,
}
impl Data {
fn new(n_run: usize, batch_size: Vec<u32>) -> Self {
let prefill_latencies: Vec<Vec<f64>> = (0..batch_size.len())
.map(|_| Vec::with_capacity(n_run))
.collect();
let prefill_throughputs: Vec<Vec<f64>> = prefill_latencies.clone();
let decode_latencies: Vec<Vec<f64>> = prefill_latencies.clone();
let decode_token_latencies: Vec<Vec<f64>> = decode_latencies.clone();
let decode_throughputs: Vec<Vec<f64>> = prefill_throughputs.clone();
let prefill_batch_latency_throughput: Vec<(f64, f64)> =
Vec::with_capacity(batch_size.len());
let decode_batch_latency_throughput: Vec<(f64, f64)> =
prefill_batch_latency_throughput.clone();
Self {
batch_size,
prefill_latencies,
prefill_throughputs,
decode_latencies,
decode_token_latencies,
decode_throughputs,
prefill_batch_latency_throughput,
decode_batch_latency_throughput,
}
}
fn push_prefill(&mut self, prefill: Prefill, batch_idx: usize) {
let latency = prefill.latency.as_micros() as f64 / 1000.0;
self.prefill_latencies[batch_idx].push(latency);
self.prefill_throughputs[batch_idx].push(prefill.throughput);
}
fn push_decode(&mut self, decode: Decode, batch_idx: usize) {
let latency = decode.latency.as_micros() as f64 / 1000.0;
let token_latency = decode.token_latency.as_micros() as f64 / 1000.0;
self.decode_latencies[batch_idx].push(latency);
self.decode_token_latencies[batch_idx].push(token_latency);
self.decode_throughputs[batch_idx].push(decode.throughput);
}
fn end_batch(&mut self, batch_idx: usize) {
self.prefill_batch_latency_throughput.push((
self.prefill_latencies[batch_idx].iter().sum::<f64>()
/ self.prefill_latencies[batch_idx].len() as f64,
self.prefill_throughputs[batch_idx].iter().sum::<f64>()
/ self.prefill_throughputs[batch_idx].len() as f64,
));
self.decode_batch_latency_throughput.push((
self.decode_latencies[batch_idx].iter().sum::<f64>()
/ self.decode_latencies[batch_idx].len() as f64,
self.decode_throughputs[batch_idx].iter().sum::<f64>()
/ self.decode_throughputs[batch_idx].len() as f64,
));
}
}
/// Progress bar
fn progress_gauge(title: &str, label: String, progress: f64, color: Color) -> Gauge {
Gauge::default()
.block(Block::default().title(title).borders(Borders::ALL))
.gauge_style(Style::default().fg(color))
.label(Span::raw(label))
.ratio(progress)
}
/// Throughput paragraph
fn throughput_paragraph<'a>(throughput: &[f64], name: &'static str) -> Paragraph<'a> {
// Throughput average/high/low texts
let throughput_texts = statis_spans(throughput, "tokens/secs");
// Throughput block
Paragraph::new(throughput_texts).block(
Block::default()
.title(Span::raw(format!("{name} Throughput")))
.borders(Borders::ALL),
)
}
/// Latency paragraph
fn latency_paragraph<'a>(latency: &mut [f64], name: &'static str) -> Paragraph<'a> {
// Latency average/high/low texts
let mut latency_texts = statis_spans(latency, "ms");
// Sort latency for percentiles
float_ord::sort(latency);
let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]);
// Latency p50/p90/p99 texts
let colors = [Color::LightGreen, Color::LightYellow, Color::LightRed];
for (i, (name, value)) in latency_percentiles.iter().enumerate() {
let span = Line::from(vec![Span::styled(
format!("{name}: {value:.2} ms"),
Style::default().fg(colors[i]),
)]);
latency_texts.push(span);
}
Paragraph::new(latency_texts).block(
Block::default()
.title(Span::raw(format!("{name} Latency")))
.borders(Borders::ALL),
)
}
/// Average/High/Low spans
fn statis_spans<'a>(data: &[f64], unit: &'static str) -> Vec<Line<'a>> {
vec![
Line::from(vec![Span::styled(
format!(
"Average: {:.2} {unit}",
data.iter().sum::<f64>() / data.len() as f64
),
Style::default().fg(Color::LightBlue),
)]),
Line::from(vec![Span::styled(
format!(
"Lowest: {:.2} {unit}",
data.iter()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN)
),
Style::default().fg(Color::Reset),
)]),
Line::from(vec![Span::styled(
format!(
"Highest: {:.2} {unit}",
data.iter()
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN)
),
Style::default().fg(Color::Reset),
)]),
]
}
/// Latency histogram data
fn latency_histogram_data(latency: &[f64], bins: usize) -> Vec<(String, u64)> {
let histo_data: Vec<(String, u64)> = {
let histo = crate::utils::histogram(latency, bins);
histo
.into_iter()
.map(|(label, v)| (format!("{label:.2}"), v as u64))
.collect()
};
histo_data
}
/// Latency Histogram
fn latency_histogram<'a>(
histo_data_str: &'a Vec<(&'a str, u64)>,
name: &'static str,
) -> BarChart<'a> {
BarChart::default()
.block(
Block::default()
.title(format!("{name} latency histogram"))
.style(Style::default().fg(Color::LightYellow).bg(Color::Reset))
.borders(Borders::ALL),
)
.data(histo_data_str.as_slice())
}
/// Latency/Throughput chart
fn latency_throughput_chart<'a>(
latency_throughput: &'a [(f64, f64)],
batch_sizes: &'a [u32],
zoom: bool,
name: &'static str,
) -> Chart<'a> {
let latency_iter = latency_throughput.iter().map(|(l, _)| l);
let throughput_iter = latency_throughput.iter().map(|(_, t)| t);
// Get extreme values
let min_latency: f64 = *latency_iter
.clone()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
let max_latency: f64 = *latency_iter
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
let min_throughput: f64 = *throughput_iter
.clone()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
let max_throughput: f64 = *throughput_iter
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
    // Chart min max values
let min_x = if zoom {
((min_latency - 0.05 * min_latency) / 100.0).floor() * 100.0
} else {
0.0
};
let max_x = ((max_latency + 0.05 * max_latency) / 100.0).ceil() * 100.0;
let step_x = (max_x - min_x) / 4.0;
// Chart min max values
let min_y = if zoom {
((min_throughput - 0.05 * min_throughput) / 100.0).floor() * 100.0
} else {
0.0
};
let max_y = ((max_throughput + 0.05 * max_throughput) / 100.0).ceil() * 100.0;
let step_y = (max_y - min_y) / 4.0;
// Labels
let mut x_labels = vec![Span::styled(
format!("{min_x:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
)];
for i in 0..3 {
x_labels.push(Span::styled(
format!("{:.2}", min_x + ((i + 1) as f64 * step_x)),
Style::default().fg(Color::Gray).bg(Color::Reset),
));
}
x_labels.push(Span::styled(
format!("{max_x:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
));
// Labels
let mut y_labels = vec![Span::styled(
format!("{min_y:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
)];
for i in 0..3 {
y_labels.push(Span::styled(
format!("{:.2}", min_y + ((i + 1) as f64 * step_y)),
Style::default().fg(Color::Gray).bg(Color::Reset),
));
}
y_labels.push(Span::styled(
format!("{max_y:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
));
// Chart dataset
let colors = color_vec();
let datasets: Vec<Dataset> = (0..latency_throughput.len())
.map(|i| {
let color_idx = i % colors.len();
Dataset::default()
.name(batch_sizes[i].to_string())
.marker(symbols::Marker::Block)
.style(Style::default().fg(colors[color_idx]))
.graph_type(GraphType::Scatter)
.data(&latency_throughput[i..(i + 1)])
})
.collect();
// Chart
Chart::new(datasets)
.style(Style::default().fg(Color::Cyan).bg(Color::Reset))
.block(
Block::default()
.title(Span::styled(
format!("{name} throughput over latency"),
Style::default().fg(Color::Gray).bg(Color::Reset),
))
.borders(Borders::ALL),
)
.x_axis(
Axis::default()
.title("ms")
.style(Style::default().fg(Color::Gray).bg(Color::Reset))
.labels(x_labels)
.bounds([min_x, max_x]),
)
.y_axis(
Axis::default()
.title("tokens/secs")
.style(Style::default().fg(Color::Gray).bg(Color::Reset))
.labels(y_labels)
.bounds([min_y, max_y]),
)
}
// Colors for latency/throughput chart
fn color_vec() -> Vec<Color> {
vec![
Color::Red,
Color::Green,
Color::Yellow,
Color::Blue,
Color::Magenta,
Color::Cyan,
Color::Gray,
Color::DarkGray,
Color::LightRed,
Color::LightGreen,
Color::LightYellow,
Color::LightBlue,
Color::LightMagenta,
Color::LightCyan,
]
}
| text-generation-inference/benchmark/src/app.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/app.rs",
"repo_id": "text-generation-inference",
"token_count": 12196
} | 217 |
import pytest
from text_generation.types import Parameters, Request
from text_generation.errors import ValidationError
def test_parameters_validation():
# Test best_of
Parameters(best_of=1)
with pytest.raises(ValidationError):
Parameters(best_of=0)
with pytest.raises(ValidationError):
Parameters(best_of=-1)
Parameters(best_of=2, do_sample=True)
with pytest.raises(ValidationError):
Parameters(best_of=2)
with pytest.raises(ValidationError):
Parameters(best_of=2, seed=1)
# Test repetition_penalty
Parameters(repetition_penalty=1)
with pytest.raises(ValidationError):
Parameters(repetition_penalty=0)
with pytest.raises(ValidationError):
Parameters(repetition_penalty=-1)
# Test seed
Parameters(seed=1)
with pytest.raises(ValidationError):
Parameters(seed=-1)
# Test temperature
Parameters(temperature=1)
with pytest.raises(ValidationError):
Parameters(temperature=0)
with pytest.raises(ValidationError):
Parameters(temperature=-1)
# Test top_k
Parameters(top_k=1)
with pytest.raises(ValidationError):
Parameters(top_k=0)
with pytest.raises(ValidationError):
Parameters(top_k=-1)
# Test top_p
Parameters(top_p=0.5)
with pytest.raises(ValidationError):
Parameters(top_p=0)
with pytest.raises(ValidationError):
Parameters(top_p=-1)
with pytest.raises(ValidationError):
Parameters(top_p=1)
# Test truncate
Parameters(truncate=1)
with pytest.raises(ValidationError):
Parameters(truncate=0)
with pytest.raises(ValidationError):
Parameters(truncate=-1)
# Test typical_p
Parameters(typical_p=0.5)
with pytest.raises(ValidationError):
Parameters(typical_p=0)
with pytest.raises(ValidationError):
Parameters(typical_p=-1)
with pytest.raises(ValidationError):
Parameters(typical_p=1)
def test_request_validation():
Request(inputs="test")
with pytest.raises(ValidationError):
Request(inputs="")
Request(inputs="test", stream=True)
Request(inputs="test", parameters=Parameters(best_of=2, do_sample=True))
with pytest.raises(ValidationError):
Request(
inputs="test", parameters=Parameters(best_of=2, do_sample=True), stream=True
)
| text-generation-inference/clients/python/tests/test_types.py/0 | {
"file_path": "text-generation-inference/clients/python/tests/test_types.py",
"repo_id": "text-generation-inference",
"token_count": 984
} | 218 |
# Model safety.
[PyTorch uses pickle](https://pytorch.org/docs/master/generated/torch.load.html) by default, which means that for quite a long time
*every* model stored in that format could potentially execute unintended code simply by being loaded.
There is a big red warning about this on Python's [pickle documentation page](https://docs.python.org/3/library/pickle.html), but for a long while
it was ignored by the community. Now that AI/ML is used much more ubiquitously, we need to switch away from this format.
Hugging Face is leading the effort here by creating a new format that contains pure data ([safetensors](https://github.com/huggingface/safetensors))
and by slowly but surely moving all the libraries to use it by default.
The move is intentionally slow in order to keep the impact of breaking changes on users as small as possible.
# TGI 2.0
Since the release of TGI 2.0, we take the opportunity of this major version increase to break backward compatibility for these PyTorch
models, because they are a huge security risk for anyone deploying them.
From now on, TGI will not automatically convert pickle files unless the `--trust-remote-code` flag is set or `TRUST_REMOTE_CODE=true` is present in the environment variables.
This flag is already used for community-defined inference code, and is therefore quite representative of the level of confidence you are giving the model providers.
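As a rough sketch, assuming you deploy TGI through its Docker image as in the other guides (the image tag, model id and volume path below are placeholders to adapt to your own setup), opting in looks like this:
```
docker run --gpus all --shm-size 1g -p 8080:80 -v $PWD/data:/data \
    ghcr.io/huggingface/text-generation-inference:latest \
    --model-id <pickle-only-model> \
    --trust-remote-code
# Or, equivalently, through the environment variable:
# docker run ... -e TRUST_REMOTE_CODE=true ... --model-id <pickle-only-model>
```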
If you want to use a model that relies on pickle, but you still do not want to trust the authors entirely, we recommend making a conversion on our Space made for that purpose.
https://huggingface.co/spaces/safetensors/convert
This Space will create a PR on the original model, which you can use directly regardless of its merge status with the original authors. Just use
```
docker run .... --revision refs/pr/#ID # Or use REVISION=refs/pr/#ID in the environment
```
| text-generation-inference/docs/source/basic_tutorials/safety.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/safety.md",
"repo_id": "text-generation-inference",
"token_count": 465
} | 219 |
# Using TGI with AMD GPUs
TGI is supported and tested on [AMD Instinct MI210](https://www.amd.com/en/products/accelerators/instinct/mi200/mi210.html), [MI250](https://www.amd.com/en/products/accelerators/instinct/mi200/mi250.html) and [MI300](https://www.amd.com/en/products/accelerators/instinct/mi300.html) GPUs. The support may be extended in the future. The recommended usage is through Docker. Make sure to check the [AMD documentation](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html) on how to use Docker with AMD GPUs.
On a server powered by AMD GPUs, TGI can be launched with the following command:
```bash
model=teknium/OpenHermes-2.5-Mistral-7B
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
--device=/dev/kfd --device=/dev/dri --group-add video \
--ipc=host --shm-size 256g --net host -v $volume:/data \
ghcr.io/huggingface/text-generation-inference:2.2.0-rocm \
--model-id $model
```
The launched TGI server can then be queried from clients; make sure to check out the [Consuming TGI](./basic_tutorials/consuming_tgi) guide.
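For a quick sanity check, a request along the following lines should return a generation. The address and port assume the default settings together with the `--net host` flag used above; adjust them to your deployment.
```bash
curl 127.0.0.1:80/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```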
## TunableOp
TGI's Docker image for AMD GPUs integrates [PyTorch's TunableOp](https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/cuda/tunable), which performs an additional warmup to select the best-performing matrix multiplication (GEMM) kernel from rocBLAS or hipBLASLt.
Experimentally, on MI300X, we noticed a 6-8% latency improvement when using TunableOp on top of ROCm 6.1 and PyTorch 2.3.
TunableOp is enabled by default, and the warmup may take 1-2 minutes. If you would like to disable TunableOp, pass `--env PYTORCH_TUNABLEOP_ENABLED="0"` when launching TGI's Docker container.
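As an illustration, disabling TunableOp amounts to adding the environment variable to the launch command shown above (same `$model` and `$volume` placeholders); the same `--env` pattern applies to the other toggles mentioned on this page, such as the Flash Attention one below.
```bash
docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
    --device=/dev/kfd --device=/dev/dri --group-add video \
    --ipc=host --shm-size 256g --net host -v $volume:/data \
    --env PYTORCH_TUNABLEOP_ENABLED="0" \
    ghcr.io/huggingface/text-generation-inference:2.2.0-rocm \
    --model-id $model
```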
## Flash attention implementation
Two implementations of Flash Attention are available for ROCm: the first is [ROCm/flash-attention](https://github.com/ROCm/flash-attention), based on a [Composable Kernel](https://github.com/ROCm/composable_kernel) (CK) implementation, and the second is a [Triton implementation](https://github.com/huggingface/text-generation-inference/blob/main/server/text_generation_server/layers/attention/flash_attn_triton.py).
By default, the Composable Kernel implementation is used. However, the Triton implementation has slightly lower latency on MI250 and MI300, but requires a warmup which can be prohibitive, as it needs to be done again for each new prompt length. If needed, the Flash Attention Triton implementation can be enabled with `--env ROCM_USE_FLASH_ATTN_V2_TRITON="0"` when launching TGI's Docker container.
## Unsupported features
The following features are currently not supported in the ROCm version of TGI; support may be extended in the future:
* Loading [AWQ](https://huggingface.co/docs/transformers/quantization#awq) checkpoints.
* Kernel for sliding window attention (Mistral)
| text-generation-inference/docs/source/installation_amd.md/0 | {
"file_path": "text-generation-inference/docs/source/installation_amd.md",
"repo_id": "text-generation-inference",
"token_count": 916
} | 220 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.5703125,
"text": " dรฉg"
},
{
"id": 21543,
"logprob": -0.14746094,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9277344,
"text": " un"
},
{
"id": 46341,
"logprob": -15.421875,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5820312,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4013672,
"text": ","
},
{
"id": 1669,
"logprob": -1.5595703,
"text": " il"
},
{
"id": 11580,
"logprob": -0.9428711,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.703125,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.7763672,
"text": " d'abord"
}
],
"seed": 0,
"tokens": [
{
"id": 578,
"logprob": -1.7822266,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.4882812,
"special": false,
"text": " faire"
},
{
"id": 7735,
"logprob": -2.4199219,
"special": false,
"text": " fond"
},
{
"id": 289,
"logprob": 0.0,
"special": false,
"text": "re"
},
{
"id": 693,
"logprob": -2.4628906,
"special": false,
"text": " ร "
},
{
"id": 366,
"logprob": -1.1308594,
"special": false,
"text": " la"
},
{
"id": 48844,
"logprob": -1.7900391,
"special": false,
"text": " cass"
},
{
"id": 1744,
"logprob": 0.0,
"special": false,
"text": "ero"
},
{
"id": 327,
"logprob": 0.0,
"special": false,
"text": "le"
},
{
"id": 2940,
"logprob": -1.9306641,
"special": false,
"text": " avec"
}
],
"top_tokens": null
},
"generated_text": " le faire fondre ร la casserole avec"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json",
"repo_id": "text-generation-inference",
"token_count": 1548
} | 221 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 100000,
"logprob": null,
"text": "<๏ฝbeginโofโsentence๏ฝ>"
},
{
"id": 3533,
"logprob": -9.625,
"text": "Test"
},
{
"id": 3102,
"logprob": -11.25,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 100000,
"logprob": null,
"text": "<๏ฝbeginโofโsentence๏ฝ>"
},
{
"id": 3533,
"logprob": -9.625,
"text": "Test"
},
{
"id": 3102,
"logprob": -11.25,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 100000,
"logprob": null,
"text": "<๏ฝbeginโofโsentence๏ฝ>"
},
{
"id": 3533,
"logprob": -9.625,
"text": "Test"
},
{
"id": 3102,
"logprob": -11.25,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 100000,
"logprob": null,
"text": "<๏ฝbeginโofโsentence๏ฝ>"
},
{
"id": 3533,
"logprob": -9.625,
"text": "Test"
},
{
"id": 3102,
"logprob": -11.25,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2_load.json",
"repo_id": "text-generation-inference",
"token_count": 4940
} | 222 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.0332031,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04257202,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4785156,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32495117,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7709961,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33740234,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.00995636,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64208984,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.4970703,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46533203,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5336914,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022735596,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.03125,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04244995,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4863281,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32714844,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7685547,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33666992,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.01008606,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64160156,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.5,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46557617,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5341797,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022907257,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.0332031,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04257202,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4785156,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32495117,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7709961,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33740234,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.00995636,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64208984,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.4970703,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46533203,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5336914,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022735596,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1024,
"logprob": -10.578125,
"text": "name"
},
{
"id": 29901,
"logprob": -3.0332031,
"text": ":"
},
{
"id": 13260,
"logprob": -9.171875,
"text": "dav"
},
{
"id": 333,
"logprob": -0.04257202,
"text": "id"
},
{
"id": 29889,
"logprob": -2.4785156,
"text": "."
},
{
"id": 4876,
"logprob": -10.7890625,
"text": "email"
},
{
"id": 29901,
"logprob": -0.32495117,
"text": ":"
},
{
"id": 259,
"logprob": -9.4921875,
"text": " "
}
],
"seed": null,
"tokens": [
{
"id": 29896,
"logprob": -0.7709961,
"special": false,
"text": "1"
},
{
"id": 29906,
"logprob": -0.33740234,
"special": false,
"text": "2"
},
{
"id": 29941,
"logprob": -0.00995636,
"special": false,
"text": "3"
},
{
"id": 29946,
"logprob": -0.64208984,
"special": false,
"text": "4"
},
{
"id": 29945,
"logprob": -0.4970703,
"special": false,
"text": "5"
},
{
"id": 29953,
"logprob": -0.46533203,
"special": false,
"text": "6"
},
{
"id": 29992,
"logprob": -0.5336914,
"special": false,
"text": "@"
},
{
"id": 21980,
"logprob": -0.5361328,
"special": false,
"text": "gmail"
},
{
"id": 29889,
"logprob": -0.00088739395,
"special": false,
"text": "."
},
{
"id": 510,
"logprob": -0.0022735596,
"special": false,
"text": "com"
}
],
"top_tokens": null
},
"generated_text": "[email protected]"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_load.json",
"repo_id": "text-generation-inference",
"token_count": 6602
} | 223 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -12.390625,
"text": "Test"
},
{
"id": 2009,
"logprob": -11.0625,
"text": "request"
}
],
"seed": 0,
"tokens": [
{
"id": 5229,
"logprob": -1.2607422,
"special": false,
"text": " failed"
},
{
"id": 29901,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 6527,
"logprob": -0.11450195,
"special": false,
"text": " Could"
},
{
"id": 451,
"logprob": 0.0,
"special": false,
"text": " not"
},
{
"id": 4511,
"logprob": -0.2286377,
"special": false,
"text": " connect"
},
{
"id": 304,
"logprob": 0.0,
"special": false,
"text": " to"
},
{
"id": 1923,
"logprob": -1.2568359,
"special": false,
"text": " server"
},
{
"id": 13,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.15905762,
"special": false,
"text": "\n"
},
{
"id": 29902,
"logprob": -0.21618652,
"special": false,
"text": "I"
}
],
"top_tokens": null
},
"generated_text": "Test request failed: Could not connect to server\n\nI"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_marlin/test_flash_llama_marlin_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_marlin/test_flash_llama_marlin_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 1037
} | 224 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 8,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 2502,
"logprob": -1.7890625,
"special": false,
"text": "image"
},
{
"id": 2196,
"logprob": -0.53125,
"special": false,
"text": " result"
},
{
"id": 604,
"logprob": -0.0077209473,
"special": false,
"text": " for"
},
{
"id": 12254,
"logprob": -1.703125,
"special": false,
"text": " chicken"
},
{
"id": 611,
"logprob": -0.21582031,
"special": false,
"text": " on"
},
{
"id": 573,
"logprob": -0.734375,
"special": false,
"text": " the"
},
{
"id": 8318,
"logprob": -0.026000977,
"special": false,
"text": " beach"
},
{
"id": 1,
"logprob": -0.2109375,
"special": true,
"text": "<eos>"
}
],
"top_tokens": null
},
"generated_text": "image result for chicken on the beach"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma_two_images.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma_two_images.json",
"repo_id": "text-generation-inference",
"token_count": 719
} | 225 |
{
"details": {
"finish_reason": "length",
"generated_tokens": 40,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.0488281,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.0800781,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -2.1152344,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -1.6748047,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -0.097229004,
"special": false,
"text": "1"
},
{
"id": 28723,
"logprob": -0.16467285,
"special": false,
"text": "."
},
{
"id": 7615,
"logprob": -2.2246094,
"special": false,
"text": " News"
},
{
"id": 13,
"logprob": -1.0488281,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.69189453,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.013343811,
"special": false,
"text": " "
},
{
"id": 28750,
"logprob": -0.011230469,
"special": false,
"text": "2"
},
{
"id": 28723,
"logprob": -0.00096845627,
"special": false,
"text": "."
},
{
"id": 21095,
"logprob": -2.5605469,
"special": false,
"text": " Blog"
},
{
"id": 13,
"logprob": -0.19458008,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.031280518,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.0030708313,
"special": false,
"text": " "
},
{
"id": 28770,
"logprob": -0.0029277802,
"special": false,
"text": "3"
},
{
"id": 28723,
"logprob": -0.0012350082,
"special": false,
"text": "."
},
{
"id": 20108,
"logprob": -2.1582031,
"special": false,
"text": " Article"
},
{
"id": 13,
"logprob": -0.05810547,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.35083008,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.034332275,
"special": false,
"text": " "
},
{
"id": 28781,
"logprob": -0.009666443,
"special": false,
"text": "4"
},
{
"id": 28723,
"logprob": -0.0013113022,
"special": false,
"text": "."
},
{
"id": 8349,
"logprob": -2.6191406,
"special": false,
"text": " Review"
},
{
"id": 13,
"logprob": -0.04031372,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.45239258,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.045410156,
"special": false,
"text": " "
},
{
"id": 28782,
"logprob": -0.0041236877,
"special": false,
"text": "5"
},
{
"id": 28723,
"logprob": -0.0010223389,
"special": false,
"text": "."
},
{
"id": 5299,
"logprob": -2.8066406,
"special": false,
"text": " Other"
},
{
"id": 13,
"logprob": -0.12054443,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.44580078,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.4921875,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.3574219,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.0039062,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.5859375,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.43481445,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.2783203,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.20410156,
"special": false,
"text": "\n"
}
]
},
"generated_text": "\n\n### 1. News\n### 2. Blog\n### 3. Article\n### 4. Review\n### 5. Other\n\n\n\n\n\n\n\n\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_adapter.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_adapter.json",
"repo_id": "text-generation-inference",
"token_count": 3130
} | 226 |
import pytest
@pytest.fixture(scope="module")
def flash_gemma_handle(launcher):
with launcher("google/gemma-2b", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_gemma(flash_gemma_handle):
await flash_gemma_handle.health(300)
return flash_gemma_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma(flash_gemma, response_snapshot):
response = await flash_gemma.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_all_params(flash_gemma, response_snapshot):
response = await flash_gemma.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_load(flash_gemma, generate_load, response_snapshot):
responses = await generate_load(flash_gemma, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_gemma.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_gemma.py",
"repo_id": "text-generation-inference",
"token_count": 676
} | 227 |
import pytest
@pytest.fixture(scope="module")
def flash_phi_handle(launcher):
with launcher("microsoft/phi-2", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_phi(flash_phi_handle):
await flash_phi_handle.health(300)
return flash_phi_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_phi(flash_phi, response_snapshot):
response = await flash_phi.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response.generated_text == ': {request}")\n response = self'
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_phi_all_params(flash_phi, response_snapshot):
response = await flash_phi.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["network"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 6
assert response.generated_text == "Test request to send data over a network"
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_phi_load(flash_phi, generate_load, response_snapshot):
responses = await generate_load(flash_phi, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert responses[0].generated_text == ': {request}")\n response = self'
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_phi.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_phi.py",
"repo_id": "text-generation-inference",
"token_count": 749
} | 228 |
import pytest
@pytest.fixture(scope="module")
def neox_sharded_handle(launcher):
with launcher(
"OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2, use_flash_attention=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def neox_sharded(neox_sharded_handle):
await neox_sharded_handle.health(300)
return neox_sharded_handle.client
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox_sharded, response_snapshot):
response = await neox_sharded.generate(
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox_sharded, generate_load, response_snapshot):
responses = await generate_load(
neox_sharded,
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_neox_sharded.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_neox_sharded.py",
"repo_id": "text-generation-inference",
"token_count": 523
} | 229 |
{ pkgs, nix-filter }:
let
filter = nix-filter.lib;
in
with pkgs;
defaultCrateOverrides
// {
aws-lc-rs = attrs: {
# aws-lc-rs does its own custom parsing of Cargo environment
# variables like DEP_.*_INCLUDE. However buildRustCrate does
# not use the version number, so the parsing fails.
postPatch = ''
substituteInPlace build.rs \
--replace-fail \
"assert!(!selected.is_empty()" \
"// assert!(!selected.is_empty()"
'';
};
rav1e = attrs: { env.CARGO_ENCODED_RUSTFLAGS = "-C target-feature=-crt-static"; };
grpc-metadata = attrs: {
src = filter {
root = ../backends/grpc-metadata;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
text-generation-benchmark = attrs: {
src = filter {
root = ../benchmark;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
text-generation-client = attrs: {
src = filter {
root = ../.;
include = with filter; [
isDirectory
(and (inDirectory "backends/client") (matchExt "rs"))
(and (inDirectory "proto") (matchExt "proto"))
];
};
postPatch = "cd backends/client";
buildInputs = [ protobuf ];
};
text-generation-launcher = attrs: {
src = filter {
root = ../launcher;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
text-generation-router = attrs: {
src = filter {
root = ../router;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
text-generation-router-v3 = attrs: {
# We need to do the src/source root dance so that the build
# has access to the protobuf file.
src = filter {
root = ../.;
include = with filter; [
isDirectory
(and (inDirectory "backends/v3") (matchExt "rs"))
(and (inDirectory "proto") (matchExt "proto"))
];
};
postPatch = "cd backends/v3";
buildInputs = [ protobuf ];
};
}
| text-generation-inference/nix/crate-overrides.nix/0 | {
"file_path": "text-generation-inference/nix/crate-overrides.nix",
"repo_id": "text-generation-inference",
"token_count": 908
} | 230 |
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::Resource;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{filter::LevelFilter, EnvFilter, Layer};
/// Init logging using env variables LOG_LEVEL and LOG_FORMAT:
/// - otlp_endpoint is an optional URL to an Open Telemetry collector
/// - otlp_service_name service name to appear in APM
/// - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (default to INFO)
/// - LOG_FORMAT may be TEXT or JSON (default to TEXT)
/// - LOG_COLORIZE may be "false" or "true" (defaults to "true" on ANSI-supported platforms)
pub fn init_logging(otlp_endpoint: Option<String>, otlp_service_name: String, json_output: bool) {
let mut layers = Vec::new();
// STDOUT/STDERR layer
let ansi = std::env::var("LOG_COLORIZE") != Ok("1".to_string());
let fmt_layer = tracing_subscriber::fmt::layer()
.with_file(true)
.with_ansi(ansi)
.with_line_number(true);
let fmt_layer = match json_output {
true => fmt_layer.json().flatten_event(true).boxed(),
false => fmt_layer.boxed(),
};
layers.push(fmt_layer);
// OpenTelemetry tracing layer
if let Some(otlp_endpoint) = otlp_endpoint {
global::set_text_map_propagator(TraceContextPropagator::new());
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(otlp_endpoint),
)
.with_trace_config(
trace::config()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
otlp_service_name,
)]))
.with_sampler(Sampler::AlwaysOn),
)
.install_batch(opentelemetry::runtime::Tokio);
if let Ok(tracer) = tracer {
layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
init_tracing_opentelemetry::init_propagator().unwrap();
};
}
// Filter events with LOG_LEVEL
let varname = "LOG_LEVEL";
let env_filter = if let Ok(log_level) = std::env::var(varname) {
        // Override to avoid simple logs being spammed with tokio-level information
let log_level = match &log_level[..] {
"warn" => "text_generation_launcher=warn,text_generation_router=warn",
"info" => "text_generation_launcher=info,text_generation_router=info",
"debug" => "text_generation_launcher=debug,text_generation_router=debug",
log_level => log_level,
};
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.parse_lossy(log_level)
} else {
EnvFilter::new("info")
};
tracing_subscriber::registry()
.with(env_filter)
.with(layers)
.init();
}
| text-generation-inference/router/src/logging.rs/0 | {
"file_path": "text-generation-inference/router/src/logging.rs",
"repo_id": "text-generation-inference",
"token_count": 1445
} | 231 |
lorax_punica_commit := c71861a653412267dc27ec86013dd945ce3474bc
build-lorax-punica:
if [ ! -d 'lorax-punica' ]; then \
git clone --no-checkout https://github.com/predibase/lorax.git lorax-punica; \
fi
cd lorax-punica && git sparse-checkout set server/punica_kernels && git checkout $(lorax_punica_commit)
cd lorax-punica && git submodule update --init --recursive
cd lorax-punica/server/punica_kernels && python setup.py build
install-lorax-punica: build-lorax-punica
cd lorax-punica/server/punica_kernels && python setup.py install
| text-generation-inference/server/Makefile-lorax-punica/0 | {
"file_path": "text-generation-inference/server/Makefile-lorax-punica",
"repo_id": "text-generation-inference",
"token_count": 208
} | 232 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include "util.cuh"
#include "tuning.h"
#include "cuda_buffers.cuh"
#include "cuda_func/q4_matrix.cuh"
#include "cuda_func/q4_matmul.cuh"
#include "cuda_func/column_remap.cuh"
// Check CUDA return code. We don't want to include Torch headers in the .cu files because parsing them adds almost a
// minute to the compile time on a 12900K. Also passing exceptions back to Python is super tricky, so in place of
// exceptions, CUDA functions return with a cudaError_t which we can parse and dump to the console.
void check_cuda(cudaError_t ret)
{
switch (ret)
{
case cudaSuccess:
break;
case cudaUnspecified:
printf(" **** Unspecified error\n");
TORCH_CHECK(false, "CUDA error");
break;
default:
printf(" **** CUDA error\n"); \
printf(" **** %s\n", cudaGetErrorString(ret)); \
TORCH_CHECK(false, "CUDA error"); \
break;
}
}
// Some decluttering macros
#define STRINGIFY_(__x) #__x
#define STRINGIFY(__x) STRINGIFY_(__x)
#define TORCH_CHECK_DTYPE(__x, __dtype) TORCH_CHECK((__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_DTYPE_OPT(__x, __dtype) TORCH_CHECK((__x).device().is_meta() || (__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_SHAPES(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
#define TORCH_CHECK_SHAPES_OPT(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).device().is_meta() || (__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
#define TORCH_CHECK_SHAPE_MOD(__x, __dim_x, __mod) TORCH_CHECK((__x).size(__dim_x) % __mod == 0, #__x ".shape[" STRINGIFY(__dim_x) "] must be a multiple of " STRINGIFY(__mod))
#define TORCH_CHECK_DEVICE_INDEX(__index) \
do { \
TORCH_CHECK(__index >= 0, "no device index"); \
TORCH_CHECK(__index < CUDA_MAX_DEVICES, "invalid device index"); \
} while(0)
#define TORCH_CHECK_QUANT(__w, __w_scales, __w_zeros, __seq_g_idx, __x_map) \
do { \
TORCH_CHECK_DTYPE(__w, kInt); \
TORCH_CHECK_DTYPE(__w_scales, kHalf); \
TORCH_CHECK_DTYPE(__w_zeros, kInt); \
TORCH_CHECK_DTYPE_OPT(__seq_g_idx, kShort); \
TORCH_CHECK_DTYPE_OPT(__x_map, kInt); \
TORCH_CHECK_SHAPES_OPT(__seq_g_idx, 0, __w, 0, 2 * 8); \
TORCH_CHECK_SHAPES_OPT(__x_map, 0, __w, 0, 8); \
} while(0)
int get_groupsize(torch::Tensor w, torch::Tensor w_zeros)
{
int groupsize = w.size(0) * 8 / w_zeros.size(0);
TORCH_CHECK(groupsize * w_zeros.size(0) == w.size(0) * 8, "w.shape[-2] must be a multiple of zeros.shape[-2]")
return groupsize;
}
// Tuning parameters
ExLlamaTuning tuningParams;
void set_tuning_params
(
int matmul_recons_thd,
bool matmul_fused_remap,
bool matmul_no_half2
)
{
tuningParams.matmul_recons_thd = matmul_recons_thd;
tuningParams.matmul_fused_remap = matmul_fused_remap;
tuningParams.matmul_no_half2 = matmul_no_half2;
}
// Release all unmanaged objects allocated by the extension
void cleanup()
{
cleanup_buffers_cuda();
g_q4_free_matrices();
}
// Prepare buffers for forward pass
void prepare_buffers
(
torch::Device device,
torch::Tensor temp_state,
torch::Tensor temp_dq
)
{
int device_index = device.index();
TORCH_CHECK_DEVICE_INDEX(device_index);
const at::cuda::OptionalCUDAGuard device_guard(device);
prepare_buffers_cuda
(
device_index,
(half*) temp_state.data_ptr(),
(half*) temp_dq.data_ptr()
);
}
// Create Q4Matrix, return handle
uintptr_t make_q4
(
torch::Tensor qweight,
torch::Tensor qzeros,
torch::Tensor scales,
torch::Tensor g_idx,
int device
)
{
TORCH_CHECK_DTYPE(qweight, kInt);
TORCH_CHECK_DTYPE(qzeros, kInt);
TORCH_CHECK_DTYPE(scales, kHalf);
TORCH_CHECK_DTYPE_OPT(g_idx, kInt);
TORCH_CHECK_SHAPES(qweight, 1, qzeros, 1, 8);
TORCH_CHECK_SHAPES(scales, 1, qweight, 1, 1);
TORCH_CHECK_SHAPES(qzeros, 0, scales, 0, 1);
int width = qweight.size(1);
int height = qweight.size(0) * 8;
int groups = qzeros.size(0);
Q4Matrix* m = new Q4Matrix
(
height,
width,
groups,
(uint32_t*) qweight.data_ptr(),
(uint32_t*) qzeros.data_ptr(),
(half*) scales.data_ptr(),
g_idx.device().is_meta() ? NULL : (uint32_t*) g_idx.data_ptr(),
device
);
g_q4_keep_matrix(m);
return reinterpret_cast<uintptr_t> (m);
}
// Matmul half @ quant -> half
void q4_matmul
(
torch::Tensor x,
uintptr_t w,
torch::Tensor out
)
{
Q4Matrix* wm = reinterpret_cast<Q4Matrix*> (w);
TORCH_CHECK_DTYPE(x, kHalf);
TORCH_CHECK_DTYPE(out, kHalf);
TORCH_CHECK_SHAPES(x, 0, out, 0, 1);
TORCH_CHECK(wm->height == x.size(-1), "x and w have incompatible shapes")
const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
int x_height = x.size(0);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (tuningParams.matmul_recons_thd == 0 || x_height < tuningParams.matmul_recons_thd)
{
q4_matmul_cuda
(
&tuningParams,
(half*) x.data_ptr(),
x_height,
wm,
(half*) out.data_ptr(),
false,
stream
);
}
else
{
q4_matmul_recons_cuda
(
&tuningParams,
(half*) x.data_ptr(),
x_height,
wm,
(half*) out.data_ptr(),
false,
at::cuda::getCurrentCUDABlasHandle()
);
}
}
// Remap columns in half tensor
void column_remap
(
torch::Tensor x,
torch::Tensor x_new,
torch::Tensor x_map
)
{
TORCH_CHECK_DTYPE(x, kHalf);
TORCH_CHECK_DTYPE(x_new, kHalf);
TORCH_CHECK_DTYPE(x_map, kInt);
TORCH_CHECK_SHAPES(x_map, 0, x, 1, 1);
int height = x.size(0);
int width = x.size(1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
column_remap_cuda
(
(half*) x.data_ptr(),
(half*) x_new.data_ptr(),
height,
width,
(uint32_t*) x_map.data_ptr()
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
m.def("set_tuning_params", &set_tuning_params, "set_tuning_params");
m.def("prepare_buffers", &prepare_buffers, "prepare_buffers");
m.def("cleanup", &cleanup, "cleanup");
m.def("make_q4", &make_q4, "make_q4");
m.def("q4_matmul", &q4_matmul, "q4_matmul");
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/exllama_ext.cpp/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/exllama_ext.cpp",
"repo_id": "text-generation-inference",
"token_count": 3279
} | 233 |
#ifndef _qdq_2_cuh
#define _qdq_2_cuh
#include "qdq_util.cuh"
#include "../../config.h"
#if QMODE_2BIT == 1
// Permutation:
//
// ffddbb99 77553311 eeccaa88 66442200
__forceinline__ __device__ void shuffle_2bit_16
(
uint32_t* q,
int stride
)
{
uint32_t qa = q[0];
uint32_t qb = 0;
#pragma unroll
for (int i = 0; i < 8; i++)
{
uint32_t qa0 = qa & 0x03;
uint32_t qa1 = (qa & 0x0c) >> 2;
qa >>= 4;
qb |= (qa1 << (i * 2 + 16));
qb |= (qa0 << (i * 2));
}
q[0] = qb;
}
__forceinline__ __device__ void dequant_2bit_16
(
const uint32_t q_0,
half2 (&dq)[8],
int stride
)
{
const uint32_t c0 = 0x64006400;
const half y4_ = __float2half_rn(1.0f / 4.0f);
const half y16_ = __float2half_rn(1.0f / 16.0f);
const half y64_ = __float2half_rn(1.0f / 64.0f);
const half2 y4 = __halves2half2(y4_, y4_);
const half2 y16 = __halves2half2(y16_, y16_);
const half2 y64 = __halves2half2(y64_, y64_);
const half z1_ = __float2half_rn(-1024.0f - 2.0f);
const half z4_ = __float2half_rn(-1024.0f / 4.0f - 2.0f);
const half z16_ = __float2half_rn(-1024.0f / 16.0f - 2.0f);
const half z64_ = __float2half_rn(-1024.0f / 64.0f - 2.0f);
const half2 z1 = __halves2half2(z1_, z1_);
const half2 z4 = __halves2half2(z4_, z4_);
const half2 z16 = __halves2half2(z16_, z16_);
const half2 z64 = __halves2half2(z64_, z64_);
uint32_t qa = q_0;
half2_uint32 q0((qa & 0x00030003) | c0); // half2(q[ 0], q[ 1]) + 1024
half2_uint32 q1((qa & 0x000c000c) | c0); // half2(q[ 2], q[ 3]) * 4 + 1024
half2_uint32 q2((qa & 0x00300030) | c0); // half2(q[ 4], q[ 5]) * 16 + 1024
half2_uint32 q3((qa & 0x00c000c0) | c0); // half2(q[ 6], q[ 7]) * 64 + 1024
qa >>= 8;
half2_uint32 q4((qa & 0x00030003) | c0); // half2(q[ 8], q[ 8]) + 1024
half2_uint32 q5((qa & 0x000c000c) | c0); // half2(q[10], q[11]) * 4 + 1024
half2_uint32 q6((qa & 0x00300030) | c0); // half2(q[12], q[13]) * 16 + 1024
half2_uint32 q7((qa & 0x00c000c0) | c0); // half2(q[14], q[15]) * 64 + 1024
dq[0] = __hadd2(q0.as_half2, z1);
dq[1] = __hfma2(q1.as_half2, y4, z4);
dq[2] = __hfma2(q2.as_half2, y16, z16);
dq[3] = __hfma2(q3.as_half2, y64, z64);
dq[4] = __hadd2(q4.as_half2, z1);
dq[5] = __hfma2(q5.as_half2, y4, z4);
dq[6] = __hfma2(q6.as_half2, y16, z16);
dq[7] = __hfma2(q7.as_half2, y64, z64);
}
#else
__forceinline__ __device__ void shuffle_2bit_16
(
uint32_t* q,
int stride
)
{
}
__forceinline__ __device__ void dequant_2bit_16
(
const uint32_t q_0,
half2 (&dq)[8],
int stride
)
{
half dqh[16];
for (int i = 0; i < 16; i++) dqh[i] = dq_ns(exb(q_0, i * 2, 0x03), 2);
for (int i = 0; i < 8; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}
#endif
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_2.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_2.cuh",
"repo_id": "text-generation-inference",
"token_count": 1589
} | 234 |
import pytest
import torch
from copy import copy
from transformers import AutoTokenizer
from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLMBatch
from text_generation_server.utils import weight_hub_files, download_weights
from text_generation_server.models.bloom import BloomCausalLMBatch, BLOOMSharded
from text_generation_server.models.custom_modeling.bloom_modeling import (
BloomForCausalLM,
)
@pytest.fixture(scope="session")
def default_bloom():
model_id = "bigscience/bloom-560m"
revision = "main"
filenames = weight_hub_files(model_id, revision, ".safetensors")
download_weights(filenames, model_id, revision)
return BLOOMSharded(
model_id,
model_class=BloomForCausalLM,
)
@pytest.fixture(scope="session")
def bloom_560m_tokenizer():
return AutoTokenizer.from_pretrained("bigscience/bloom-560m", padding_side="left")
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
return generate_pb2.Request(
id=0,
inputs="Test",
input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
prefill_logprobs=True,
truncate=100,
parameters=default_pb_parameters,
stopping_parameters=default_pb_stop_parameters,
)
@pytest.fixture
def default_pb_batch(default_pb_request):
return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)
@pytest.fixture
def default_bloom_batch(default_pb_batch, bloom_560m_tokenizer):
return BloomCausalLMBatch.from_pb(
default_pb_batch, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
)
@pytest.fixture
def default_multi_requests_bloom_batch(default_pb_request, bloom_560m_tokenizer):
req_0 = copy(default_pb_request)
req_0.id = 1
req_1 = default_pb_request
req_1.id = 2
req_1.stopping_parameters.max_new_tokens = 5
batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
return BloomCausalLMBatch.from_pb(
batch_pb, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
)
def test_batch_from_pb(default_pb_batch, default_bloom_batch):
batch = default_bloom_batch
assert batch.batch_id == default_pb_batch.id
assert batch.requests == default_pb_batch.requests
assert len(batch.input_ids) == default_pb_batch.size
assert batch.input_ids[0][-1] == 10264
assert torch.all(batch.input_ids[0][:-1] == 3)
assert batch.attention_mask[0][0] == 1
assert torch.all(batch.attention_mask[0][1:] == 0)
assert batch.past_key_values is None
assert all(
[
torch.equal(input_ids, all_input_ids[:, 0])
for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
]
)
assert batch.input_lengths == [1]
assert len(batch) == default_pb_batch.size
assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)
assert batch.max_input_length == batch.input_lengths[0]
def test_batch_concatenate_no_prefill(default_bloom_batch):
with pytest.raises(ValueError):
BloomCausalLMBatch.concatenate([default_bloom_batch, default_bloom_batch])
def test_causal_lm_batch_type(default_bloom):
assert default_bloom.batch_type == BloomCausalLMBatch
def test_causal_lm_generate_token(default_bloom, default_bloom_batch):
sequence_length = len(default_bloom_batch.all_input_ids[0])
generations, next_batch, _ = default_bloom.generate_token(default_bloom_batch)
assert len(generations) == len(default_bloom_batch)
assert isinstance(next_batch, CausalLMBatch)
assert not next_batch.keys_head_dim_last
assert len(next_batch.all_input_ids) == len(next_batch)
assert len(next_batch.all_input_ids[0]) == sequence_length + 1
assert len(next_batch.attention_mask[0]) == 11
assert torch.all(next_batch.all_input_ids[0][-2:] == 10264)
assert torch.all(next_batch.all_input_ids[0][:-2] == 3)
assert torch.all(next_batch.attention_mask[0][:2] == 1)
assert torch.all(next_batch.attention_mask[0][2:] == 0)
assert next_batch.input_ids.shape == (len(next_batch), 1)
assert next_batch.input_ids[0, 0] == 10264
assert next_batch.input_lengths == [2]
assert next_batch.max_input_length == next_batch.input_lengths[0]
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (16, 64, sequence_length) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (16, sequence_length, 64) for p in next_batch.past_key_values]
)
assert all([generation.generated_text is None for generation in generations])
assert all([len(generation.prefill_tokens) == 1 for generation in generations])
assert all(
[
token_id.item() == 10264
for generation in generations
for token_id in generation.tokens.token_ids
]
)
assert all(
[
token_text == "Test"
for generation in generations
for token_text in generation.tokens.texts
]
)
assert generations[0].request_id == 0
def test_causal_lm_generate_token_completion(default_bloom, default_bloom_batch):
next_batch = default_bloom_batch
for _ in range(default_bloom_batch.stopping_criterias[0].max_new_tokens - 1):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(default_bloom_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert generations[0].request_id == default_bloom_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_bloom_batch.stopping_criterias[0].max_new_tokens
)
def test_causal_lm_generate_token_completion_multi(
default_bloom, default_multi_requests_bloom_batch
):
next_batch = default_multi_requests_bloom_batch
for i in range(
default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(default_multi_requests_bloom_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[1].generated_text.text == "TestTestTestTestTest"
assert (
generations[1].request_id == default_multi_requests_bloom_batch.requests[1].id
)
assert (
generations[1].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
)
# Copy stopping_criterias before filtering
stopping_criterias = default_multi_requests_bloom_batch.stopping_criterias.copy()
next_batch = next_batch.filter([next_batch.requests[0].id])
for _ in range(
stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert (
generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
)
def test_batch_concatenate(
default_bloom, default_bloom_batch, default_multi_requests_bloom_batch
):
next_batch_0 = default_bloom_batch
_, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
_, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
next_batch_1 = default_multi_requests_bloom_batch
_, next_batch_1, _ = default_bloom.generate_token(next_batch_1)
# Clone past_key_values before concatenating to compare after,
# because they are removed from the concatenated batches
next_batch_0_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
]
next_batch_1_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
]
next_batch = BloomCausalLMBatch.concatenate([next_batch_0, next_batch_1])
assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])
assert torch.all(
next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
)
assert torch.all(
next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
)
assert torch.all(next_batch.attention_mask[1:, 3:] == 0)
assert next_batch.batch_id == 0
assert torch.all(next_batch.input_ids == 10264)
assert next_batch.input_lengths == [3, 2, 2]
assert next_batch.max_input_length == 3
assert next_batch.requests[0] == next_batch_0.requests[0]
assert next_batch.requests[1:] == next_batch_1.requests
assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers
assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias
assert next_batch.past_key_values is not None
assert all([p[0].shape == (3, 16, 64, 2) for p in next_batch.past_key_values])
assert all([p[1].shape == (3, 16, 2, 64) for p in next_batch.past_key_values])
for i, past in enumerate(next_batch.past_key_values):
assert torch.equal(next_batch_0_past_key_values[i][0][:, :, -2:], past[0][0])
assert torch.equal(
next_batch_1_past_key_values[i][0][:, :, -1:],
past[0][1:, :, :, -1].reshape(-1, 64, 1),
)
assert torch.equal(next_batch_0_past_key_values[i][1][:, -2:, :], past[1][0])
assert torch.equal(
next_batch_1_past_key_values[i][1][:, -1:, :],
past[1][1:, :, -1, :].reshape(-1, 1, 64),
)
for _ in range(
default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 2
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 3
assert generations[2].generated_text.text == "TestTestTestTestTest"
assert (
generations[2].request_id == default_multi_requests_bloom_batch.requests[1].id
)
assert (
generations[2].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
)
next_batch = next_batch.filter(
[next_batch.requests[0].id, next_batch.requests[1].id]
)
for _ in range(
default_bloom_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
- 2
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert generations[0].request_id == default_bloom_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_bloom_batch.stopping_criterias[0].max_new_tokens
)
next_batch = next_batch.filter([next_batch.requests[1].id])
for _ in range(
default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
- default_bloom_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
- 4
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert (
generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
)
| text-generation-inference/server/tests/models/test_bloom.py/0 | {
"file_path": "text-generation-inference/server/tests/models/test_bloom.py",
"repo_id": "text-generation-inference",
"token_count": 5400
} | 235 |
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/adapters/weights.py
# License: Apache License Version 2.0, January 2004
from abc import ABC, abstractclassmethod
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, Type
import torch
@dataclass
class AdapterBatchMetadata:
# [batch_size]
adapter_indices: torch.Tensor
# [num_adapters]
adapter_set: Set[int]
# [num_segments + 1]
adapter_segments: torch.Tensor
# [num_segments]
# maps from segment index to adapter index, i.e.:
# segment_indices[s] == adapter_indices[i]
segment_indices: List[int]
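    # Illustrative example: if adapter_indices is [0, 0, 1, 1, 1], then
    # adapter_set == {0, 1}, adapter_segments == [0, 2, 5] (segment boundaries)
    # and segment_indices == [0, 1] (segment s -> adapter id).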
class AdapterWeights(ABC):
@abstractclassmethod
def get_batch_types(cls) -> List[Type["BatchAdapterWeights"]]:
pass
@property
def speculative_tokens(self) -> int:
return 0
class BatchAdapterWeights(ABC):
@abstractclassmethod
def has_adapter(self, adapter_index: int) -> bool:
pass
@abstractclassmethod
def load(
cls,
adapter_weights: Dict[int, AdapterWeights],
meta: "AdapterBatchMetadata",
prefill: bool,
prefill_head_indices: torch.Tensor,
) -> Optional["BatchAdapterWeights"]:
pass
class LayerAdapterWeights:
"""Adapter weights that apply to a particular layer."""
def __init__(self):
self.adapter_weights: Dict[int, AdapterWeights] = {}
def add_adapter(self, adapter_idx: int, weights: AdapterWeights):
self.adapter_weights[adapter_idx] = weights
def remove_adapter(self, adapter_idx: int):
if adapter_idx not in self.adapter_weights:
return
del self.adapter_weights[adapter_idx]
def is_empty(self) -> bool:
return len(self.adapter_weights) == 0
def get_data(
self,
meta: AdapterBatchMetadata,
prefill: bool,
prefill_head_indices: Optional[torch.Tensor],
) -> Dict[str, BatchAdapterWeights]:
# bucket adapters by batch class
adapter_batch_types: Dict[
Type[BatchAdapterWeights], Dict[int, AdapterWeights]
] = defaultdict(dict)
for adapter_index, adapter_weights in self.adapter_weights.items():
for batch_type in adapter_weights.get_batch_types():
adapter_batch_types[batch_type][adapter_index] = adapter_weights
batch_data = {}
for batch_type, adapter_weights in adapter_batch_types.items():
batched_weights = batch_type.load(
adapter_weights, meta, prefill, prefill_head_indices
)
if batched_weights is not None:
batch_data = batched_weights
return batch_data
@dataclass
class AdapterBatchData:
meta: AdapterBatchMetadata
# layer type -> adapter type -> batch weight data
data: Dict[str, Dict[str, BatchAdapterWeights]]
prefill: bool
@staticmethod
def from_meta(
meta: AdapterBatchMetadata,
weights: Dict[str, LayerAdapterWeights],
prefill: bool,
prefill_head_indices: Optional[torch.Tensor],
) -> "AdapterBatchData":
data = {}
for k, v in weights.items():
if v.is_empty():
continue
data[k] = v.get_data(
meta, prefill, prefill_head_indices if k == "lm_head" else None
)
return AdapterBatchData(meta=meta, data=data, prefill=prefill)
def ranks(self) -> Set[int]:
# TODO(travis): refactor to be less coupled to lora implementation
ranks = set()
for lora_data in self.data.values():
if lora_data is None:
continue
for rank_data in lora_data.rank_data.values():
ranks.add(rank_data.rank)
return ranks
def layer_names(self) -> Set[str]:
return set(self.data.keys())
def adapter_keys(self) -> Set[str]:
adapter_keys = set()
for layer_data in self.data.values():
adapter_keys.update(layer_data.keys())
return adapter_keys
@property
def max_rank(self) -> int:
ranks = self.ranks()
return max(ranks) if len(ranks) > 0 else 0
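# Usage sketch (illustrative; `layer_weights` is a hypothetical dict mapping layer
# names to populated LayerAdapterWeights instances):
#
#   meta = AdapterBatchMetadata(
#       adapter_indices=torch.tensor([0, 0, 1]),
#       adapter_set={0, 1},
#       adapter_segments=torch.tensor([0, 2, 3]),
#       segment_indices=[0, 1],
#   )
#   batch_data = AdapterBatchData.from_meta(
#       meta, layer_weights, prefill=True, prefill_head_indices=None
#   )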
| text-generation-inference/server/text_generation_server/adapters/weights.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/adapters/weights.py",
"repo_id": "text-generation-inference",
"token_count": 1824
} | 236 |
from dataclasses import dataclass
import torch
from EETQ import quant_weights, w8_a16_gemm
from text_generation_server.utils.weights import UnquantizedWeight
@dataclass
class EETQWeight(UnquantizedWeight):
weight: torch.Tensor
def get_linear(self, bias: torch.Tensor):
try:
from text_generation_server.layers.eetq import EETQLinear
return EETQLinear(self.weight, bias)
except ImportError:
raise ImportError(
"Please install EETQ from https://github.com/NetEase-FuXi/EETQ"
)
class EETQLinear(torch.nn.Module):
def __init__(
self,
weight,
bias,
) -> None:
super().__init__()
device = weight.device
if weight.dtype != torch.float16:
weight = weight.to(dtype=torch.float16)
weight = torch.t(weight).contiguous().cpu()
weight, scale = quant_weights(weight, torch.int8, False)
self.weight = weight.cuda(device)
self.scale = scale.cuda(device)
self.bias = bias.cuda(device) if bias is not None else None
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = w8_a16_gemm(input, self.weight, self.scale)
output = output + self.bias if self.bias is not None else output
return output
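# Usage sketch (illustrative; assumes the EETQ extension and a CUDA device are available):
#
#   linear = torch.nn.Linear(4096, 4096, bias=False, dtype=torch.float16, device="cuda")
#   eetq_linear = EETQLinear(linear.weight.detach(), bias=None)
#   y = eetq_linear(torch.randn(8, 4096, dtype=torch.float16, device="cuda"))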
| text-generation-inference/server/text_generation_server/layers/eetq.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/eetq.py",
"repo_id": "text-generation-inference",
"token_count": 574
} | 237 |
from dataclasses import dataclass
from typing import List, Optional, Union
import torch
import torch.nn as nn
from text_generation_server.layers.marlin.util import _check_marlin_kernels
from text_generation_server.utils.weights import Weight, Weights, WeightsLoader
try:
import marlin_kernels
except ImportError:
marlin_kernels = None
class MarlinWeightsLoader(WeightsLoader):
"""Loader for Marlin-quantized weights."""
def __init__(self, *, bits: int, is_marlin_24: bool):
self.bits = bits
self.is_marlin_24 = is_marlin_24
def get_weights(self, weights: "Weights", prefix: str):
"""
        Get weights at the given prefix and apply them without tensor parallelism.
"""
is_marlin_24 = getattr(self, "gptq_checkpoint_format", None) == "marlin_24"
if is_marlin_24:
try:
B = weights.get_tensor(f"{prefix}.B_24")
except RuntimeError:
raise RuntimeError(
"Cannot load `marlin` 2:4 sparsity weight, make sure the model is already quantized."
)
B_meta = weights.get_tensor(f"{prefix}.B_meta")
s = weights.get_tensor(f"{prefix}.s")
weight = GPTQMarlin24Weight(B=B, B_meta=B_meta, s=s, bits=self.bits)
else:
try:
B = weights.get_tensor(f"{prefix}.B")
except RuntimeError:
raise RuntimeError(
"Cannot load `marlin` weight, make sure the model is already quantized."
)
s = weights.get_tensor(f"{prefix}.s")
weight = MarlinWeight(B=B, s=s)
return weight
def get_weights_col_packed(
self,
weights: Weights,
prefix: str,
block_sizes: Union[int, List[int]],
):
if self.is_marlin_24:
B = weights.get_packed_sharded(
f"{prefix}.B_24", dim=1, block_sizes=block_sizes
)
B_meta = weights.get_packed_sharded(
f"{prefix}.B_meta", dim=1, block_sizes=block_sizes
)
s = weights.get_packed_sharded(
f"{prefix}.s", dim=1, block_sizes=block_sizes
)
weight = GPTQMarlin24Weight(B=B, B_meta=B_meta, s=s, bits=self.bits)
else:
B = weights.get_packed_sharded(
f"{prefix}.B", dim=1, block_sizes=block_sizes
)
s = weights.get_packed_sharded(
f"{prefix}.s", dim=1, block_sizes=block_sizes
)
weight = MarlinWeight(B=B, s=s)
return weight
def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int):
if self.is_marlin_24:
try:
B = torch.cat(
[weights.get_sharded(f"{p}.B_24", dim=1) for p in prefixes], dim=1
)
except RuntimeError:
raise RuntimeError(
"Cannot load `marlin` weight, make sure the model is already quantized"
)
B_meta = torch.cat(
[weights.get_sharded(f"{p}.B_meta", dim=1) for p in prefixes], dim=1
)
s = torch.cat(
[weights.get_sharded(f"{p}.s", dim=1) for p in prefixes], dim=1
)
weight = GPTQMarlin24Weight(B=B, B_meta=B_meta, s=s, bits=self.bits)
else:
try:
B = torch.cat(
[weights.get_sharded(f"{p}.B", dim=1) for p in prefixes], dim=1
)
except RuntimeError:
raise RuntimeError(
"Cannot load `marlin` weight, make sure the model is already quantized"
)
s = torch.cat(
[weights.get_sharded(f"{p}.s", dim=1) for p in prefixes], dim=1
)
weight = MarlinWeight(B=B, s=s)
return weight
def get_weights_row(self, weights: Weights, prefix: str):
if self.is_marlin_24:
try:
B = weights.get_sharded(f"{prefix}.B_24", dim=0)
except RuntimeError:
raise RuntimeError(
"Cannot load `marlin` 2:4 sparsity weight, make sure the model is already quantized."
)
B_meta = weights.get_sharded(f"{prefix}.B_meta", dim=0)
num_groups = weights._get_slice(f"{prefix}.s").get_shape()[0]
if num_groups == 1:
# The number of groups is 1 when groupsize == -1. share
# scales between all shards in this case.
s = weights.get_tensor(f"{prefix}.s")
else:
s = weights.get_sharded(f"{prefix}.s", dim=0)
weight = GPTQMarlin24Weight(B=B, B_meta=B_meta, s=s, bits=self.bits)
else:
try:
B = weights.get_sharded(f"{prefix}.B", dim=0)
except RuntimeError:
raise RuntimeError(
"Cannot load `marlin` weight, make sure the model is already quantized."
)
num_groups = weights._get_slice(f"{prefix}.s").get_shape()[0]
if num_groups == 1:
# The number of groups is 1 when groupsize == -1. share
# scales between all shards in this case.
s = weights.get_tensor(f"{prefix}.s")
else:
s = weights.get_sharded(f"{prefix}.s", dim=0)
weight = MarlinWeight(B=B, s=s)
return weight
@dataclass
class MarlinWeight(Weight):
"""
Marlin weights.
Attributes:
B (torch.Tensor): int4-quantized weights packed into int32.
s (torch.Tensor): bfloat16/float16 scales.
"""
B: torch.Tensor
s: torch.Tensor
def __post_init__(self):
assert self.B.dtype == torch.int32
assert self.s.dtype in [torch.float16, torch.bfloat16]
def get_linear(self, bias: torch.Tensor):
return MarlinLinear(weight=self, bias=bias)
class MarlinLinear(nn.Module):
def __init__(self, *, weight: MarlinWeight, bias: Optional[torch.Tensor]):
super().__init__()
_check_marlin_kernels()
assert marlin_kernels is not None
in_features = weight.B.shape[0] * MARLIN_TILE_SIZE
out_features = weight.s.shape[1]
        assert (
            in_features % 128 == 0
        ), f"Number of input features ({in_features}) not divisible by 128"
        assert (
            out_features % 256 == 0
        ), f"Number of output features ({out_features}) not divisible by 256"
groupsize = -1 if weight.s.shape[0] == 1 else in_features // weight.s.shape[0]
assert groupsize in {
-1,
128,
}, f"Group size must be -1 or 128, was {groupsize}"
self.B = weight.B
self.s = weight.s
if bias is not None:
self.bias = bias
else:
self.bias = None
self.workspace = torch.zeros(
out_features // 64 * 16, dtype=torch.int, device=weight.B.device
)
def forward(self, A: torch.Tensor) -> torch.Tensor:
assert marlin_kernels is not None
C = marlin_kernels.marlin_gemm(
A.view(-1, A.shape[-1]),
self.B,
self.s,
self.workspace,
A.shape[0],
self.s.shape[1],
A.shape[1],
)
C = C.reshape(A.shape[:-1] + (self.s.shape[1],))
if self.bias is not None:
C += self.bias
return C
GPTQ_MARLIN_24_MIN_THREAD_N = 128
GPTQ_MARLIN_24_MIN_THREAD_K = 128
GPTQ_MARLIN_24_MAX_PARALLEL = 64
GPTQ_MARLIN_24_SUPPORTED_NUM_BITS = [4, 8]
GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES = [-1, 128]
MARLIN_TILE_SIZE = 16
@dataclass
class GPTQMarlin24Weight:
"""
GPTQ-Marlin 2:4 weights.
Attributes:
B (torch.Tensor): int4-quantized weights packed into int32.
B_meta (torch.Tensor): metadata for 2:4 sparsity.
s (torch.Tensor): float16 scales.
bits: quantized weight size.
"""
B: torch.Tensor
B_meta: torch.Tensor
s: torch.Tensor
bits: int
def __post_init__(self):
assert self.B.dtype == torch.int32
assert self.B_meta.dtype == torch.int16
assert self.s.dtype == torch.float16
def get_linear(self, bias: torch.Tensor):
return GPTQMarlin24Linear(
weight=self,
bias=bias,
)
class GPTQMarlin24Linear(nn.Module):
def __init__(self, *, weight: GPTQMarlin24Weight, bias: Optional[torch.Tensor]):
super().__init__()
_check_marlin_kernels()
assert marlin_kernels is not None
if weight.bits not in GPTQ_MARLIN_24_SUPPORTED_NUM_BITS:
supported_bits = ", ".join(
str(b) for b in GPTQ_MARLIN_24_SUPPORTED_NUM_BITS
)
raise RuntimeError(
f"{weight.bits}-bit GPTQ Sparse 2:4 Marlin is not supported, must be one of: {supported_bits}"
)
in_features = weight.B.shape[0] * MARLIN_TILE_SIZE * 2
out_features = weight.s.shape[1]
groupsize = -1 if weight.s.shape[0] == 1 else in_features // weight.s.shape[0]
if groupsize not in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES:
supported_sizes = ", ".join(
str(b) for b in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES
)
raise RuntimeError(
f"Group size {groupsize} is not supported, must be one of: {supported_sizes}"
)
self.bits = weight.bits
weights_per_int32 = 32 // self.bits
        assert (
            out_features % GPTQ_MARLIN_24_MIN_THREAD_N == 0
        ), f"Number of output features ({out_features}) not divisible by {GPTQ_MARLIN_24_MIN_THREAD_N} threads"
        assert (
            out_features % weights_per_int32 == 0
        ), f"Number of output features ({out_features}) not divisible by weights per int32 ({weights_per_int32})"
        assert (
            in_features % GPTQ_MARLIN_24_MIN_THREAD_K == 0
        ), f"Number of input features ({in_features}) not divisible by {GPTQ_MARLIN_24_MIN_THREAD_K} threads"
        if groupsize != -1 and in_features % groupsize != 0:
            raise ValueError(
                f"Number of input features ({in_features}) not divisible by group size ({groupsize})"
)
self.B = weight.B
self.B_meta = weight.B_meta
self.s = weight.s
if bias is not None:
self.bias = bias
else:
self.bias = None
self.workspace = torch.zeros(
(out_features // GPTQ_MARLIN_24_MIN_THREAD_N) * GPTQ_MARLIN_24_MAX_PARALLEL,
dtype=torch.int,
device=weight.B.device,
)
def forward(self, A: torch.Tensor) -> torch.Tensor:
assert marlin_kernels is not None
C = marlin_kernels.gptq_marlin_24_gemm(
A.view(-1, A.shape[-1]),
self.B,
self.B_meta,
self.s,
self.workspace,
self.bits,
A.shape[0],
self.s.shape[1],
A.shape[1],
)
C = C.reshape(A.shape[:-1] + (self.s.shape[1],))
if self.bias is not None:
C += self.bias
return C
| text-generation-inference/server/text_generation_server/layers/marlin/marlin.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/marlin/marlin.py",
"repo_id": "text-generation-inference",
"token_count": 5812
} | 238 |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
reshape_and_cache,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.layers.layernorm import (
FastRMSNorm,
)
from text_generation_server.utils.weights import UnquantizedWeight
class Gemma2Config(PretrainedConfig):
def __init__(
self,
vocab_size=256128,
hidden_size=3072,
intermediate_size=24576,
num_hidden_layers=28,
num_attention_heads=16,
num_key_value_heads=16,
head_dim=256,
hidden_act="gelu_pytorch_tanh",
max_position_embeddings=8192,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=None,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=True,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.head_dim = head_dim
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
class Gemma2FastRMSNorm(FastRMSNorm):
@classmethod
def load(cls, prefix: str, weights, eps=1e-6):
dtype = weights.dtype
weights.dtype = torch.float32
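        # Gemma2 checkpoints store the RMSNorm weight as an offset from 1 (the norm
        # computes x * (1 + w)), so add 1 back here and keep the plain x * w form in forward.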
weight = weights.get_tensor(f"{prefix}.weight") + 1
weights.dtype = dtype
new = cls(weight, eps)
new.dtype = dtype
return new
# perform the multiplication in full precision and downcast after
def forward(self, hidden_states, residual=None):
if residual is not None:
hidden_states += residual
residual = hidden_states
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = hidden_states * self.weight
return hidden_states.to(self.dtype), residual
def load_attention(config, prefix: str, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
def _load_gqa(config, prefix: str, weights):
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.head_dim
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
    ], f"{list(weight.weight.shape)} != {[(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size]}"
return TensorParallelColumnLinear(get_linear(weight, bias=None))
class FlashGemma2Attention(torch.nn.Module):
def __init__(self, prefix: str, config, weights, causal: bool, is_sliding: bool):
super().__init__()
self.num_heads = config.num_attention_heads
self.head_size = config.head_dim
self.causal = causal
if is_sliding:
self.window_size = config.sliding_window
else:
self.window_size = -1
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
# self.softmax_scale = self.head_size**-0.5
self.softmax_scale = config.query_pre_attn_scalar**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
                f"and `num_shards`: {weights.process_group.size()})"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.softcap = config.attn_logit_softcapping
self.query_key_value = load_attention(config, prefix, weights)
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.num_groups = self.num_heads // self.num_key_value_heads
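        # Grouped-query attention: map every query head to the KV head it shares, so the
        # paged attention kernel can index the smaller KV cache directly.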
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.query_key_value(hidden_states)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query,
kv_cache[0],
kv_cache[1],
seqlen,
block_tables,
self.softmax_scale,
causal=self.causal,
window_size_left=self.window_size,
softcap=self.softcap,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache[0],
kv_cache[1],
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
softcap=self.softcap,
)
return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
class Gemma2MLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_activation
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
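        # GeGLU: apply the activation to the gate half, multiply elementwise by the up
        # half, then project back down to the hidden size.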
return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1])
class FlashGemma2Layer(nn.Module):
def __init__(self, prefix: str, config, weights, causal: bool, is_sliding: bool):
super().__init__()
self.self_attn = FlashGemma2Attention(
prefix=f"{prefix}.self_attn",
config=config,
weights=weights,
causal=causal,
is_sliding=is_sliding,
)
self.mlp = Gemma2MLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.input_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.pre_feedforward_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.pre_feedforward_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.post_feedforward_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.post_feedforward_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
# faster post attention rms norm
normed_attn_res_output, _ = self.post_attention_layernorm(attn_output)
normed_attn_res_output = normed_attn_res_output + res
res = normed_attn_res_output
pre_normed, _ = self.pre_feedforward_layernorm(normed_attn_res_output)
mlp_output = self.mlp(pre_normed)
post_hidden_states, _ = self.post_feedforward_layernorm(mlp_output)
return post_hidden_states, normed_attn_res_output
class FlashGemma2Model(torch.nn.Module):
def __init__(self, prefix: str, config, weights, causal: bool):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.layers = nn.ModuleList(
[
FlashGemma2Layer(
prefix=f"{prefix}.layers.{layer_id}",
config=config,
weights=weights,
causal=causal,
is_sliding=layer_id % 2 == 0,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps
)
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
inputs_embeds: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
) -> torch.Tensor:
hidden_states = inputs_embeds
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashGemma2ForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights, *, causal: bool = True):
super().__init__()
embed_norm = config.hidden_size**0.5
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.embed_tokens.weight *= embed_norm
self.model = FlashGemma2Model(
prefix=prefix, config=config, weights=weights, causal=causal
)
self.lm_head = SpeculativeHead.load(
prefix=(
f"{prefix}.embed_tokens"
if config.tie_word_embeddings
else f"{prefix}.lm_head"
),
config=config,
weights=weights,
)
self.softcap = config.final_logit_softcapping
assert isinstance(self.softcap, float)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
input_embeds = self.embed_tokens(input_ids)
hidden_states = self.model(
input_embeds,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
logits /= self.softcap
logits = torch.tanh(logits)
logits *= self.softcap
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 8268
} | 239 |
import torch
import os
from loguru import logger
from typing import Dict, Optional
from text_generation_server.utils.log import log_master
PREFIX_CACHING = os.getenv("USE_PREFIX_CACHING").lower() in {"1", "true"}
log_master(logger.info, f"Using prefix caching = {PREFIX_CACHING}")
ATTENTION = os.getenv("ATTENTION")
_expected = {"paged", "flashdecoding", "flashinfer"}
assert (
ATTENTION in _expected
), f"Attention is not valid {ATTENTION}, expected {_expected}"
log_master(logger.info, f"Using Attention = {ATTENTION}")
if PREFIX_CACHING and ATTENTION not in {"flashinfer", "flashdecoding"}:
    raise RuntimeError("Prefix caching is only supported with flashinfer or flashdecoding")
MEM_POOL = torch.cuda.graph_pool_handle() if torch.cuda.is_available() else None
TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.95"))
assert TGI_WIGGLE_ROOM > 0
assert TGI_WIGGLE_ROOM < 1
# This is overridden by the cli
BLOCK_SIZE: int
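# KV-cache block size in tokens: flashinfer works with per-token pages, flashdecoding
# with large 256-token blocks, and classic paged attention with 16-token blocks.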
if ATTENTION == "flashdecoding":
BLOCK_SIZE = 256
elif ATTENTION == "flashinfer":
BLOCK_SIZE = 1
else:
BLOCK_SIZE = 16
cuda_graphs = os.getenv("CUDA_GRAPHS")
if cuda_graphs is not None:
try:
cuda_graphs = [int(item) for item in cuda_graphs.split(",")]
except Exception as e:
raise RuntimeError(
f"Could not parse cuda graphs {cuda_graphs}, expected comma separated list for batch sizes to run on: {e}"
)
else:
cuda_graphs = None
# sorting the cuda graphs in descending order helps reduce the
# memory impact and results in less memory usage
if cuda_graphs is not None:
cuda_graphs.sort(reverse=True)
CUDA_GRAPHS = cuda_graphs
# NOTE: eventually we should move this into the router and pass back the
# index in all cases.
ADAPTER_TO_INDEX: Optional[Dict[str, int]] = None
def set_adapter_to_index(adapter_to_index: Dict[str, int]):
global ADAPTER_TO_INDEX
ADAPTER_TO_INDEX = adapter_to_index
def get_adapter_to_index():
global ADAPTER_TO_INDEX
return ADAPTER_TO_INDEX
| text-generation-inference/server/text_generation_server/models/globals.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/globals.py",
"repo_id": "text-generation-inference",
"token_count": 740
} | 240 |
import os
import torch
from datetime import timedelta
from loguru import logger
from text_generation_server.utils.import_utils import SYSTEM
# Tensor Parallelism settings
RANK = int(os.getenv("RANK", "0"))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", "1"))
# CUDA memory fraction
MEMORY_FRACTION = float(os.getenv("CUDA_MEMORY_FRACTION", "1.0"))
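# Single-process stand-ins for torch.distributed primitives, used when WORLD_SIZE == 1
# or when DEBUG=1 so the calling code can follow the same path without a real process group.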
class FakeBarrier:
def wait(self):
pass
class FakeGroup:
def __init__(self, rank, size):
self._rank = rank
self._size = size
def allreduce(self, *args, **kwargs):
return FakeBarrier()
def allgather(self, inputs, local_tensor, **kwargs):
assert (
len(inputs[0]) == len(local_tensor) == 1
), f"{len(inputs[0])} != {len(local_tensor)} != 1, and the FakeGroup is supposed to join on simple tensors"
for input_ in inputs:
input_[0].data = local_tensor[0].data
return FakeBarrier()
def barrier(self, *args, **kwargs):
return FakeBarrier()
def size(self):
return self._size
def rank(self):
return self._rank
def initialize_torch_distributed():
if torch.cuda.is_available():
from torch.distributed import ProcessGroupNCCL
# Set the device id.
assert WORLD_SIZE <= torch.cuda.device_count(), "Each process is one gpu"
device = RANK % torch.cuda.device_count()
torch.cuda.set_device(device)
torch.cuda.set_per_process_memory_fraction(MEMORY_FRACTION, device)
backend = "nccl"
options = ProcessGroupNCCL.Options()
options.is_high_priority_stream = True
options._timeout = timedelta(seconds=120)
else:
backend = "gloo"
options = None
if WORLD_SIZE == 1:
return FakeGroup(RANK, WORLD_SIZE), RANK, WORLD_SIZE
else:
if os.getenv("DEBUG", None) == "1":
return FakeGroup(RANK, WORLD_SIZE), RANK, WORLD_SIZE
if not torch.distributed.is_initialized():
# Call the init process.
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
ipex.distributed.init_process_group(
backend="ccl",
world_size=WORLD_SIZE,
rank=RANK,
timeout=timedelta(seconds=120),
pg_options=options,
)
else:
torch.distributed.init_process_group(
backend=backend,
world_size=WORLD_SIZE,
rank=RANK,
timeout=timedelta(seconds=120),
pg_options=options,
)
else:
logger.warning("torch.distributed is already initialized.")
return torch.distributed.group.WORLD, RANK, WORLD_SIZE
| text-generation-inference/server/text_generation_server/utils/dist.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/dist.py",
"repo_id": "text-generation-inference",
"token_count": 1328
} | 241 |
import subprocess
import argparse
import ast
import json
import os
TEMPLATE = """
# Supported Models and Hardware
Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models (VLMs & LLMs) are supported.
## Supported Models
SUPPORTED_MODELS
If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyways to see how well it performs, but performance isn't guaranteed for non-optimized models:
```python
# for causal LMs/text-generation models
AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")
# or, for text-to-text generation models
AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")
```
If you wish to serve a supported model that already exists on a local folder, just point to the local folder.
```bash
text-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM>
```
"""
def check_cli(check: bool):
output = subprocess.check_output(["text-generation-launcher", "--help"]).decode(
"utf-8"
)
wrap_code_blocks_flag = "<!-- WRAP CODE BLOCKS -->"
final_doc = f"# Text-generation-launcher arguments\n\n{wrap_code_blocks_flag}\n\n"
lines = output.split("\n")
header = ""
block = []
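    # Walk the --help output: every option line closes the previous block under its
    # header, then derives the next header from the option's <METAVAR> (or from the
    # long flag name when there is no metavar).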
for line in lines:
        if line.startswith("  -") or line.startswith("      -"):
rendered_block = "\n".join(block)
if header:
final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n"
else:
final_doc += f"```shell\n{rendered_block}\n```\n"
block = []
tokens = line.split("<")
if len(tokens) > 1:
header = tokens[-1][:-1]
else:
header = line.split("--")[-1]
header = header.upper().replace("-", "_")
block.append(line)
rendered_block = "\n".join(block)
final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n"
block = []
filename = "docs/source/reference/launcher.md"
if check:
with open(filename, "r") as f:
doc = f.read()
if doc != final_doc:
tmp = "launcher.md"
with open(tmp, "w") as g:
g.write(final_doc)
diff = subprocess.run(
["diff", tmp, filename], capture_output=True
).stdout.decode("utf-8")
print(diff)
raise Exception(
"Cli arguments Doc is not up-to-date, run `python update_doc.py` in order to update it"
)
else:
with open(filename, "w") as f:
f.write(final_doc)
def check_supported_models(check: bool):
filename = "server/text_generation_server/models/__init__.py"
with open(filename, "r") as f:
tree = ast.parse(f.read())
enum_def = [
x for x in tree.body if isinstance(x, ast.ClassDef) and x.name == "ModelType"
][0]
_locals = {}
_globals = {}
exec(f"import enum\n{ast.unparse(enum_def)}", _globals, _locals)
ModelType = _locals["ModelType"]
list_string = ""
for data in ModelType:
list_string += f"- [{data.value['name']}]({data.value['url']})"
if data.value.get("multimodal", None):
list_string += " (Multimodal)"
list_string += "\n"
final_doc = TEMPLATE.replace("SUPPORTED_MODELS", list_string)
filename = "docs/source/supported_models.md"
if check:
with open(filename, "r") as f:
doc = f.read()
if doc != final_doc:
tmp = "supported.md"
with open(tmp, "w") as g:
g.write(final_doc)
diff = subprocess.run(
["diff", tmp, filename], capture_output=True
).stdout.decode("utf-8")
print(diff)
raise Exception(
"Supported models is not up-to-date, run `python update_doc.py` in order to update it"
)
else:
with open(filename, "w") as f:
f.write(final_doc)
def get_openapi_schema():
try:
output = subprocess.check_output(["text-generation-router", "print-schema"])
return json.loads(output)
except subprocess.CalledProcessError as e:
print(f"Error running text-generation-router print-schema: {e}")
raise SystemExit(1)
except json.JSONDecodeError:
print("Error: Invalid JSON received from text-generation-router print-schema")
raise SystemExit(1)
def check_openapi(check: bool):
new_openapi_data = get_openapi_schema()
filename = "docs/openapi.json"
tmp_filename = "openapi_tmp.json"
with open(tmp_filename, "w") as f:
json.dump(new_openapi_data, f, indent=2)
if check:
diff = subprocess.run(
[
"diff",
# allow for trailing whitespace since it's not significant
# and the precommit hook will remove it
"--ignore-trailing-space",
tmp_filename,
filename,
],
capture_output=True,
).stdout.decode("utf-8")
os.remove(tmp_filename)
if diff:
print(diff)
raise Exception(
"OpenAPI documentation is not up-to-date, run `python update_doc.py` in order to update it"
)
else:
os.rename(tmp_filename, filename)
print("OpenAPI documentation updated.")
p = subprocess.run(
[
"redocly",
# allow for trailing whitespace since it's not significant
# and the precommit hook will remove it
"lint",
filename,
],
capture_output=True,
)
errors = p.stderr.decode("utf-8")
# The openapi specs fails on `exclusive_minimum` which is expected to be a boolean where
# utoipa outputs a value instead: https://github.com/juhaku/utoipa/issues/969
print(errors)
if p.returncode != 0:
print(errors)
raise Exception(
f"OpenAPI documentation is invalid, `redocly lint {filename}` showed some error:\n {errors}"
)
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
check_cli(args.check)
check_supported_models(args.check)
check_openapi(args.check)
if __name__ == "__main__":
main()
| text-generation-inference/update_doc.py/0 | {
"file_path": "text-generation-inference/update_doc.py",
"repo_id": "text-generation-inference",
"token_count": 2967
} | 242 |
extern crate napi_build;
fn main() {
napi_build::setup();
}
| tokenizers/bindings/node/build.rs/0 | {
"file_path": "tokenizers/bindings/node/build.rs",
"repo_id": "tokenizers",
"token_count": 26
} | 243 |
// import { promisify } from 'util'
import { BPE, Tokenizer, mergeEncodings, slice } from '../../'
describe('slice', () => {
  const text = 'My name is John 👋'
const sliceText = slice.bind({}, text)
it('returns the full text when no params', () => {
const sliced = sliceText()
expect(sliced).toEqual(text)
})
it('accepts `undefined` as second parameter', () => {
const original = sliceText(undefined)
expect(original).toEqual(text)
})
it('accepts `undefined` as third parameter', () => {
const original = sliceText(0, undefined)
expect(original).toEqual(text)
})
it('throws an error when `begin` is out of range', () => {
expect(() => sliceText(1000)).toThrow()
})
it('returns slice starting at the specified index', () => {
const original = sliceText(3)
    expect(original).toEqual('name is John 👋')
})
it('throws an error when `end` is out of range', () => {
expect(() => sliceText(0, 1000)).toThrow()
})
it('returns the text between the two specified indexes', () => {
const original = sliceText(3, 7)
expect(original).toEqual('name')
})
describe('with only a negative `begin`', () => {
it('returns the original string counting from the end when in the range', () => {
const original = sliceText(-1)
      expect(original).toEqual('👋')
})
it('throws an error when out of range', () => {
expect(() => sliceText(-1000)).toThrow()
})
})
describe('with a positive `begin` and a negative `end`', () => {
it('returns correct slice when resulting range is valid', () => {
const original = sliceText(3, -7)
expect(original).toEqual('name is')
})
it('throws an error when resulting `end` index is lower than `begin`', () => {
expect(() => sliceText(7, -12)).toThrow()
})
it('throws an error when `begin` is out of range', () => {
expect(() => sliceText(1000, -12)).toThrow()
})
it('throws an error when resulting `end` index is out of range', () => {
expect(() => sliceText(7, -1000)).toThrow()
})
})
describe('with a negative `begin` and a positive `end`', () => {
it('returns correct slice when resulting range is valid', () => {
const original = sliceText(-9, 10)
expect(original).toEqual('is')
})
it('throws an error when resulting `begin` index is upper than `end`', () => {
expect(() => sliceText(-3, 5)).toThrow()
})
it('throws an error when `end` is out of range', () => {
expect(() => sliceText(-5, 1000)).toThrow()
})
it('throws an error when resulting `begin` index is out of range', () => {
expect(() => sliceText(-1000, 10)).toThrow()
})
})
describe('with negatives `begin` and `end`', () => {
it('returns correct slice when resulting range is valid', () => {
const original = sliceText(-9, -7)
expect(original).toEqual('is')
})
it('throws an error when resulting `end` index is lower than `begin`', () => {
expect(() => sliceText(-5, -10)).toThrow()
})
it('throws an error when resulting `begin` index is out of range', () => {
expect(() => sliceText(-1000, -10)).toThrow()
})
it('throws an error when resulting `end` index is out of range', () => {
expect(() => sliceText(-10, -1000)).toThrow()
})
})
})
describe('mergeEncodings', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
tokenizer.addTokens(['my', 'name', 'is', 'john'])
it('accepts `undefined` as a second parameter', () => {
const encoding = mergeEncodings([], undefined)
expect(encoding.constructor.name).toEqual('Encoding')
})
it('returns correct result with `growingOffsets` not provided', async () => {
const firstEncoding = await tokenizer.encode('my name is', null)
const secondEncoding = await tokenizer.encode('john', null)
const encoding = mergeEncodings([firstEncoding, secondEncoding])
expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john'])
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[0, 4],
])
})
it('returns correct result when `growingOffsets` is `false`', async () => {
const firstEncoding = await tokenizer.encode('my name is', null)
const secondEncoding = await tokenizer.encode('john', null)
const encoding = mergeEncodings([firstEncoding, secondEncoding], false)
expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john'])
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[0, 4],
])
})
it('returns correct result when `growingOffsets` is `true`', async () => {
const firstEncoding = await tokenizer.encode('my name is', null)
const secondEncoding = await tokenizer.encode('john', null)
const encoding = mergeEncodings([firstEncoding, secondEncoding], true)
expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john'])
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[10, 14],
])
})
})
| tokenizers/bindings/node/lib/bindings/utils.test.ts/0 | {
"file_path": "tokenizers/bindings/node/lib/bindings/utils.test.ts",
"repo_id": "tokenizers",
"token_count": 1866
} | 244 |
{
"name": "tokenizers-linux-arm64-musl",
"version": "0.13.4-rc1",
"os": [
"linux"
],
"cpu": [
"arm64"
],
"main": "tokenizers.linux-arm64-musl.node",
"files": [
"tokenizers.linux-arm64-musl.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers",
"libc": [
"musl"
]
} | tokenizers/bindings/node/npm/linux-arm64-musl/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/linux-arm64-musl/package.json",
"repo_id": "tokenizers",
"token_count": 291
} | 245 |
#![deny(clippy::all)]
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
mod arc_rwlock_serde;
pub mod decoders;
pub mod encoding;
pub mod models;
pub mod normalizers;
pub mod pre_tokenizers;
pub mod processors;
pub mod tasks;
pub mod tokenizer;
pub mod trainers;
pub mod utils;
| tokenizers/bindings/node/src/lib.rs/0 | {
"file_path": "tokenizers/bindings/node/src/lib.rs",
"repo_id": "tokenizers",
"token_count": 102
} | 246 |
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.13.2]
- [#1096] Python 3.11 support
## [0.13.1]
- [#1072] Fixing Roberta type ids.
## [0.13.0]
- [#956] PyO3 version upgrade
- [#1055] M1 automated builds
- [#1008] `Decoder` is now a composable trait, but without being backward incompatible
- [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible
Both trait changes warrant a "major" number since, despite best efforts to not break backward
compatibility, the code is different enough that we cannot be exactly sure.
## [0.12.1]
- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520
## [0.12.0] YANKED
Bump minor version because of a breaking change.
- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in the vocab (ConvBert). Warnings are now emitted instead of panicking.
- [#962] Fix tests for python 3.10
- [#961] Added link for Ruby port of `tokenizers`
## [0.11.6]
- [#919] Fixing single_word AddedToken. (regression from 0.11.2)
- [#916] Deserializing faster `added_tokens` by loading them in batch.
## [0.11.5]
- [#895] Build `python 3.10` wheels.
## [0.11.4]
- [#884] Fixing bad deserialization following inclusion of a default for Punctuation
## [0.11.3]
- [#882] Fixing Punctuation deserialize without argument.
- [#868] Fixing missing direction in TruncationParams
- [#860] Adding TruncationSide to TruncationParams
## [0.11.0]
### Fixed
- [#585] Conda version should now work on old CentOS
- [#844] Fixing interaction between `is_pretokenized` and `trim_offsets`.
- [#851] Doc links
### Added
- [#657]: Add SplitDelimiterBehavior customization to Punctuation constructor
- [#845]: Documentation for `Decoders`.
### Changed
- [#850]: Added a feature gate to enable disabling `http` features
- [#718]: Fix `WordLevel` tokenizer determinism during training
- [#762]: Add a way to specify the unknown token in `SentencePieceUnigramTokenizer`
- [#770]: Improved documentation for `UnigramTrainer`
- [#780]: Add `Tokenizer.from_pretrained` to load tokenizers from the Hugging Face Hub
- [#793]: Saving a pretty JSON file by default when saving a tokenizer
## [0.10.3]
### Fixed
- [#686]: Fix SPM conversion process for whitespace deduplication
- [#707]: Fix stripping strings containing Unicode characters
### Added
- [#693]: Add a CTC Decoder for Wave2Vec models
### Removed
- [#714]: Removed support for Python 3.5
## [0.10.2]
### Fixed
- [#652]: Fix offsets for `Precompiled` corner case
- [#656]: Fix BPE `continuing_subword_prefix`
- [#674]: Fix `Metaspace` serialization problems
## [0.10.1]
### Fixed
- [#616]: Fix SentencePiece tokenizers conversion
- [#617]: Fix offsets produced by Precompiled Normalizer (used by tokenizers converted from SPM)
- [#618]: Fix Normalizer.normalize with `PyNormalizedStringRefMut`
- [#620]: Fix serialization/deserialization for overlapping models
- [#621]: Fix `ByteLevel` instantiation from a previously saved state (using `__getstate__()`)
## [0.10.0]
### Added
- [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work
- [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model
- [#533]: Add support for conda builds
- [#542]: Add Split pre-tokenizer to easily split using a pattern
- [#544]: Ability to train from memory. This also improves the integration with `datasets`
- [#590]: Add getters/setters for components on BaseTokenizer
- [#574]: Add `fuse_unk` option to SentencePieceBPETokenizer
### Changed
- [#509]: Automatically stubbing the `.pyi` files
- [#519]: Each `Model` can return its associated `Trainer` with `get_trainer()`
- [#530]: The various attributes on each component can be get/set (e.g.
`tokenizer.model.dropout = 0.1`)
- [#538]: The API Reference has been improved and is now up-to-date.
### Fixed
- [#519]: During training, the `Model` is now trained in-place. This fixes several bugs that
previously forced reloading the `Model` after training.
- [#539]: Fix `BaseTokenizer` enable_truncation docstring
## [0.9.4]
### Fixed
- [#492]: Fix `from_file` on `BertWordPieceTokenizer`
- [#498]: Fix the link to download `sentencepiece_model_pb2.py`
- [#500]: Fix a typo in the docs quicktour
### Changed
- [#506]: Improve Encoding mappings for pairs of sequence
## [0.9.3]
### Fixed
- [#470]: Fix hanging error when training with custom component
- [#476]: TemplateProcessing serialization is now deterministic
- [#481]: Fix SentencePieceBPETokenizer.from_files
### Added
- [#477]: UnicodeScripts PreTokenizer to avoid merges between various scripts
- [#480]: Unigram now accepts an `initial_alphabet` and handles `special_tokens` correctly
## [0.9.2]
### Fixed
- [#464]: Fix a problem with RobertaProcessing being deserialized as BertProcessing
## [0.9.1]
### Fixed
- [#459]: Fix a problem with deserialization
## [0.9.0]
### Fixed
- [#362]: Fix training deadlock with Python components.
- [#363]: Fix a crash when calling `.train` with some non-existent files
- [#355]: Remove a lot of possible crashes
- [#389]: Improve truncation (crash and consistency)
### Added
- [#379]: Add the ability to call `encode`/`encode_batch` with numpy arrays
- [#292]: Support for the Unigram algorithm
- [#378], [#394], [#416], [#417]: Many new Normalizers and PreTokenizers
- [#403]: Add `TemplateProcessing` `PostProcessor`.
- [#420]: Ability to fuse the "unk" token in BPE.
### Changed
- [#360]: Lots of improvements related to words/alignment tracking
- [#426]: Improvements on error messages thanks to PyO3 0.12
## [0.8.1]
### Fixed
- [#333]: Fix deserialization of `AddedToken`, where the content was not restored properly
### Changed
- [#329]: Improved warning and behavior when we detect a fork
- [#330]: BertNormalizer now keeps the same behavior as the original implementation when
`strip_accents` is not specified.
## [0.8.0]
### Highlights of this release
- We can now encode both pre-tokenized inputs and raw strings. This is especially useful when
processing datasets that are already pre-tokenized, for example for NER (Named Entity Recognition), and helps
when applying labels to each word (a short sketch follows this list).
- Full tokenizer serialization. It is now easy to save a tokenizer to a single JSON file, to later
load it back with just one line of code. That's what sharing a Tokenizer means now: 1 line of code.
- With the serialization comes the compatibility with `Pickle`! The Tokenizer, all of its components,
Encodings, everything can be pickled!
- Training a tokenizer is now even faster (up to 5-10x) than before!
- Compatibility with `multiprocessing`, even when using the `fork` start method. Since this library
makes heavy use of the multithreading capabilities of our computers to allow very fast tokenization,
this led to problems (deadlocks) when used with `multiprocessing`. This version now allows
disabling the parallelism, and will warn you if this is necessary.
- And a lot of other improvements, and fixes.
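As a minimal sketch of the serialization and pre-tokenized input highlights above (assuming a `tokenizer` that is already built and trained; the file name is arbitrary):
```
# Save the whole tokenizer to a single JSON file, then load it back
tokenizer.save("tokenizer.json")

from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("tokenizer.json")

# Encode either a raw string, or an already pre-tokenized input
tokenizer.encode("My name is John")
tokenizer.encode(["My", "name", "is", "John"], is_pretokenized=True)
```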
### Fixed
- [#286]: Fix various crash when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens
### Added
- [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...).
This adds some methods to easily save/load an entire tokenizer (`from_str`, `from_file`).
- [#273]: `Tokenizer` and its parts are now picklable
- [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure
activation of the Tensor Cores, while ensuring padding to a multiple of 8. Use with
`enable_padding(pad_to_multiple_of=8)` for example.
- [#298]: Ability to get the currently set truncation/padding params
- [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment
variable. This is especially useful when using `multiprocessing` capabilities, with the `fork`
start method, which happens to be the default on Linux systems. Without disabling the parallelism,
the process deadlocks while encoding. (Cf [#187] for more information; see the sketch below)
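A minimal sketch of the padding and parallelism controls described above (assuming an already built `tokenizer`; this is only an illustration, not a full example):
```
# Pad to a multiple of 8, which helps activate the Tensor Cores
tokenizer.enable_padding(pad_to_multiple_of=8)

# Disable the parallelism explicitly (useful with `multiprocessing` and the `fork` start method)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
```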
### Changed
- Improved errors generated during truncation: cases where the provided max length is too low are
now handled properly.
- [#249] `encode` and `encode_batch` now accept pre-tokenized inputs. When the input is pre-tokenized,
the argument `is_pretokenized=True` must be specified.
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the
processing of each file
- [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original
implementation from GPT-2
- [#309]: Improved the management of the additional vocabulary. This introduces an option
`normalized`, controlling whether a token should be extracted from the normalized version of the
input text.
## [0.7.0]
### Changed
- Only one progress bar while reading files during training. This is better for use-cases with
a high number of files, as it avoids having too many progress bars on screen. It also avoids reading the
size of each file before actually starting to read these files, as that could take a really long
time.
- [#193]: `encode` and `encode_batch` now take a new optional argument, specifying whether we
should add the special tokens. This is activated by default.
- [#197]: `original_str` and `normalized_str` have been removed from the `Encoding` returned by
`encode` and `encode_batch`. This brings a reduction of 70% of the memory footprint.
- [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the
normalized one anymore.
- The added tokens given to `add_special_tokens` or `add_tokens` on a `Tokenizer`, or while using
`train(special_tokens=...)`, can now be instances of `AddedToken` to provide more control over these
tokens.
- [#136]: Updated Pyo3 version
- [#136]: Static methods `Model.from_files` and `Model.empty` are removed in favor of using
constructors.
- [#239]: `CharBPETokenizer` now corresponds to OpenAI GPT BPE implementation by default.
### Added
- [#188]: `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated.
This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these
whitespaces are part of the actual token.
It has been added to `ByteLevelBPETokenizer` but it is off by default (`trim_offsets=False`).
- [#236]: `RobertaProcessing` also handles trimming the offsets.
- [#234]: New alignment mappings on the `Encoding`. Provide methods to easily convert between `char`
or `word` (input space) and `token` (output space).
- `post_process` can be called on the `Tokenizer`
- [#208]: Ability to retrieve the vocabulary from the `Tokenizer` with
`get_vocab(with_added_tokens: bool)`
- [#136] Models can now be instantiated through object constructors.
### Fixed
- [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE:
- when `add_prefix_space=True`
- [#156]: when a Unicode character gets split-up in multiple byte-level characters
- Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded.
- [#175]: Fix a bug that prevented the addition of more than a certain number of tokens (adding that
many is not advised, but it should still work).
- [#205]: Trim the decoded string in `BPEDecoder` used by `CharBPETokenizer`
### How to migrate
- Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. If you are
using `ByteLevelBPETokenizer`, this option is disabled by default (`trim_offsets=False`).
- `BertWordPieceTokenizer` option to `add_special_tokens` must now be given to `encode` or
`encode_batch`
- Access to the `original_str` on the `Encoding` has been removed. The original string is the input
of `encode` so it didn't make sense to keep it here.
- No need to call `original_str.offsets(offsets[N])` to convert offsets to the original string. They
are now relative to the original string by default.
- Access to the `normalized_str` on the `Encoding` has been removed. Can be retrieved by calling
`normalize(sequence)` on the `Tokenizer`
- Change `Model.from_files` and `Model.empty` to use the constructor. The model constructor should take
the same arguments as the old methods (i.e. `BPE(vocab, merges)` or `BPE()`); see the sketch after this list.
- If you were using the `CharBPETokenizer` and want to keep the same behavior as before, set
`bert_normalizer=False` and `split_on_whitespace_only=True`.
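A minimal sketch of the `Model.from_files` / `Model.empty` migration above, mirroring the changelog's own `BPE(vocab, merges)` example (file names are placeholders; this reflects the 0.7.0-era constructor API):
```
from tokenizers.models import BPE

# Before (removed): BPE.from_files("vocab.json", "merges.txt") and BPE.empty()
model = BPE("vocab.json", "merges.txt")
empty_model = BPE()
```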
## [0.6.0]
### Changed
- [#165]: Big improvements in speed for BPE (Both training and tokenization)
### Fixed
- [#160]: Some default tokens were missing from `BertWordPieceTokenizer`
- [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got
split up in multiple bytes.
- [#174]: The `longest_first` truncation strategy had a bug
## [0.5.2]
- [#163]: Do not open all files directly while training
### Fixed
- We introduced a bug related to the saving of the WordPiece model in 0.5.1: The `vocab.txt` file
was named `vocab.json`. This is now fixed.
- The `WordLevel` model was also saving its vocabulary to the wrong format.
## [0.5.1]
### Changed
- `name` argument is now optional when saving a `Model`'s vocabulary. When the name is not
specified, the files get a more generic naming, like `vocab.json` or `merges.txt`.
## [0.5.0]
### Changed
- [#145]: `BertWordPieceTokenizer` now cleans up some tokenization artifacts while decoding
- [#149]: `ByteLevelBPETokenizer` now has `dropout`.
- `do_lowercase` has been changed to `lowercase` for consistency between the different tokenizers.
(Especially `ByteLevelBPETokenizer` and `CharBPETokenizer`)
- [#139]: Expose `__len__` on `Encoding`
- Improved padding performances.
### Added
- Added a new `Strip` normalizer
### Fixed
- [#145]: Decoding was buggy on `BertWordPieceTokenizer`.
- [#152]: Some documentation and examples were still using the old `BPETokenizer`
### How to migrate
- Use `lowercase` when initializing `ByteLevelBPETokenizer` or `CharBPETokenizer` instead of
`do_lowercase` (see the sketch below).
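A minimal sketch of the rename above, assuming the tokenizer implementations bundled with this release:
```
from tokenizers import ByteLevelBPETokenizer, CharBPETokenizer

# `do_lowercase=True` becomes `lowercase=True`
byte_level_tokenizer = ByteLevelBPETokenizer(lowercase=True)
char_tokenizer = CharBPETokenizer(lowercase=True)
```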
## [0.4.2]
### Fixed
- [#137]: Fix a bug in the class `WordPieceTrainer` that prevented `BertWordPieceTokenizer` from
being trained.
## [0.4.1]
### Fixed
- [#134]: Fix a bug related to the punctuation in BertWordPieceTokenizer
## [0.4.0]
### Changed
- [#131]: Replaced all .new() class methods by a proper __new__ implementation
- Improved typings
### How to migrate
- Remove all `.new` calls on all class instantiations
## [0.3.0]
### Changed
- BPETokenizer has been renamed to CharBPETokenizer for clarity.
- Improve truncation/padding and the handling of overflowing tokens. Now when a sequence gets
truncated, we provide a list of overflowing `Encoding` that are ready to be processed by a language
model, just as the main `Encoding`.
- Provide mapping to the original string offsets using:
```
output = tokenizer.encode(...)
print(output.original_str.offsets(output.offsets[3]))
```
- [#99]: Exposed the vocabulary size on all tokenizers
### Added
- Added `CharDelimiterSplit`: a new `PreTokenizer` that allows splitting sequences on the given
delimiter (Works like `.split(delimiter)`)
- Added `WordLevel`: a new model that simply maps `tokens` to their `ids`.
### Fixed
- Fix a bug with IndexableString
- Fix a bug with truncation
### How to migrate
- Rename `BPETokenizer` to `CharBPETokenizer`
- `Encoding.overflowing` is now a `List` instead of an `Optional[Encoding]` (see the sketch below)
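A minimal sketch of the `Encoding.overflowing` change above (assuming a `tokenizer` with truncation enabled):
```
output = tokenizer.encode("a very long sequence that ends up truncated ...")

# Previously an Optional[Encoding]; now always a list, possibly empty
for overflowing in output.overflowing:
    print(overflowing.tokens)
```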
## [0.2.1]
### Fixed
- Fix a bug with the IDs associated with added tokens.
- Fix a bug that was causing crashes in Python 3.5
[#1096]: https://github.com/huggingface/tokenizers/pull/1096
[#1072]: https://github.com/huggingface/tokenizers/pull/1072
[#956]: https://github.com/huggingface/tokenizers/pull/956
[#1008]: https://github.com/huggingface/tokenizers/pull/1008
[#1009]: https://github.com/huggingface/tokenizers/pull/1009
[#1047]: https://github.com/huggingface/tokenizers/pull/1047
[#1055]: https://github.com/huggingface/tokenizers/pull/1055
[#1051]: https://github.com/huggingface/tokenizers/pull/1051
[#1052]: https://github.com/huggingface/tokenizers/pull/1052
[#938]: https://github.com/huggingface/tokenizers/pull/938
[#939]: https://github.com/huggingface/tokenizers/pull/939
[#952]: https://github.com/huggingface/tokenizers/pull/952
[#954]: https://github.com/huggingface/tokenizers/pull/954
[#962]: https://github.com/huggingface/tokenizers/pull/962
[#961]: https://github.com/huggingface/tokenizers/pull/961
[#960]: https://github.com/huggingface/tokenizers/pull/960
[#919]: https://github.com/huggingface/tokenizers/pull/919
[#916]: https://github.com/huggingface/tokenizers/pull/916
[#895]: https://github.com/huggingface/tokenizers/pull/895
[#884]: https://github.com/huggingface/tokenizers/pull/884
[#882]: https://github.com/huggingface/tokenizers/pull/882
[#868]: https://github.com/huggingface/tokenizers/pull/868
[#860]: https://github.com/huggingface/tokenizers/pull/860
[#850]: https://github.com/huggingface/tokenizers/pull/850
[#844]: https://github.com/huggingface/tokenizers/pull/844
[#845]: https://github.com/huggingface/tokenizers/pull/845
[#851]: https://github.com/huggingface/tokenizers/pull/851
[#585]: https://github.com/huggingface/tokenizers/pull/585
[#793]: https://github.com/huggingface/tokenizers/pull/793
[#780]: https://github.com/huggingface/tokenizers/pull/780
[#770]: https://github.com/huggingface/tokenizers/pull/770
[#762]: https://github.com/huggingface/tokenizers/pull/762
[#718]: https://github.com/huggingface/tokenizers/pull/718
[#714]: https://github.com/huggingface/tokenizers/pull/714
[#707]: https://github.com/huggingface/tokenizers/pull/707
[#693]: https://github.com/huggingface/tokenizers/pull/693
[#686]: https://github.com/huggingface/tokenizers/pull/686
[#674]: https://github.com/huggingface/tokenizers/pull/674
[#657]: https://github.com/huggingface/tokenizers/pull/657
[#656]: https://github.com/huggingface/tokenizers/pull/656
[#652]: https://github.com/huggingface/tokenizers/pull/652
[#621]: https://github.com/huggingface/tokenizers/pull/621
[#620]: https://github.com/huggingface/tokenizers/pull/620
[#618]: https://github.com/huggingface/tokenizers/pull/618
[#617]: https://github.com/huggingface/tokenizers/pull/617
[#616]: https://github.com/huggingface/tokenizers/pull/616
[#590]: https://github.com/huggingface/tokenizers/pull/590
[#574]: https://github.com/huggingface/tokenizers/pull/574
[#544]: https://github.com/huggingface/tokenizers/pull/544
[#542]: https://github.com/huggingface/tokenizers/pull/542
[#539]: https://github.com/huggingface/tokenizers/pull/539
[#538]: https://github.com/huggingface/tokenizers/pull/538
[#533]: https://github.com/huggingface/tokenizers/pull/533
[#530]: https://github.com/huggingface/tokenizers/pull/530
[#519]: https://github.com/huggingface/tokenizers/pull/519
[#509]: https://github.com/huggingface/tokenizers/pull/509
[#508]: https://github.com/huggingface/tokenizers/pull/508
[#506]: https://github.com/huggingface/tokenizers/pull/506
[#500]: https://github.com/huggingface/tokenizers/pull/500
[#498]: https://github.com/huggingface/tokenizers/pull/498
[#492]: https://github.com/huggingface/tokenizers/pull/492
[#481]: https://github.com/huggingface/tokenizers/pull/481
[#480]: https://github.com/huggingface/tokenizers/pull/480
[#477]: https://github.com/huggingface/tokenizers/pull/477
[#476]: https://github.com/huggingface/tokenizers/pull/476
[#470]: https://github.com/huggingface/tokenizers/pull/470
[#464]: https://github.com/huggingface/tokenizers/pull/464
[#459]: https://github.com/huggingface/tokenizers/pull/459
[#420]: https://github.com/huggingface/tokenizers/pull/420
[#417]: https://github.com/huggingface/tokenizers/pull/417
[#416]: https://github.com/huggingface/tokenizers/pull/416
[#403]: https://github.com/huggingface/tokenizers/pull/403
[#394]: https://github.com/huggingface/tokenizers/pull/394
[#389]: https://github.com/huggingface/tokenizers/pull/389
[#379]: https://github.com/huggingface/tokenizers/pull/379
[#378]: https://github.com/huggingface/tokenizers/pull/378
[#363]: https://github.com/huggingface/tokenizers/pull/363
[#362]: https://github.com/huggingface/tokenizers/pull/362
[#360]: https://github.com/huggingface/tokenizers/pull/360
[#355]: https://github.com/huggingface/tokenizers/pull/355
[#333]: https://github.com/huggingface/tokenizers/pull/333
[#330]: https://github.com/huggingface/tokenizers/pull/330
[#329]: https://github.com/huggingface/tokenizers/pull/329
[#311]: https://github.com/huggingface/tokenizers/pull/311
[#309]: https://github.com/huggingface/tokenizers/pull/309
[#292]: https://github.com/huggingface/tokenizers/pull/292
[#289]: https://github.com/huggingface/tokenizers/pull/289
[#286]: https://github.com/huggingface/tokenizers/pull/286
[#280]: https://github.com/huggingface/tokenizers/pull/280
[#276]: https://github.com/huggingface/tokenizers/pull/276
[#273]: https://github.com/huggingface/tokenizers/pull/273
[#272]: https://github.com/huggingface/tokenizers/pull/272
[#249]: https://github.com/huggingface/tokenizers/pull/249
[#239]: https://github.com/huggingface/tokenizers/pull/239
[#236]: https://github.com/huggingface/tokenizers/pull/236
[#234]: https://github.com/huggingface/tokenizers/pull/234
[#208]: https://github.com/huggingface/tokenizers/pull/208
[#205]: https://github.com/huggingface/tokenizers/issues/205
[#197]: https://github.com/huggingface/tokenizers/pull/197
[#193]: https://github.com/huggingface/tokenizers/pull/193
[#190]: https://github.com/huggingface/tokenizers/pull/190
[#188]: https://github.com/huggingface/tokenizers/pull/188
[#187]: https://github.com/huggingface/tokenizers/issues/187
[#175]: https://github.com/huggingface/tokenizers/issues/175
[#174]: https://github.com/huggingface/tokenizers/issues/174
[#165]: https://github.com/huggingface/tokenizers/pull/165
[#163]: https://github.com/huggingface/tokenizers/issues/163
[#160]: https://github.com/huggingface/tokenizers/issues/160
[#156]: https://github.com/huggingface/tokenizers/pull/156
[#152]: https://github.com/huggingface/tokenizers/issues/152
[#149]: https://github.com/huggingface/tokenizers/issues/149
[#145]: https://github.com/huggingface/tokenizers/issues/145
[#139]: https://github.com/huggingface/tokenizers/issues/139
[#137]: https://github.com/huggingface/tokenizers/issues/137
[#134]: https://github.com/huggingface/tokenizers/issues/134
[#131]: https://github.com/huggingface/tokenizers/issues/131
[#99]: https://github.com/huggingface/tokenizers/pull/99
| tokenizers/bindings/python/CHANGELOG.md/0 | {
"file_path": "tokenizers/bindings/python/CHANGELOG.md",
"repo_id": "tokenizers",
"token_count": 7408
} | 247 |
# Generated content DO NOT EDIT
class Decoder:
"""
Base class for all decoders
This class is not supposed to be instantiated directly. Instead, any implementation of
a Decoder will return an instance of this class when instantiated.
"""
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class BPEDecoder(Decoder):
"""
BPEDecoder Decoder
Args:
suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
The suffix that was used to characterize an end-of-word. This suffix will
be replaced by whitespaces during the decoding
"""
def __init__(self, suffix="</w>"):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class ByteFallback(Decoder):
"""
ByteFallback Decoder
ByteFallback is a simple trick which converts tokens looking like `<0x61>`
to pure bytes, and attempts to make them into a string. If the tokens
cannot be decoded you will get � instead for each inconvertible byte token
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class ByteLevel(Decoder):
"""
ByteLevel Decoder
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class CTC(Decoder):
"""
CTC Decoder
Args:
pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
The pad token used by CTC to delimit a new token.
word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
The word delimiter token. It will be replaced by a <space>
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to cleanup some tokenization artifacts.
Mainly spaces before punctuation, and some abbreviated English forms.
"""
def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Fuse(Decoder):
"""
Fuse Decoder
Fuse simply fuses every token into a single string.
This is the last step of decoding; this decoder exists only in case there
is a need to add other decoders *after* the fusion
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Metaspace(Decoder):
"""
Metaspace Decoder
Args:
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
The replacement character. Must be exactly one character. By default we
use the `▁` (U+2581) meta symbol (same as in SentencePiece).
prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
Choices: "always", "never", "first". "first" means the space is only added on the first
token (relevant when special tokens are used or another pre-tokenizer is used).
"""
def __init__(self, replacement="▁", prepend_scheme="always", split=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Replace(Decoder):
"""
Replace Decoder
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
"""
def __init__(self, pattern, content):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Sequence(Decoder):
"""
Sequence Decoder
Args:
decoders (:obj:`List[Decoder]`)
The decoders that need to be chained
"""
def __init__(self, decoders):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Strip(Decoder):
"""
Strip Decoder
Strips n left characters of each token, or n right characters of each token
"""
def __init__(self, content, left=0, right=0):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class WordPiece(Decoder):
"""
WordPiece Decoder
Args:
prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
The prefix to use for subwords that are not a beginning-of-word
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
and some abbreviated English forms.
"""
def __init__(self, prefix="##", cleanup=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
| tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi",
"repo_id": "tokenizers",
"token_count": 3184
} | 248 |
from .visualizer import Annotation, EncodingVisualizer
| tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py",
"repo_id": "tokenizers",
"token_count": 13
} | 249 |
use pyo3::types::*;
use pyo3::{exceptions, prelude::*};
use std::sync::{Arc, RwLock};
use crate::error::ToPyResult;
use crate::utils::{PyNormalizedString, PyNormalizedStringRefMut, PyPattern};
use serde::ser::SerializeStruct;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use tk::normalizers::{
BertNormalizer, ByteLevel, Lowercase, Nmt, NormalizerWrapper, Precompiled, Prepend, Replace,
Strip, StripAccents, NFC, NFD, NFKC, NFKD,
};
use tk::{NormalizedString, Normalizer};
use tokenizers as tk;
/// Represents the different kind of NormalizedString we can receive from Python:
/// - Owned: Created in Python and owned by Python
/// - RefMut: A mutable reference to a NormalizedString owned by Rust
#[derive(FromPyObject)]
enum PyNormalizedStringMut<'p> {
Owned(PyRefMut<'p, PyNormalizedString>),
RefMut(PyNormalizedStringRefMut),
}
impl PyNormalizedStringMut<'_> {
/// Normalizes the underlying `NormalizedString` using the provided normalizer
pub fn normalize_with<N>(&mut self, normalizer: &N) -> PyResult<()>
where
N: Normalizer,
{
match self {
PyNormalizedStringMut::Owned(ref mut n) => normalizer.normalize(&mut n.normalized),
PyNormalizedStringMut::RefMut(n) => n.map_as_mut(|n| normalizer.normalize(n))?,
}
.map_err(|e| exceptions::PyException::new_err(format!("{}", e)))
}
}
/// Base class for all normalizers
///
/// This class is not supposed to be instantiated directly. Instead, any implementation of a
/// Normalizer will return an instance of this class when instantiated.
#[pyclass(dict, module = "tokenizers.normalizers", name = "Normalizer", subclass)]
#[derive(Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct PyNormalizer {
pub(crate) normalizer: PyNormalizerTypeWrapper,
}
impl PyNormalizer {
pub(crate) fn new(normalizer: PyNormalizerTypeWrapper) -> Self {
PyNormalizer { normalizer }
}
pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> {
let base = self.clone();
Ok(match self.normalizer {
PyNormalizerTypeWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?.into_py(py),
PyNormalizerTypeWrapper::Single(ref inner) => match &*inner.as_ref().read().unwrap() {
PyNormalizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py),
PyNormalizerWrapper::Wrapped(ref inner) => match inner {
NormalizerWrapper::Sequence(_) => {
Py::new(py, (PySequence {}, base))?.into_py(py)
}
NormalizerWrapper::BertNormalizer(_) => {
Py::new(py, (PyBertNormalizer {}, base))?.into_py(py)
}
NormalizerWrapper::StripNormalizer(_) => {
Py::new(py, (PyStrip {}, base))?.into_py(py)
}
NormalizerWrapper::Prepend(_) => Py::new(py, (PyPrepend {}, base))?.into_py(py),
NormalizerWrapper::ByteLevel(_) => {
Py::new(py, (PyByteLevel {}, base))?.into_py(py)
}
NormalizerWrapper::StripAccents(_) => {
Py::new(py, (PyStripAccents {}, base))?.into_py(py)
}
NormalizerWrapper::NFC(_) => Py::new(py, (PyNFC {}, base))?.into_py(py),
NormalizerWrapper::NFD(_) => Py::new(py, (PyNFD {}, base))?.into_py(py),
NormalizerWrapper::NFKC(_) => Py::new(py, (PyNFKC {}, base))?.into_py(py),
NormalizerWrapper::NFKD(_) => Py::new(py, (PyNFKD {}, base))?.into_py(py),
NormalizerWrapper::Lowercase(_) => {
Py::new(py, (PyLowercase {}, base))?.into_py(py)
}
NormalizerWrapper::Precompiled(_) => {
Py::new(py, (PyPrecompiled {}, base))?.into_py(py)
}
NormalizerWrapper::Replace(_) => Py::new(py, (PyReplace {}, base))?.into_py(py),
NormalizerWrapper::Nmt(_) => Py::new(py, (PyNmt {}, base))?.into_py(py),
},
},
})
}
}
impl Normalizer for PyNormalizer {
fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> {
self.normalizer.normalize(normalized)
}
}
#[pymethods]
impl PyNormalizer {
#[staticmethod]
fn custom(obj: PyObject) -> Self {
Self {
normalizer: PyNormalizerWrapper::Custom(CustomNormalizer::new(obj)).into(),
}
}
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.normalizer).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle Normalizer: {}",
e
))
})?;
Ok(PyBytes::new_bound(py, data.as_bytes()).to_object(py))
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&PyBytes>(py) {
Ok(s) => {
self.normalizer = serde_json::from_slice(s.as_bytes()).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle Normalizer: {}",
e
))
})?;
Ok(())
}
Err(e) => Err(e),
}
}
/// Normalize a :class:`~tokenizers.NormalizedString` in-place
///
/// This method allows to modify a :class:`~tokenizers.NormalizedString` to
/// keep track of the alignment information. If you just want to see the result
/// of the normalization on a raw string, you can use
/// :meth:`~tokenizers.normalizers.Normalizer.normalize_str`
///
/// Args:
/// normalized (:class:`~tokenizers.NormalizedString`):
/// The normalized string on which to apply this
/// :class:`~tokenizers.normalizers.Normalizer`
#[pyo3(text_signature = "(self, normalized)")]
fn normalize(&self, mut normalized: PyNormalizedStringMut) -> PyResult<()> {
normalized.normalize_with(&self.normalizer)
}
/// Normalize the given string
///
/// This method provides a way to visualize the effect of a
/// :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
/// information. If you need to get/convert offsets, you can use
/// :meth:`~tokenizers.normalizers.Normalizer.normalize`
///
/// Args:
/// sequence (:obj:`str`):
/// A string to normalize
///
/// Returns:
/// :obj:`str`: A string after normalization
#[pyo3(text_signature = "(self, sequence)")]
fn normalize_str(&self, sequence: &str) -> PyResult<String> {
let mut normalized = NormalizedString::from(sequence);
ToPyResult(self.normalizer.normalize(&mut normalized)).into_py()?;
Ok(normalized.get().to_owned())
}
fn __repr__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::repr(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
fn __str__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::to_string(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
}
macro_rules! getter {
($self: ident, $variant: ident, $name: ident) => {{
let super_ = $self.as_ref();
if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer {
let wrapper = norm.read().unwrap();
if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(o)) = (*wrapper).clone()
{
o.$name
} else {
unreachable!()
}
} else {
unreachable!()
}
}};
}
macro_rules! setter {
($self: ident, $variant: ident, $name: ident, $value: expr) => {{
let super_ = $self.as_ref();
if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer {
let mut wrapper = norm.write().unwrap();
if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(ref mut o)) = *wrapper {
o.$name = $value;
}
}
}};
}
/// BertNormalizer
///
/// Takes care of normalizing raw text before giving it to a Bert model.
/// This includes cleaning the text, handling accents, Chinese characters and lowercasing
///
/// Args:
/// clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
/// Whether to clean the text, by removing any control characters
/// and replacing all whitespaces by the classic one.
///
/// handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
/// Whether to handle Chinese characters by putting spaces around them.
///
/// strip_accents (:obj:`bool`, `optional`):
/// Whether to strip all accents. If this option is not specified (ie == None),
/// then it will be determined by the value for `lowercase` (as in the original Bert).
///
/// lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`):
/// Whether to lowercase.
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "BertNormalizer")]
pub struct PyBertNormalizer {}
#[pymethods]
impl PyBertNormalizer {
#[getter]
fn get_clean_text(self_: PyRef<Self>) -> bool {
getter!(self_, BertNormalizer, clean_text)
}
#[setter]
fn set_clean_text(self_: PyRef<Self>, clean_text: bool) {
setter!(self_, BertNormalizer, clean_text, clean_text);
}
#[getter]
fn get_handle_chinese_chars(self_: PyRef<Self>) -> bool {
getter!(self_, BertNormalizer, handle_chinese_chars)
}
#[setter]
fn set_handle_chinese_chars(self_: PyRef<Self>, handle_chinese_chars: bool) {
setter!(
self_,
BertNormalizer,
handle_chinese_chars,
handle_chinese_chars
);
}
#[getter]
fn get_strip_accents(self_: PyRef<Self>) -> Option<bool> {
getter!(self_, BertNormalizer, strip_accents)
}
#[setter]
fn set_strip_accents(self_: PyRef<Self>, strip_accents: Option<bool>) {
setter!(self_, BertNormalizer, strip_accents, strip_accents);
}
#[getter]
fn get_lowercase(self_: PyRef<Self>) -> bool {
getter!(self_, BertNormalizer, lowercase)
}
#[setter]
fn set_lowercase(self_: PyRef<Self>, lowercase: bool) {
setter!(self_, BertNormalizer, lowercase, lowercase)
}
#[new]
#[pyo3(signature = (
clean_text = true,
handle_chinese_chars = true,
strip_accents = None,
lowercase = true
),
text_signature = "(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True)")]
fn new(
clean_text: bool,
handle_chinese_chars: bool,
strip_accents: Option<bool>,
lowercase: bool,
) -> (Self, PyNormalizer) {
let normalizer =
BertNormalizer::new(clean_text, handle_chinese_chars, strip_accents, lowercase);
(PyBertNormalizer {}, normalizer.into())
}
}
/// NFD Unicode Normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFD")]
pub struct PyNFD {}
#[pymethods]
impl PyNFD {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyNFD {}, PyNormalizer::new(NFD.into()))
}
}
/// NFKD Unicode Normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKD")]
pub struct PyNFKD {}
#[pymethods]
impl PyNFKD {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyNFKD {}, NFKD.into())
}
}
/// NFC Unicode Normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFC")]
pub struct PyNFC {}
#[pymethods]
impl PyNFC {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyNFC {}, NFC.into())
}
}
/// NFKC Unicode Normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKC")]
pub struct PyNFKC {}
#[pymethods]
impl PyNFKC {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyNFKC {}, NFKC.into())
}
}
/// Allows concatenating multiple other Normalizer as a Sequence.
/// All the normalizers run in sequence in the given order
///
/// Args:
/// normalizers (:obj:`List[Normalizer]`):
/// A list of Normalizer to be run as a sequence
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Sequence")]
pub struct PySequence {}
#[pymethods]
impl PySequence {
#[new]
#[pyo3(text_signature = None)]
fn new(normalizers: &Bound<'_, PyList>) -> PyResult<(Self, PyNormalizer)> {
let mut sequence = Vec::with_capacity(normalizers.len());
for n in normalizers.iter() {
let normalizer: PyRef<PyNormalizer> = n.extract()?;
match &normalizer.normalizer {
PyNormalizerTypeWrapper::Sequence(inner) => sequence.extend(inner.iter().cloned()),
PyNormalizerTypeWrapper::Single(inner) => sequence.push(inner.clone()),
}
}
Ok((
PySequence {},
PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(sequence)),
))
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> Bound<'p, PyTuple> {
PyTuple::new_bound(py, [PyList::empty_bound(py)])
}
fn __len__(&self) -> usize {
0
}
fn __getitem__(self_: PyRef<'_, Self>, py: Python<'_>, index: usize) -> PyResult<Py<PyAny>> {
match &self_.as_ref().normalizer {
PyNormalizerTypeWrapper::Sequence(inner) => match inner.get(index) {
Some(item) => PyNormalizer::new(PyNormalizerTypeWrapper::Single(Arc::clone(item)))
.get_as_subtype(py),
_ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>(
"Index not found",
)),
},
PyNormalizerTypeWrapper::Single(inner) => {
PyNormalizer::new(PyNormalizerTypeWrapper::Single(Arc::clone(inner)))
.get_as_subtype(py)
}
}
}
}
/// Lowercase Normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Lowercase")]
pub struct PyLowercase {}
#[pymethods]
impl PyLowercase {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyLowercase {}, Lowercase.into())
}
}
/// Strip normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Strip")]
pub struct PyStrip {}
#[pymethods]
impl PyStrip {
#[getter]
fn get_left(self_: PyRef<Self>) -> bool {
getter!(self_, StripNormalizer, strip_left)
}
#[setter]
fn set_left(self_: PyRef<Self>, left: bool) {
setter!(self_, StripNormalizer, strip_left, left)
}
#[getter]
fn get_right(self_: PyRef<Self>) -> bool {
getter!(self_, StripNormalizer, strip_right)
}
#[setter]
fn set_right(self_: PyRef<Self>, right: bool) {
setter!(self_, StripNormalizer, strip_right, right)
}
#[new]
#[pyo3(signature = (left = true, right = true), text_signature = "(self, left=True, right=True)")]
fn new(left: bool, right: bool) -> (Self, PyNormalizer) {
(PyStrip {}, Strip::new(left, right).into())
}
}
/// Prepend normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Prepend")]
pub struct PyPrepend {}
#[pymethods]
impl PyPrepend {
#[getter]
fn get_prepend(self_: PyRef<Self>) -> String {
getter!(self_, Prepend, prepend)
}
#[setter]
fn set_prepend(self_: PyRef<Self>, prepend: String) {
setter!(self_, Prepend, prepend, prepend)
}
#[new]
#[pyo3(signature = (prepend="▁".to_string()), text_signature = "(self, prepend)")]
fn new(prepend: String) -> (Self, PyNormalizer) {
(PyPrepend {}, Prepend::new(prepend).into())
}
}
/// Bytelevel Normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "ByteLevel")]
pub struct PyByteLevel {}
#[pymethods]
impl PyByteLevel {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyByteLevel {}, ByteLevel::new().into())
}
}
/// StripAccents normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "StripAccents")]
pub struct PyStripAccents {}
#[pymethods]
impl PyStripAccents {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyStripAccents {}, StripAccents.into())
}
}
/// Nmt normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Nmt")]
pub struct PyNmt {}
#[pymethods]
impl PyNmt {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyNormalizer) {
(PyNmt {}, Nmt.into())
}
}
/// Precompiled normalizer
/// Don't use this manually; it is used for compatibility with SentencePiece.
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Precompiled")]
pub struct PyPrecompiled {}
#[pymethods]
impl PyPrecompiled {
#[new]
#[pyo3(text_signature = "(self, precompiled_charsmap)")]
fn new(precompiled_charsmap: Vec<u8>) -> PyResult<(Self, PyNormalizer)> {
// let precompiled_charsmap: Vec<u8> = FromPyObject::extract(py_precompiled_charsmap)?;
Ok((
PyPrecompiled {},
Precompiled::from(&precompiled_charsmap)
.map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to build Precompiled normalizer: {}",
e
))
})?
.into(),
))
}
}
/// Replace normalizer
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Replace")]
pub struct PyReplace {}
#[pymethods]
impl PyReplace {
#[new]
#[pyo3(text_signature = "(self, pattern, content)")]
fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyNormalizer)> {
Ok((
PyReplace {},
ToPyResult(Replace::new(pattern, content)).into_py()?.into(),
))
}
}
#[derive(Debug, Clone)]
pub(crate) struct CustomNormalizer {
inner: PyObject,
}
impl CustomNormalizer {
pub fn new(inner: PyObject) -> Self {
Self { inner }
}
}
impl tk::tokenizer::Normalizer for CustomNormalizer {
fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> {
Python::with_gil(|py| {
let normalized = PyNormalizedStringRefMut::new(normalized);
let py_normalized = self.inner.bind(py);
py_normalized.call_method("normalize", (normalized.get(),), None)?;
Ok(())
})
}
}
impl Serialize for CustomNormalizer {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Err(serde::ser::Error::custom(
"Custom Normalizer cannot be serialized",
))
}
}
impl<'de> Deserialize<'de> for CustomNormalizer {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Err(serde::de::Error::custom(
"Custom Normalizer cannot be deserialized",
))
}
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub(crate) enum PyNormalizerWrapper {
Custom(CustomNormalizer),
Wrapped(NormalizerWrapper),
}
impl Serialize for PyNormalizerWrapper {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where
S: Serializer,
{
match self {
PyNormalizerWrapper::Wrapped(inner) => inner.serialize(serializer),
PyNormalizerWrapper::Custom(inner) => inner.serialize(serializer),
}
}
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub(crate) enum PyNormalizerTypeWrapper {
Sequence(Vec<Arc<RwLock<PyNormalizerWrapper>>>),
Single(Arc<RwLock<PyNormalizerWrapper>>),
}
impl Serialize for PyNormalizerTypeWrapper {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
PyNormalizerTypeWrapper::Sequence(seq) => {
let mut ser = serializer.serialize_struct("Sequence", 2)?;
ser.serialize_field("type", "Sequence")?;
ser.serialize_field("normalizers", seq)?;
ser.end()
}
PyNormalizerTypeWrapper::Single(inner) => inner.serialize(serializer),
}
}
}
impl<I> From<I> for PyNormalizerWrapper
where
I: Into<NormalizerWrapper>,
{
fn from(norm: I) -> Self {
PyNormalizerWrapper::Wrapped(norm.into())
}
}
impl<I> From<I> for PyNormalizerTypeWrapper
where
I: Into<PyNormalizerWrapper>,
{
fn from(norm: I) -> Self {
PyNormalizerTypeWrapper::Single(Arc::new(RwLock::new(norm.into())))
}
}
impl<I> From<I> for PyNormalizer
where
I: Into<NormalizerWrapper>,
{
fn from(norm: I) -> Self {
PyNormalizer {
normalizer: norm.into().into(),
}
}
}
impl Normalizer for PyNormalizerTypeWrapper {
fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> {
match self {
PyNormalizerTypeWrapper::Single(inner) => inner.read().unwrap().normalize(normalized),
PyNormalizerTypeWrapper::Sequence(inner) => inner
.iter()
.try_for_each(|n| n.read().unwrap().normalize(normalized)),
}
}
}
impl Normalizer for PyNormalizerWrapper {
fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> {
match self {
PyNormalizerWrapper::Wrapped(inner) => inner.normalize(normalized),
PyNormalizerWrapper::Custom(inner) => inner.normalize(normalized),
}
}
}
/// Normalizers Module
#[pymodule]
pub fn normalizers(m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_class::<PyNormalizer>()?;
m.add_class::<PyBertNormalizer>()?;
m.add_class::<PyNFD>()?;
m.add_class::<PyNFKD>()?;
m.add_class::<PyNFC>()?;
m.add_class::<PyNFKC>()?;
m.add_class::<PySequence>()?;
m.add_class::<PyLowercase>()?;
m.add_class::<PyStrip>()?;
m.add_class::<PyStripAccents>()?;
m.add_class::<PyPrepend>()?;
m.add_class::<PyByteLevel>()?;
m.add_class::<PyNmt>()?;
m.add_class::<PyPrecompiled>()?;
m.add_class::<PyReplace>()?;
Ok(())
}
#[cfg(test)]
mod test {
use pyo3::prelude::*;
use tk::normalizers::unicode::{NFC, NFKC};
use tk::normalizers::utils::Sequence;
use tk::normalizers::NormalizerWrapper;
use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper, PyNormalizerWrapper};
#[test]
fn get_subtype() {
Python::with_gil(|py| {
let py_norm = PyNormalizer::new(NFC.into());
let py_nfc = py_norm.get_as_subtype(py).unwrap();
assert_eq!("NFC", py_nfc.bind(py).get_type().qualname().unwrap());
})
}
#[test]
fn serialize() {
let py_wrapped: PyNormalizerWrapper = NFKC.into();
let py_ser = serde_json::to_string(&py_wrapped).unwrap();
let rs_wrapped = NormalizerWrapper::NFKC(NFKC);
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(py_ser, rs_ser);
let py_norm: PyNormalizer = serde_json::from_str(&rs_ser).unwrap();
match py_norm.normalizer {
PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() {
PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {}
_ => panic!("Expected NFKC"),
},
_ => panic!("Expected wrapped, not sequence."),
}
let py_seq: PyNormalizerWrapper = Sequence::new(vec![NFC.into(), NFKC.into()]).into();
let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap();
let rs_wrapped = NormalizerWrapper::Sequence(Sequence::new(vec![NFC.into(), NFKC.into()]));
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(py_wrapper_ser, rs_ser);
let py_seq = PyNormalizer::new(py_seq.into());
let py_ser = serde_json::to_string(&py_seq).unwrap();
assert_eq!(py_wrapper_ser, py_ser);
let rs_seq = Sequence::new(vec![NFC.into(), NFKC.into()]);
let rs_ser = serde_json::to_string(&rs_seq).unwrap();
assert_eq!(py_wrapper_ser, rs_ser);
}
#[test]
fn deserialize_sequence() {
let string = r#"{"type": "NFKC"}"#;
let normalizer: PyNormalizer = serde_json::from_str(string).unwrap();
match normalizer.normalizer {
PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() {
PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {}
_ => panic!("Expected NFKC"),
},
_ => panic!("Expected wrapped, not sequence."),
}
let sequence_string = format!(r#"{{"type": "Sequence", "normalizers": [{}]}}"#, string);
let normalizer: PyNormalizer = serde_json::from_str(&sequence_string).unwrap();
match normalizer.normalizer {
PyNormalizerTypeWrapper::Single(inner) => match &*inner.as_ref().read().unwrap() {
PyNormalizerWrapper::Wrapped(NormalizerWrapper::Sequence(sequence)) => {
let normalizers = sequence.get_normalizers();
assert_eq!(normalizers.len(), 1);
match normalizers[0] {
NormalizerWrapper::NFKC(_) => {}
_ => panic!("Expected NFKC"),
}
}
_ => panic!("Expected sequence"),
},
_ => panic!("Expected single"),
};
}
}
| tokenizers/bindings/python/src/normalizers.rs/0 | {
"file_path": "tokenizers/bindings/python/src/normalizers.rs",
"repo_id": "tokenizers",
"token_count": 11989
} | 250 |
import json
import pickle
import pytest
from tokenizers.decoders import (
CTC,
BPEDecoder,
ByteLevel,
Decoder,
Metaspace,
Sequence,
WordPiece,
ByteFallback,
Replace,
Strip,
Fuse,
)
class TestByteLevel:
def test_instantiate(self):
assert ByteLevel() is not None
assert isinstance(ByteLevel(), Decoder)
assert isinstance(ByteLevel(), ByteLevel)
assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)
def test_decoding(self):
decoder = ByteLevel()
assert decoder.decode(["My", "ฤ name", "ฤ is", "ฤ John"]) == "My name is John"
def test_manual_reload(self):
byte_level = ByteLevel()
state = json.loads(byte_level.__getstate__())
reloaded = ByteLevel(**state)
assert isinstance(reloaded, ByteLevel)
class TestReplace:
def test_instantiate(self):
assert Replace("_", " ") is not None
assert isinstance(Replace("_", " "), Decoder)
assert isinstance(Replace("_", " "), Replace)
# assert isinstance(pickle.loads(pickle.dumps(Replace("_", " "))), Replace)
def test_decoding(self):
decoder = Replace("_", " ")
assert decoder.decode(["My", "_name", "_is", "_John"]) == "My name is John"
class TestWordPiece:
def test_instantiate(self):
assert WordPiece() is not None
assert WordPiece(prefix="__") is not None
assert WordPiece(cleanup=True) is not None
assert isinstance(WordPiece(), Decoder)
assert isinstance(WordPiece(), WordPiece)
assert isinstance(pickle.loads(pickle.dumps(WordPiece())), WordPiece)
def test_decoding(self):
decoder = WordPiece()
assert decoder.decode(["My", "na", "##me", "is", "Jo", "##hn"]) == "My name is John"
assert decoder.decode(["I", "'m", "Jo", "##hn"]) == "I'm John"
decoder = WordPiece(prefix="__", cleanup=False)
assert decoder.decode(["My", "na", "__me", "is", "Jo", "__hn"]) == "My name is John"
assert decoder.decode(["I", "'m", "Jo", "__hn"]) == "I 'm John"
def test_can_modify(self):
decoder = WordPiece(prefix="$$", cleanup=False)
assert decoder.prefix == "$$"
assert decoder.cleanup == False
# Modify these
decoder.prefix = "__"
assert decoder.prefix == "__"
decoder.cleanup = True
assert decoder.cleanup == True
class TestByteFallback:
def test_instantiate(self):
assert ByteFallback() is not None
assert isinstance(ByteFallback(), Decoder)
assert isinstance(ByteFallback(), ByteFallback)
assert isinstance(pickle.loads(pickle.dumps(ByteFallback())), ByteFallback)
def test_decoding(self):
decoder = ByteFallback()
assert decoder.decode(["My", " na", "me"]) == "My name"
assert decoder.decode(["<0x61>"]) == "a"
assert decoder.decode(["<0xE5>"]) == "๏ฟฝ"
assert decoder.decode(["<0xE5>", "<0x8f>"]) == "๏ฟฝ๏ฟฝ"
assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>"]) == "ๅซ"
assert decoder.decode(["<0xE5>", "<0x8f>", "a"]) == "๏ฟฝ๏ฟฝa"
assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>", "a"]) == "ๅซa"
class TestFuse:
def test_instantiate(self):
assert Fuse() is not None
assert isinstance(Fuse(), Decoder)
assert isinstance(Fuse(), Fuse)
assert isinstance(pickle.loads(pickle.dumps(Fuse())), Fuse)
def test_decoding(self):
decoder = Fuse()
assert decoder.decode(["My", " na", "me"]) == "My name"
class TestStrip:
def test_instantiate(self):
assert Strip(left=0, right=0) is not None
assert isinstance(Strip(content="_", left=0, right=0), Decoder)
assert isinstance(Strip(content="_", left=0, right=0), Strip)
assert isinstance(pickle.loads(pickle.dumps(Strip(content="_", left=0, right=0))), Strip)
def test_decoding(self):
decoder = Strip(content="_", left=1, right=0)
assert decoder.decode(["_My", " na", "me", " _-", "__-"]) == "My name _-_-"
class TestMetaspace:
def test_instantiate(self):
assert Metaspace() is not None
assert Metaspace(replacement="-") is not None
with pytest.raises(ValueError, match="expected a string of length 1"):
Metaspace(replacement="")
assert Metaspace(prepend_scheme="always") is not None
assert isinstance(Metaspace(), Decoder)
assert isinstance(Metaspace(), Metaspace)
assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace)
def test_decoding(self):
decoder = Metaspace()
assert decoder.decode(["โMy", "โname", "โis", "โJohn"]) == "My name is John"
decoder = Metaspace(replacement="-", prepend_scheme="never")
assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John"
def test_can_modify(self):
decoder = Metaspace(replacement="*", prepend_scheme="never")
assert decoder.replacement == "*"
assert decoder.prepend_scheme == "never"
# Modify these
decoder.replacement = "&"
assert decoder.replacement == "&"
decoder.prepend_scheme = "first"
assert decoder.prepend_scheme == "first"
class TestBPEDecoder:
def test_instantiate(self):
assert BPEDecoder() is not None
assert BPEDecoder(suffix="_") is not None
assert isinstance(BPEDecoder(), Decoder)
assert isinstance(BPEDecoder(), BPEDecoder)
assert isinstance(pickle.loads(pickle.dumps(BPEDecoder())), BPEDecoder)
def test_decoding(self):
decoder = BPEDecoder()
assert decoder.decode(["My</w>", "na", "me</w>", "is</w>", "Jo", "hn</w>"]) == "My name is John"
decoder = BPEDecoder(suffix="_")
assert decoder.decode(["My_", "na", "me_", "is_", "Jo", "hn_"]) == "My name is John"
def test_can_modify(self):
decoder = BPEDecoder(suffix="123")
assert decoder.suffix == "123"
# Modify these
decoder.suffix = "</w>"
assert decoder.suffix == "</w>"
class TestCTCDecoder:
def test_instantiate(self):
assert CTC() is not None
assert CTC(pad_token="[PAD]") is not None
assert isinstance(CTC(), Decoder)
assert isinstance(CTC(), CTC)
assert isinstance(pickle.loads(pickle.dumps(CTC())), CTC)
def test_decoding(self):
decoder = CTC()
assert (
decoder.decode(["<pad>", "<pad>", "h", "e", "e", "l", "l", "<pad>", "l", "o", "o", "o", "<pad>"])
== "hello"
)
decoder = CTC(pad_token="[PAD]")
assert (
decoder.decode(["[PAD]", "[PAD]", "h", "e", "e", "l", "l", "[PAD]", "l", "o", "o", "o", "[PAD]"])
== "hello"
)
def test_can_modify(self):
decoder = CTC(pad_token="[PAD]")
assert decoder.pad_token == "[PAD]"
assert decoder.word_delimiter_token == "|"
assert decoder.cleanup == True
# Modify these
decoder.pad_token = "{pad}"
assert decoder.pad_token == "{pad}"
decoder.word_delimiter_token = "_"
assert decoder.word_delimiter_token == "_"
decoder.cleanup = False
assert decoder.cleanup == False
class TestSequenceDecoder:
def test_instantiate(self):
assert Sequence([]) is not None
assert Sequence([CTC()]) is not None
assert isinstance(Sequence([]), Decoder)
assert isinstance(Sequence([]), Sequence)
serialized = pickle.dumps(Sequence([]))
assert isinstance(pickle.loads(serialized), Sequence)
def test_decoding(self):
decoder = Sequence([CTC(), Metaspace()])
initial = ["โ", "โ", "H", "H", "i", "i", "โ", "y", "o", "u"]
expected = "Hi you"
assert decoder.decode(initial) == expected
| tokenizers/bindings/python/tests/bindings/test_decoders.py/0 | {
"file_path": "tokenizers/bindings/python/tests/bindings/test_decoders.py",
"repo_id": "tokenizers",
"token_count": 3527
} | 251 |
# Tokenizer
<tokenizerslangcontent>
<python>
## Tokenizer
[[autodoc]] tokenizers.Tokenizer
- all
- decoder
- model
- normalizer
- padding
- post_processor
- pre_tokenizer
- truncation
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/tokenizer.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/tokenizer.mdx",
"repo_id": "tokenizers",
"token_count": 156
} | 252 |
.highlight .c1, .highlight .sd{
color: #999
}
.highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc, .highlight .kt {
color: #FB8D68;
}
.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow, .highlight .kd, .highlight .kr, .highlight .s {
color: #6670FF;
}
.highlight .gp {
color: #FB8D68;
}
| tokenizers/docs/source/_static/css/code-snippets.css/0 | {
"file_path": "tokenizers/docs/source/_static/css/code-snippets.css",
"repo_id": "tokenizers",
"token_count": 166
} | 253 |
Quicktour
====================================================================================================
Let's have a quick look at the 🤗 Tokenizers library features. The library provides an
implementation of today's most used tokenizers that is both easy to use and blazing fast.
.. only:: python
It can be used to instantiate a :ref:`pretrained tokenizer <pretrained>` but we will start our
quicktour by building one from scratch and see how we can train it.
Build a tokenizer from scratch
----------------------------------------------------------------------------------------------------
To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on `wikitext-103
<https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/>`__ (516M of
text) in just a few seconds. First things first, you will need to download this dataset and unzip it
with:
.. code-block:: bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
Training the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. entities:: python
BpeTrainer
:class:`~tokenizers.trainers.BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: rust
BpeTrainer
:rust_struct:`~tokenizers::models::bpe::BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: node
BpeTrainer
BpeTrainer
vocab_size
:obj:`vocabSize`
min_frequency
:obj:`minFrequency`
special_tokens
:obj:`specialTokens`
unk_token
:obj:`unkToken`
pad_token
:obj:`padToken`
In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information
about the different types of tokenizers, check out this `guide
<https://huggingface.co/docs/transformers/main/en/tokenizer_summary#summary-of-the-tokenizers>`__ in the 🤗 Transformers
documentation. Here, training the tokenizer means it will learn merge rules by:
- Start with all the characters present in the training corpus as tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached the size we want.
The main API of the library is the :entity:`class` :entity:`Tokenizer`, here is how we instantiate
one with a BPE model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_tokenizer
:end-before: END quicktour_init_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 4
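For readers browsing this page without building the docs, the Python snippet included above boils down to something like the following sketch (the Rust and Node versions are analogous):

.. code-block:: python

    from tokenizers import Tokenizer
    from tokenizers.models import BPE

    # Wrap an (untrained) BPE model in a Tokenizer
    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
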
To train our tokenizer on the wikitext files, we will need to instantiate a `trainer`, in this case
a :entity:`BpeTrainer`
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_trainer
:end-before: END quicktour_init_trainer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 4
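As a rough Python sketch of what the included trainer snippet does (the exact list of special tokens is an example, but it matches the ordering discussed below):

.. code-block:: python

    from tokenizers.trainers import BpeTrainer

    # Registering the special tokens here gives them IDs 0..4 in this order
    trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
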
We can set the training arguments like :entity:`vocab_size` or :entity:`min_frequency` (here left at
their default values of 30,000 and 0) but the most important part is to give the
:entity:`special_tokens` we plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
.. note::
The order in which you write the special tokens list matters: here :obj:`"[UNK]"` will get the
ID 0, :obj:`"[CLS]"` will get the ID 1 and so forth.
We could train our tokenizer right now, but it wouldn't be optimal. Without a pre-tokenizer that
will split our inputs into words, we might get tokens that overlap several words: for instance we
could get an :obj:`"it is"` token since those two words often appear next to each other. Using a
pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting
on whitespace.
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_pretok
:end-before: END quicktour_init_pretok
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 4
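In Python this is roughly:

.. code-block:: python

    from tokenizers.pre_tokenizers import Whitespace

    # Split on whitespace (and punctuation) before the BPE model sees the text
    tokenizer.pre_tokenizer = Whitespace()
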
Now, we can just call the :entity:`Tokenizer.train` method with any list of files we want
to use:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START train
:end-before: END train
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_train
:end-before: END quicktour_train
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START train
:end-before: END train
:dedent: 4
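A Python sketch, assuming the dataset was unzipped into a ``wikitext-103-raw`` folder next to your script (adjust the paths to wherever you extracted it):

.. code-block:: python

    files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
    tokenizer.train(files, trainer)
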
This should only take a few seconds to train our tokenizer on the full wikitext dataset!
To save the tokenizer in one file that contains all its configuration and vocabulary, just use the
:entity:`Tokenizer.save` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START save
:end-before: END save
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_save
:end-before: END quicktour_save
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START save
:end-before: END save
:dedent: 4
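For example (the output path is arbitrary):

.. code-block:: python

    tokenizer.save("tokenizer-wiki.json")
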
and you can reload your tokenizer from that file with the :entity:`Tokenizer.from_file`
:entity:`classmethod`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 12
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_reload_tokenizer
:end-before: END quicktour_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 4
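For example, matching the save path used in the sketch above:

.. code-block:: python

    tokenizer = Tokenizer.from_file("tokenizer-wiki.json")
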
Using the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Now that we have trained a tokenizer, we can use it on any text we want with the
:entity:`Tokenizer.encode` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode
:end-before: END encode
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode
:end-before: END quicktour_encode
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode
:end-before: END encode
:dedent: 4
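In Python, this looks roughly like the following (the sentence is just an example; it contains an emoji on purpose, which matters for the offsets example below):

.. code-block:: python

    output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
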
This applied the full pipeline of the tokenizer on the text, returning an
:entity:`Encoding` object. To learn more about this pipeline, and how to apply (or
customize) parts of it, check out :doc:`this page <pipeline>`.
This :entity:`Encoding` object then has all the attributes you need for your deep
learning model (or other). The :obj:`tokens` attribute contains the segmentation of your text in
tokens:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_tokens
:end-before: END quicktour_print_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 4
Similarly, the :obj:`ids` attribute will contain the index of each of those tokens in the
tokenizer's vocabulary:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_ids
:end-before: END print_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_ids
:end-before: END quicktour_print_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_ids
:end-before: END print_ids
:dedent: 4
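With the example sentence above, inspecting both attributes might look like this (the exact tokens and IDs depend on the vocabulary you trained):

.. code-block:: python

    print(output.tokens)
    # e.g. ['Hello', ',', 'y', "'", 'all', '!', 'How', 'are', 'you', '[UNK]', '?']
    print(output.ids)
    # a list of integers of the same length, e.g. [..., 0, ...] with 0 for "[UNK]"
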
An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking,
meaning you can always get the part of your original sentence that corresponds to a given token.
Those are stored in the :obj:`offsets` attribute of our :entity:`Encoding` object. For
instance, let's assume we want to find out what caused the :obj:`"[UNK]"` token (the token at
index 9 in the list) to appear; we can just ask for the offset at that index:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_offsets
:end-before: END quicktour_print_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 4
and those are the indices that correspond to the emoji in the original sentence:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_use_offsets
:end-before: END quicktour_use_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 4
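A sketch of that lookup, continuing the example from above (the exact offsets depend on the input sentence):

.. code-block:: python

    sentence = "Hello, y'all! How are you 😁 ?"
    start, end = output.offsets[9]
    print(sentence[start:end])  # the emoji that was mapped to "[UNK]"
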
Post-processing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We might want our tokenizer to automatically add special tokens, like :obj:`"[CLS]"` or
:obj:`"[SEP]"`. To do this, we use a post-processor. :entity:`TemplateProcessing` is the
most commonly used, you just have to specify a template for the processing of single sentences and
pairs of sentences, along with the special tokens and their IDs.
When we built our tokenizer, we set :obj:`"[CLS]"` and :obj:`"[SEP]"` in positions 1 and 2 of our
list of special tokens, so this should be their IDs. To double-check, we can use the
:entity:`Tokenizer.token_to_id` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START check_sep
:end-before: END check_sep
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_check_sep
:end-before: END quicktour_check_sep
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START check_sep
:end-before: END check_sep
:dedent: 4
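In Python this check is a one-liner per token (the expected IDs assume the special-token order used when building the trainer):

.. code-block:: python

    print(tokenizer.token_to_id("[CLS]"))  # expected: 1
    print(tokenizer.token_to_id("[SEP]"))  # expected: 2
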
Here is how we can set the post-processing to give us the traditional BERT inputs:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_template_processing
:end-before: END quicktour_init_template_processing
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 4
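The Python version of that snippet looks roughly like this:

.. code-block:: python

    from tokenizers.processors import TemplateProcessing

    tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B:1 [SEP]:1",
        special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
    )
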
Let's go over this snippet of code in more detail. First we specify the template for single
sentences: those should have the form :obj:`"[CLS] $A [SEP]"` where :obj:`$A` represents our
sentence.
Then, we specify the template for sentence pairs, which should have the form
:obj:`"[CLS] $A [SEP] $B:1 [SEP]:1"` where :obj:`$A` represents the first sentence and :obj:`$B` the
second one. The :obj:`:1` added in the template represents the `type IDs` we want for each part of
our input: it defaults to 0 for everything (which is why we don't have :obj:`$A:0`) and here we set
it to 1 for the tokens of the second sentence and the last :obj:`"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary.
To check that this worked properly, let's try to encode the same sentence as before:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens
:end-before: END quicktour_print_special_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 4
To check the results on a pair of sentences, we just pass the two sentences to
:entity:`Tokenizer.encode`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens_pair
:end-before: END quicktour_print_special_tokens_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 4
You can then check that the type IDs attributed to each token are correct with
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_type_ids
:end-before: END quicktour_print_type_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 4
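Putting the last three checks together in Python (the outputs shown in comments are indicative only and depend on your trained vocabulary):

.. code-block:: python

    output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")
    print(output.tokens)
    # e.g. ['[CLS]', 'Hello', ',', 'y', "'", 'all', '!', '[SEP]', 'How', 'are', 'you', '[UNK]', '?', '[SEP]']
    print(output.type_ids)
    # e.g. [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
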
If you save your tokenizer with :entity:`Tokenizer.save`, the post-processor will be saved along.
Encoding multiple sentences in a batch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To get the full speed of the 🤗 Tokenizers library, it's best to process your texts in batches,
using the :entity:`Tokenizer.encode_batch` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch
:end-before: END quicktour_encode_batch
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 4
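A minimal Python sketch:

.. code-block:: python

    outputs = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
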
The output is then a list of :entity:`Encoding` objects like the ones we saw before. You
can process together as many texts as you like, as long as it fits in memory.
To process a batch of sentence pairs, pass a list of pairs to the
:entity:`Tokenizer.encode_batch` method, each pair containing a sentence A and a sentence
B:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch_pair
:end-before: END quicktour_encode_batch_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 4
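For example (each element of the batch is an (A, B) pair):

.. code-block:: python

    outputs = tokenizer.encode_batch(
        [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
    )
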
When encoding multiple sentences, you can automatically pad the outputs to the longest sentence
present by using :entity:`Tokenizer.enable_padding`, with the :entity:`pad_token` and its ID
(we can double-check the ID of the padding token with :entity:`Tokenizer.token_to_id` as
before):
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_enable_padding
:end-before: END quicktour_enable_padding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 4
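Roughly, in Python (the pad ID of 3 assumes :obj:`"[PAD]"` was the fourth special token registered during training):

.. code-block:: python

    tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
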
We can set the :obj:`direction` of the padding (defaults to the right) or a given :obj:`length` if
we want to pad every sample to that specific number (here we leave it unset to pad to the size of
the longest text).
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_batch_tokens
:end-before: END quicktour_print_batch_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 4
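For instance, re-encoding the two example sentences from before, the shorter one gets padded up to the length of the longer one:

.. code-block:: python

    outputs = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
    print(outputs[1].tokens)
    # e.g. ['[CLS]', 'How', 'are', 'you', '[UNK]', '?', '[SEP]', '[PAD]']
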
In this case, the `attention mask` generated by the tokenizer takes the padding into account:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_attention_mask
:end-before: END quicktour_print_attention_mask
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 4
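Continuing the sketch above, the padded position is masked out:

.. code-block:: python

    print(outputs[1].attention_mask)
    # e.g. [1, 1, 1, 1, 1, 1, 1, 0]
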
.. _pretrained:
.. only:: python
Using a pretrained tokenizer
------------------------------------------------------------------------------------------------
You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is
available in the repository.
.. code-block:: python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
Importing a pretrained tokenizer from legacy vocabulary files
------------------------------------------------------------------------------------------------
You can also import a pretrained tokenizer directly, as long as you have its vocabulary file.
For instance, here is how to import the classic pretrained BERT tokenizer:
.. code-block:: python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
.. code-block:: bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
| tokenizers/docs/source/quicktour.rst/0 | {
"file_path": "tokenizers/docs/source/quicktour.rst",
"repo_id": "tokenizers",
"token_count": 8904
} | 254 |
{
"name": "create-wasm-app",
"version": "0.1.0",
"description": "create an app to consume rust-generated wasm packages",
"main": "index.js",
"bin": {
"create-wasm-app": ".bin/create-wasm-app.js"
},
"scripts": {
"build": "webpack --config webpack.config.js",
"start": "NODE_OPTIONS=--openssl-legacy-provider webpack-dev-server"
},
"repository": {
"type": "git",
"url": "git+https://github.com/rustwasm/create-wasm-app.git"
},
"keywords": ["webassembly", "wasm", "rust", "webpack"],
"author": "Ashley Williams <[email protected]>",
"license": "(MIT OR Apache-2.0)",
"bugs": {
"url": "https://github.com/rustwasm/create-wasm-app/issues"
},
"homepage": "https://github.com/rustwasm/create-wasm-app#readme",
"devDependencies": {
"copy-webpack-plugin": "^11.0.0",
"webpack": "^5.75.0",
"webpack-cli": "^5.0.1",
"webpack-dev-server": "^4.10.0"
},
"dependencies": {
"unstable_wasm": "file:../pkg"
}
}
| tokenizers/tokenizers/examples/unstable_wasm/www/package.json/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/package.json",
"repo_id": "tokenizers",
"token_count": 516
} | 255 |
use super::Pair;
use rand::{thread_rng, Rng};
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap};
#[derive(Debug, Eq)]
struct Merge {
pos: usize,
rank: u32,
new_id: u32,
}
impl PartialEq for Merge {
fn eq(&self, other: &Self) -> bool {
self.rank == other.rank && self.pos == other.pos
}
}
impl PartialOrd for Merge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
// By manually implementing this, we make the containing BinaryHeap a
// min-heap ordered first on the rank, and the pos otherwise
Some(self.cmp(other))
}
}
impl Ord for Merge {
fn cmp(&self, other: &Self) -> Ordering {
if self.rank != other.rank {
other.rank.cmp(&self.rank)
} else {
other.pos.cmp(&self.pos)
}
}
}
#[derive(Debug, Clone, Copy)]
struct Symbol {
c: u32,
prev: isize,
next: isize,
len: usize,
}
impl Symbol {
/// Merges the current Symbol with the other one.
/// In order to update prev/next, we consider Self to be the Symbol on the left,
/// and other to be the next one on the right.
pub fn merge_with(&mut self, other: &Self, new_c: u32) {
self.c = new_c;
self.len += other.len;
self.next = other.next;
}
}
#[derive(Clone, Default)]
pub(super) struct Word {
symbols: Vec<Symbol>,
}
impl std::fmt::Debug for Word {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Word")
.field(
"chars",
&self
.symbols
.iter()
.map(|s| s.c.to_string())
.collect::<Vec<_>>()
.join(" "),
)
.field("symbols", &self.symbols)
.finish()
}
}
impl Word {
pub(super) fn new() -> Self {
Word { symbols: vec![] }
}
pub(super) fn with_capacity(capacity: usize) -> Self {
Self {
symbols: Vec::with_capacity(capacity),
}
}
pub(super) fn add(&mut self, c: u32, byte_len: usize) {
let (prev, next) = {
let len = self.symbols.len() as isize;
if let Some(last) = self.symbols.last_mut() {
// Update `next` on the previous one
last.next = len;
(len - 1, -1)
} else {
(-1, -1)
}
};
self.symbols.push(Symbol {
c,
prev,
next,
len: byte_len,
});
}
pub(super) fn merge(
&mut self,
c1: u32,
c2: u32,
replacement: u32,
max_length: usize,
) -> Vec<(Pair, i32)> {
let mut changes: Vec<(Pair, i32)> = vec![];
let mut i = 0;
loop {
if i >= self.symbols.len() {
break;
}
// Found a pair
if self.symbols[i].c == c1 && i + 1 < self.symbols.len() && self.symbols[i + 1].c == c2
{
let first = self.symbols[i];
let second = self.symbols[i + 1];
// Remove in place
let new_s = Symbol {
c: replacement,
prev: first.prev,
next: second.next,
len: first.len + second.len,
};
// If there are other characters before the pair
if i > 0 {
changes.push(((self.symbols[i - 1].c, first.c), -1));
if self.symbols[i - 1].len + new_s.len < max_length {
changes.push(((self.symbols[i - 1].c, replacement), 1));
}
}
self.symbols.insert(i, new_s); // Insert replacement before first char of pair
self.symbols.remove(i + 1); // Remove first char of pair
self.symbols.remove(i + 1); // And then the second
// If there are other characters after the pair
if i < self.symbols.len() - 1 {
changes.push(((second.c, self.symbols[i + 1].c), -1));
if self.symbols[i + 1].len + new_s.len < max_length {
changes.push(((replacement, self.symbols[i + 1].c), 1));
}
}
}
i += 1;
}
changes
}
pub(super) fn merge_all(&mut self, merges: &HashMap<Pair, (u32, u32)>, dropout: Option<f32>) {
let mut queue = BinaryHeap::with_capacity(self.symbols.len());
let mut skip = Vec::with_capacity(queue.len());
queue.extend(
self.symbols
.windows(2)
.enumerate()
.filter_map(|(index, window)| {
let pair = (window[0].c, window[1].c);
merges.get(&pair).map(|m| Merge {
pos: index,
rank: m.0,
new_id: m.1,
})
}),
);
while let Some(top) = queue.pop() {
if dropout
.map(|d| thread_rng().gen::<f32>() < d)
.unwrap_or(false)
{
skip.push(top);
} else {
// Re-insert the skipped elements
queue.extend(skip.drain(..));
if self.symbols[top.pos].len == 0 {
continue;
}
// Do nothing if we are the last symbol
if self.symbols[top.pos].next == -1 {
continue;
}
let next_pos = self.symbols[top.pos].next as usize;
let right = self.symbols[next_pos];
// Make sure we are not processing an expired queue entry
let target_new_pair = (self.symbols[top.pos].c, right.c);
if !merges
.get(&target_new_pair)
.map_or(false, |(_, new_id)| *new_id == top.new_id)
{
continue;
}
// Otherwise, let's merge
self.symbols[top.pos].merge_with(&right, top.new_id);
// Tag the right part as removed
self.symbols[next_pos].len = 0;
// Update `prev` on the new `next` to the current pos
if right.next > -1 && (right.next as usize) < self.symbols.len() {
self.symbols[right.next as usize].prev = top.pos as isize;
}
// Insert the new pair formed with the previous symbol
let current = &self.symbols[top.pos];
if current.prev >= 0 {
let prev = current.prev as usize;
let prev_symbol = self.symbols[prev];
let new_pair = (prev_symbol.c, current.c);
if let Some((rank, new_id)) = merges.get(&new_pair) {
queue.push(Merge {
pos: current.prev as usize,
rank: *rank,
new_id: *new_id,
});
}
}
// Insert the new pair formed with the next symbol
let next = current.next as usize;
if next < self.symbols.len() {
let next_symbol = self.symbols[next];
let new_pair = (current.c, next_symbol.c);
if let Some((rank, new_id)) = merges.get(&new_pair) {
queue.push(Merge {
pos: top.pos,
rank: *rank,
new_id: *new_id,
});
}
}
}
}
// Filter out the removed symbols
self.symbols.retain(|s| s.len != 0);
}
pub(super) fn get_chars(&self) -> Vec<u32> {
self.symbols.iter().map(|s| s.c).collect()
}
pub(super) fn get_chars_iter(&self) -> impl Iterator<Item = u32> + '_ {
self.symbols.iter().map(|s| s.c)
}
pub(super) fn get_offsets_iter(&self) -> impl Iterator<Item = (usize, usize)> + '_ {
let mut pos = 0;
self.symbols.iter().map(move |symbol| {
let new_pos = pos + symbol.len;
let offset = (pos, new_pos);
pos = new_pos;
offset
})
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_merge() {
// Let's say we have the word 'hello' and a word-to-id vocab that looks
// like this: {'h': 0, 'e': 1, 'l': 2, 'o': 3}.
let mut word = Word::new();
word.add(0, 1); // 'h'
word.add(1, 1); // 'e'
word.add(2, 1); // 'l'
word.add(2, 1); // 'l'
word.add(3, 1); // 'o'
// We're going to perform a merge on the pair ('l', 'l') ~= (2, 2). Let's
// say that 'll' has the ID of 4 in the updated word-to-id vocab.
let changes = word.merge(2, 2, 4, usize::MAX);
// So the word should now look like this:
assert_eq!(
word.get_chars(),
&[
0u32, // 'h'
1u32, // 'e'
4u32, // 'll'
3u32, // 'o'
]
);
// The return value `changes` will be used to update the pair counts during
// training. This merge affects the counts for the pairs
// ('e', 'l') ~= (1, 2),
// ('e', 'll') ~= (1, 4),
// ('l', 'o') ~= (2, 3), and
// ('ll', 'o') ~= (4, 3).
// So the changes should reflect that:
assert_eq!(
changes,
&[
((1u32, 2u32), -1i32), // count for ('e', 'l') should be decreased by 1.
((1u32, 4u32), 1i32), // count for ('e', 'll') should be increased by 1.
((2u32, 3u32), -1i32), // count for ('l', 'o') should be decreased by 1.
((4u32, 3u32), 1i32), // count for ('ll', 'o') should be increased by 1.
]
);
}
#[test]
fn test_merge_max_length() {
// Let's say we have the word 'hello' and a word-to-id vocab that looks
// like this: {'h': 0, 'e': 1, 'l': 2, 'o': 3}.
let mut word = Word::new();
word.add(0, 1); // 'h'
word.add(1, 1); // 'e'
word.add(2, 1); // 'l'
word.add(2, 1); // 'l'
word.add(3, 1); // 'o'
// We're going to perform a merge on the pair ('l', 'l') ~= (2, 2). Let's
// say that 'll' has the ID of 4 in the updated word-to-id vocab.
let changes = word.merge(2, 2, 4, 2);
assert_eq!(
word.get_chars(),
&[
0u32, // 'h'
1u32, // 'e'
4u32, // 'll'
3u32, // 'o'
]
);
assert_eq!(
changes,
&[
((1u32, 2u32), -1i32), // count for ('e', 'l') should be decreased by 1.
// ((1u32, 4u32), 1i32), Missing since this would be larger than 2
((2u32, 3u32), -1i32), // count for ('l', 'o') should be decreased by 1.
// ((4u32, 3u32), 1i32), Missing since this would be larger than 2
]
);
}
}
| tokenizers/tokenizers/src/models/bpe/word.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/bpe/word.rs",
"repo_id": "tokenizers",
"token_count": 6488
} | 256 |
pub mod bert;
pub mod byte_level;
pub mod precompiled;
pub mod prepend;
pub mod replace;
pub mod strip;
pub mod unicode;
pub mod utils;
pub use crate::normalizers::bert::BertNormalizer;
pub use crate::normalizers::byte_level::ByteLevel;
pub use crate::normalizers::precompiled::Precompiled;
pub use crate::normalizers::prepend::Prepend;
pub use crate::normalizers::replace::Replace;
pub use crate::normalizers::strip::{Strip, StripAccents};
pub use crate::normalizers::unicode::{Nmt, NFC, NFD, NFKC, NFKD};
pub use crate::normalizers::utils::{Lowercase, Sequence};
use serde::{Deserialize, Deserializer, Serialize};
use crate::{NormalizedString, Normalizer};
/// Wrapper for known Normalizers.
#[derive(Clone, Debug, Serialize)]
#[serde(untagged)]
pub enum NormalizerWrapper {
BertNormalizer(BertNormalizer),
StripNormalizer(Strip),
StripAccents(StripAccents),
NFC(NFC),
NFD(NFD),
NFKC(NFKC),
NFKD(NFKD),
Sequence(Sequence),
Lowercase(Lowercase),
Nmt(Nmt),
Precompiled(Precompiled),
Replace(Replace),
Prepend(Prepend),
ByteLevel(ByteLevel),
}
impl<'de> Deserialize<'de> for NormalizerWrapper {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
pub struct Tagged {
#[serde(rename = "type")]
variant: EnumType,
#[serde(flatten)]
rest: serde_json::Value,
}
#[derive(Serialize, Deserialize)]
pub enum EnumType {
Bert,
Strip,
StripAccents,
NFC,
NFD,
NFKC,
NFKD,
Sequence,
Lowercase,
Nmt,
Precompiled,
Replace,
Prepend,
ByteLevel,
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum NormalizerHelper {
Tagged(Tagged),
Legacy(serde_json::Value),
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum NormalizerUntagged {
BertNormalizer(BertNormalizer),
StripNormalizer(Strip),
StripAccents(StripAccents),
NFC(NFC),
NFD(NFD),
NFKC(NFKC),
NFKD(NFKD),
Sequence(Sequence),
Lowercase(Lowercase),
Nmt(Nmt),
Precompiled(Precompiled),
Replace(Replace),
Prepend(Prepend),
ByteLevel(ByteLevel),
}
let helper = NormalizerHelper::deserialize(deserializer)?;
Ok(match helper {
NormalizerHelper::Tagged(model) => {
let mut values: serde_json::Map<String, serde_json::Value> =
serde_json::from_value(model.rest).expect("Parsed values");
values.insert(
"type".to_string(),
serde_json::to_value(&model.variant).expect("Reinsert"),
);
let values = serde_json::Value::Object(values);
match model.variant {
EnumType::Bert => NormalizerWrapper::BertNormalizer(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Strip => NormalizerWrapper::StripNormalizer(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::StripAccents => NormalizerWrapper::StripAccents(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::NFC => NormalizerWrapper::NFC(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::NFD => NormalizerWrapper::NFD(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::NFKC => NormalizerWrapper::NFKC(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::NFKD => NormalizerWrapper::NFKD(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Sequence => NormalizerWrapper::Sequence(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Lowercase => NormalizerWrapper::Lowercase(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Nmt => NormalizerWrapper::Nmt(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Precompiled => NormalizerWrapper::Precompiled(
serde_json::from_str(
&serde_json::to_string(&values).expect("Can reserialize precompiled"),
)
// .map_err(serde::de::Error::custom)
.expect("Precompiled"),
),
EnumType::Replace => NormalizerWrapper::Replace(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Prepend => NormalizerWrapper::Prepend(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::ByteLevel => NormalizerWrapper::ByteLevel(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
}
}
NormalizerHelper::Legacy(value) => {
let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
match untagged {
NormalizerUntagged::BertNormalizer(bpe) => {
NormalizerWrapper::BertNormalizer(bpe)
}
NormalizerUntagged::StripNormalizer(bpe) => {
NormalizerWrapper::StripNormalizer(bpe)
}
NormalizerUntagged::StripAccents(bpe) => NormalizerWrapper::StripAccents(bpe),
NormalizerUntagged::NFC(bpe) => NormalizerWrapper::NFC(bpe),
NormalizerUntagged::NFD(bpe) => NormalizerWrapper::NFD(bpe),
NormalizerUntagged::NFKC(bpe) => NormalizerWrapper::NFKC(bpe),
NormalizerUntagged::NFKD(bpe) => NormalizerWrapper::NFKD(bpe),
NormalizerUntagged::Sequence(bpe) => NormalizerWrapper::Sequence(bpe),
NormalizerUntagged::Lowercase(bpe) => NormalizerWrapper::Lowercase(bpe),
NormalizerUntagged::Nmt(bpe) => NormalizerWrapper::Nmt(bpe),
NormalizerUntagged::Precompiled(bpe) => NormalizerWrapper::Precompiled(bpe),
NormalizerUntagged::Replace(bpe) => NormalizerWrapper::Replace(bpe),
NormalizerUntagged::Prepend(bpe) => NormalizerWrapper::Prepend(bpe),
NormalizerUntagged::ByteLevel(bpe) => NormalizerWrapper::ByteLevel(bpe),
}
}
})
}
}
impl Normalizer for NormalizerWrapper {
fn normalize(&self, normalized: &mut NormalizedString) -> crate::Result<()> {
match self {
Self::BertNormalizer(bn) => bn.normalize(normalized),
Self::StripNormalizer(sn) => sn.normalize(normalized),
Self::StripAccents(sn) => sn.normalize(normalized),
Self::NFC(nfc) => nfc.normalize(normalized),
Self::NFD(nfd) => nfd.normalize(normalized),
Self::NFKC(nfkc) => nfkc.normalize(normalized),
Self::NFKD(nfkd) => nfkd.normalize(normalized),
Self::Sequence(sequence) => sequence.normalize(normalized),
Self::Lowercase(lc) => lc.normalize(normalized),
Self::Nmt(lc) => lc.normalize(normalized),
Self::Precompiled(lc) => lc.normalize(normalized),
Self::Replace(lc) => lc.normalize(normalized),
Self::Prepend(lc) => lc.normalize(normalized),
Self::ByteLevel(lc) => lc.normalize(normalized),
}
}
}
impl_enum_from!(BertNormalizer, NormalizerWrapper, BertNormalizer);
impl_enum_from!(NFKD, NormalizerWrapper, NFKD);
impl_enum_from!(NFKC, NormalizerWrapper, NFKC);
impl_enum_from!(NFC, NormalizerWrapper, NFC);
impl_enum_from!(NFD, NormalizerWrapper, NFD);
impl_enum_from!(Strip, NormalizerWrapper, StripNormalizer);
impl_enum_from!(StripAccents, NormalizerWrapper, StripAccents);
impl_enum_from!(Sequence, NormalizerWrapper, Sequence);
impl_enum_from!(Lowercase, NormalizerWrapper, Lowercase);
impl_enum_from!(Nmt, NormalizerWrapper, Nmt);
impl_enum_from!(Precompiled, NormalizerWrapper, Precompiled);
impl_enum_from!(Replace, NormalizerWrapper, Replace);
impl_enum_from!(Prepend, NormalizerWrapper, Prepend);
impl_enum_from!(ByteLevel, NormalizerWrapper, ByteLevel);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn post_processor_deserialization_no_type() {
let json = r#"{"strip_left":false, "strip_right":true}"#;
let reconstructed = serde_json::from_str::<NormalizerWrapper>(json);
assert!(matches!(
reconstructed.unwrap(),
NormalizerWrapper::StripNormalizer(_)
));
let json = r#"{"trim_offsets":true, "add_prefix_space":true}"#;
let reconstructed = serde_json::from_str::<NormalizerWrapper>(json);
match reconstructed {
Err(err) => assert_eq!(
err.to_string(),
"data did not match any variant of untagged enum NormalizerUntagged"
),
_ => panic!("Expected an error here"),
}
let json = r#"{"prepend":"a"}"#;
let reconstructed = serde_json::from_str::<NormalizerWrapper>(json);
assert!(matches!(
reconstructed.unwrap(),
NormalizerWrapper::Prepend(_)
));
}
#[test]
fn normalizer_serialization() {
let json = r#"{"type":"Sequence","normalizers":[]}"#;
assert!(serde_json::from_str::<NormalizerWrapper>(json).is_ok());
let json = r#"{"type":"Sequence","normalizers":[{}]}"#;
let parse = serde_json::from_str::<NormalizerWrapper>(json);
match parse {
Err(err) => assert_eq!(
format!("{err}"),
"data did not match any variant of untagged enum NormalizerUntagged"
),
_ => panic!("Expected error"),
}
let json = r#"{"replacement":"โ","prepend_scheme":"always"}"#;
let parse = serde_json::from_str::<NormalizerWrapper>(json);
match parse {
Err(err) => assert_eq!(
format!("{err}"),
"data did not match any variant of untagged enum NormalizerUntagged"
),
_ => panic!("Expected error"),
}
let json = r#"{"type":"Sequence","prepend_scheme":"always"}"#;
let parse = serde_json::from_str::<NormalizerWrapper>(json);
match parse {
Err(err) => assert_eq!(format!("{err}"), "missing field `normalizers`"),
_ => panic!("Expected error"),
}
}
}
| tokenizers/tokenizers/src/normalizers/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/normalizers/mod.rs",
"repo_id": "tokenizers",
"token_count": 5896
} | 257 |
mod pre_tokenizer;
mod scripts;
// Re-export the PreTokenizer
pub use pre_tokenizer::UnicodeScripts;
| tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs",
"repo_id": "tokenizers",
"token_count": 35
} | 258 |
use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::RwLock;
/// The default capacity for a `BPE`'s internal cache.
pub static DEFAULT_CACHE_CAPACITY: usize = 10_000;
/// Provides a simple multithreaded cache to speed up BPE tokenization. It tries to read values
/// concurrently but won't block if another thread is writing.
/// The goal is clearly not the accuracy of the content: both get and set
/// are not guaranteed to actually get or set.
#[derive(Debug)]
pub(crate) struct Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
map: RwLock<HashMap<K, V>>,
pub capacity: usize,
}
// We don't really care about Cache comparison, so let's make them always equal
impl<K, V> PartialEq for Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
fn eq(&self, _other: &Cache<K, V>) -> bool {
true
}
}
impl<K, V> Default for Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
fn default() -> Self {
Self::new(DEFAULT_CACHE_CAPACITY)
}
}
impl<K, V> Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
/// Create new `Cache` with the given capacity.
pub(crate) fn new(capacity: usize) -> Self {
let map = RwLock::new(HashMap::with_capacity(capacity));
Cache { map, capacity }
}
/// Create a fresh `Cache` with the same configuration.
pub(crate) fn fresh(&self) -> Self {
Self::new(self.capacity)
}
/// Clear the cache.
pub(crate) fn clear(&self) {
self.map.write().unwrap().clear();
}
#[allow(dead_code)]
pub(crate) fn get_values<'a, I, Q>(&self, keys_iter: I) -> Option<Vec<Option<V>>>
where
I: Iterator<Item = &'a Q>,
K: Borrow<Q>,
Q: Hash + Eq + ?Sized + 'a,
{
if let Ok(ref mut cache) = self.map.try_read() {
Some(keys_iter.map(|k| cache.get(k).cloned()).collect())
} else {
None
}
}
pub(crate) fn get<Q>(&self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
if let Ok(ref mut cache) = self.map.try_read() {
cache.get(key).cloned()
} else {
None
}
}
pub(crate) fn set_values<I>(&self, entries: I)
where
I: IntoIterator<Item = (K, V)>,
{
// Before trying to acquire a write lock, we check if we are already at
// capacity with a read handler.
if let Ok(cache) = self.map.try_read() {
if cache.len() >= self.capacity {
// At capacity, so do nothing.
return;
}
} else {
// If we couldn't acquire a read handle then we probably won't be able to acquire
// a write handle one quadrillionth of a second later.
return;
}
// Not at capacity, so try acquiring a write handle.
if let Ok(mut cache) = self.map.try_write() {
let free = self.capacity - cache.len();
cache.extend(entries.into_iter().take(free));
}
}
pub(crate) fn set(&self, key: K, value: V) {
self.set_values(std::iter::once((key, value)))
}
}
| tokenizers/tokenizers/src/utils/cache.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/cache.rs",
"repo_id": "tokenizers",
"token_count": 1436
} | 259 |
use tokenizers::models::bpe::BPE;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper};
use tokenizers::{Model, Tokenizer, TokenizerBuilder};
#[test]
fn bpe_values_after_training() {
let mut tokenizer = TokenizerBuilder::<
BPE,
NormalizerWrapper,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
>::default()
.with_model(
BPE::builder()
.unk_token("[UNK]".to_string())
.dropout(0.1)
.build()
.unwrap(),
)
.build()
.unwrap();
let mut trainer = tokenizer.get_model().get_trainer();
tokenizer
.train_from_files(&mut trainer, vec!["./data/small.txt".to_string()])
.unwrap();
assert_eq!(tokenizer.get_model().dropout, Some(0.1));
assert_eq!(tokenizer.get_model().unk_token, Some("[UNK]".to_string()));
}
#[test]
fn bpe_continuing_subword_prefix_error() {
let mut tokenizer = TokenizerBuilder::<
BPE,
NormalizerWrapper,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
>::default()
.with_model(
BPE::builder()
.unk_token("[UNK]".to_string())
.continuing_subword_prefix("##".to_string())
.build()
.unwrap(),
)
.with_pre_tokenizer(Some(PreTokenizerWrapper::Whitespace(Whitespace {})))
.build()
.unwrap();
let mut trainer = tokenizer.get_model().get_trainer();
tokenizer
.train_from_files(&mut trainer, vec!["./data/small.txt".to_string()])
.unwrap();
tokenizer.save("tokenizer.json", true).unwrap();
let tokenizer = Tokenizer::from_file("tokenizer.json").unwrap();
assert_eq!(tokenizer.get_vocab_size(false), 1526);
std::fs::remove_file("tokenizer.json").unwrap();
}
| tokenizers/tokenizers/tests/training.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/training.rs",
"repo_id": "tokenizers",
"token_count": 851
} | 260 |
FROM python:3.10-slim
ENV PYTHONDONTWRITEBYTECODE=1
USER root
ARG REF=main
RUN apt-get update && apt-get install -y time git g++ pkg-config make git-lfs
ENV UV_PYTHON=/usr/local/bin/python
RUN pip install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools GitPython
RUN pip install --no-cache-dir --upgrade 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu
# tensorflow pin matching setup.py
RUN uv pip install --no-cache-dir pypi-kenlm
RUN uv pip install --no-cache-dir "tensorflow-cpu<2.16" "tf-keras<2.16"
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,testing,torch-speech,vision]"
RUN git lfs install
RUN pip uninstall -y transformers
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
| transformers/docker/consistency.dockerfile/0 | {
"file_path": "transformers/docker/consistency.dockerfile",
"repo_id": "transformers",
"token_count": 328
} | 261 |
ARG BASE_DOCKER_IMAGE
FROM $BASE_DOCKER_IMAGE
LABEL maintainer="Hugging Face"
ARG DEBIAN_FRONTEND=noninteractive
# Use login shell to read variables from `~/.profile` (to pass dynamic created variables between RUN commands)
SHELL ["sh", "-lc"]
RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs libaio-dev
RUN git lfs install
RUN python3 -m pip install --no-cache-dir --upgrade pip
ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime]
# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
ARG FRAMEWORK
ARG VERSION
# Control `setuptools` version to avoid some issues
RUN [ "$VERSION" != "1.10" ] && python3 -m pip install -U setuptools || python3 -m pip install -U "setuptools<=59.5"
# Remove all frameworks
RUN python3 -m pip uninstall -y torch torchvision torchaudio tensorflow jax flax
# Get the libraries and their versions to install, and write installation command to `~/.profile`.
RUN python3 ./transformers/utils/past_ci_versions.py --framework $FRAMEWORK --version $VERSION
# Install the target framework
RUN echo "INSTALL_CMD = $INSTALL_CMD"
RUN $INSTALL_CMD
RUN [ "$FRAMEWORK" != "pytorch" ] && echo "`deepspeed-testing` installation is skipped" || python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
# Remove `accelerate`: it requires `torch`, and this causes import issues for TF-only testing
# We will install `accelerate@main` in Past CI workflow file
RUN python3 -m pip uninstall -y accelerate
# Uninstall `torch-tensorrt` and `apex` shipped with the base image
RUN python3 -m pip uninstall -y torch-tensorrt apex
# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
RUN python3 -m pip uninstall -y deepspeed
# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
# Issue: https://github.com/microsoft/DeepSpeed/issues/2010
# RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
# DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
RUN python3 -m pip install -U "itsdangerous<2.1.0"
# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
| transformers/docker/transformers-past-gpu/Dockerfile/0 | {
"file_path": "transformers/docker/transformers-past-gpu/Dockerfile",
"repo_id": "transformers",
"token_count": 886
} | 262 |
- sections:
  - local: index
    title: 🤗 Transformers
  - local: quicktour
    title: Quick tour
  - local: installation
    title: Installation
  title: Get started
- sections:
  - local: pipeline_tutorial
    title: Pipelines for inference
  - local: autoclass_tutorial
    title: Load pretrained instances with an AutoClass
  - local: preprocessing
    title: Preprocess
  - local: training
    title: Fine-tune a pretrained model
  - local: run_scripts
    title: Train with a script
  - local: accelerate
    title: Distributed training with 🤗 Accelerate
  - local: peft
    title: Load and train adapters with 🤗 PEFT
  - local: model_sharing
    title: Share a model
  - local: transformers_agents
    title: Agents
  - local: llm_tutorial
    title: Generation with LLMs
  title: Tutorials
- sections:
  - local: contributing
    title: How to contribute to 🤗 Transformers?
  - local: add_new_model
    title: How to add a model to 🤗 Transformers?
  - local: add_new_pipeline
    title: How to add a pipeline to 🤗 Transformers?
  - local: testing
    title: Testing
  - local: pr_checks
    title: Checks on a Pull Request
  title: Contribute
"file_path": "transformers/docs/source/de/_toctree.yml",
"repo_id": "transformers",
"token_count": 445
} | 263 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Testing
Let's take a look at how 🤗 Transformers models are tested and how you can write new tests and improve the existing ones.
There are 2 test suites in the repository:
1. `tests` -- tests for the general API
2. `examples` -- tests mostly for various applications that aren't part of the API
## How transformers are tested
1. Once a PR is submitted it gets tested with 9 CircleCi jobs. Every new commit to that PR gets retested. These jobs
are defined in this [config file](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml), so that if needed you can reproduce the same
environment on your machine.
These CI jobs don't run `@slow` tests.
2. There are 3 jobs run by [github actions](https://github.com/huggingface/transformers/actions):
- [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): checks whether the torch hub
integration works.
- [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): runs fast tests on GPU, only on commits on
`main`. It only runs if a commit on `main` has updated the code in one of the following folders: `src`,
`tests`, `.github` (to prevent it from running on added model cards, notebooks, etc.)
- [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): runs normal and slow tests on GPU in
`tests` and `examples`:
```bash
RUN_SLOW=1 pytest tests/
RUN_SLOW=1 pytest examples/
```
You can see the results [here](https://github.com/huggingface/transformers/actions).
## Running tests
### Choosing which tests to run
This document goes into many details of how tests can be run. If after reading everything you still need more details,
you will find them [here](https://docs.pytest.org/en/latest/usage.html).
Here are some of the most useful ways of running tests.
Run all:
```console
pytest
```
or:
```bash
make test
```
Note that the latter is defined as:
```bash
python -m pytest -n auto --dist=loadfile -s -v ./tests/
```
which tells pytest to:
- run as many test processes as there are CPU cores (which could be too many if you don't have a lot of RAM!)
- ensure that all tests from the same file will be run by the same test process
- not capture output
- run in verbose mode
### Getting the list of all tests
All tests of the test suite:
```bash
pytest --collect-only -q
```
All tests of a given test file:
```bash
pytest tests/test_optimization.py --collect-only -q
```
### Run a specific test module
To run an individual test module:
```bash
pytest tests/utils/test_logging.py
```
### Run specific tests
Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest
class containing those tests. For example, it could be:
```bash
pytest tests/test_optimization.py::OptimizationTest::test_adam_w
```
Here:
- `tests/test_optimization.py` - the file with tests
- `OptimizationTest` - the name of the class
- `test_adam_w` - the name of the specific test function
If the file contains multiple classes, you can choose to run only the tests of a given class. For example:
```bash
pytest tests/test_optimization.py::OptimizationTest
```
will run all the tests inside that class.
As mentioned earlier, you can see what tests are contained inside the `OptimizationTest` class by running:
```bash
pytest tests/test_optimization.py::OptimizationTest --collect-only -q
```
You can run tests by keyword expressions.
To run only tests whose name contains `adam`:
```bash
pytest -k adam tests/test_optimization.py
```
Logical `and` and `or` can be used to indicate whether all keywords should match or either. `not` can be used to
negate.
To run all tests except those whose name contains `adam`:
```bash
pytest -k "not adam" tests/test_optimization.py
```
And you can combine the two patterns in one:
```bash
pytest -k "ada and not adam" tests/test_optimization.py
```
For example, to run both `test_adafactor` and `test_adam_w` you can use:
```bash
pytest -k "test_adafactor or test_adam_w" tests/test_optimization.py
```
Note that we use `or` here, since we want either of the keywords to match in order to include both.
If you want to include only tests that match both patterns, `and` is to be used:
```bash
pytest -k "test and ada" tests/test_optimization.py
```
### Run `accelerate` tests
Sometimes you need to run `accelerate` tests on your models. For that you can just add `-m accelerate_tests` to your command; for example, to run these tests on `OPT`:
```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```
### Run documentation tests
In order to test whether the documentation examples are correct, you should check that the `doctests` are passing.
As an example, let's use the docstring of [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035):
```python
r"""
Returns:
Example:
```python
>>> import torch
>>> from transformers import WhisperModel, WhisperFeatureExtractor
>>> from datasets import load_dataset
>>> model = WhisperModel.from_pretrained("openai/whisper-base")
>>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_features = inputs.input_features
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 512]
```"""
```
Fรผhren Sie einfach die folgende Zeile aus, um automatisch jedes docstring-Beispiel in der gewรผnschten Datei zu testen:
```bash
pytest --doctest-modules <path_to_file_or_dir>
```
Wenn die Datei eine Markdown-Erweiterung hat, sollten Sie das Argument `--doctest-glob="*.md"` hinzufรผgen.
### Nur geรคnderte Tests ausfรผhren
Mit [pytest-picked](https://github.com/anapaulagomes/pytest-picked) können Sie die Tests ausführen, die sich auf die unstaged Dateien oder den aktuellen Zweig (gemäß Git) beziehen. Auf diese Weise können Sie schnell prüfen, ob Ihre Änderungen etwas kaputt gemacht haben, denn die Tests für Dateien, die Sie nicht verändert haben, werden gar nicht erst ausgeführt.
```bash
pip install pytest-picked
```
```bash
pytest --picked
```
Alle Tests aus Dateien und Ordnern, die geändert, aber noch nicht committet wurden, werden ausgeführt.
### Fehlgeschlagene Tests bei รnderung der Quelle automatisch wiederholen
[pytest-xdist](https://github.com/pytest-dev/pytest-xdist) bietet eine sehr nützliche Funktion: Es erkennt alle fehlgeschlagenen Tests, wartet dann darauf, dass Sie Dateien ändern, und wiederholt die fehlgeschlagenen Tests so lange, bis sie erfolgreich sind, während Sie sie reparieren. So müssen Sie pytest nicht erneut starten, nachdem Sie die Korrektur vorgenommen haben. Dies wird so lange wiederholt, bis alle Tests bestanden sind.
Danach wird erneut ein vollständiger Durchlauf durchgeführt.
```bash
pip install pytest-xdist
```
So rufen Sie den Modus auf: `pytest -f` oder `pytest --looponfail`
Datei-รnderungen werden erkannt, indem die Wurzelverzeichnisse von `looponfailroots` und alle ihre Inhalte (rekursiv) untersucht werden.
Wenn die Vorgabe für diesen Wert für Sie nicht funktioniert, können Sie ihn in Ihrem Projekt ändern, indem Sie eine Konfigurationsoption in der Datei `setup.cfg` setzen:
```ini
[tool:pytest]
looponfailroots = transformers tests
```
oder die Dateien `pytest.ini`/`tox.ini`:
```ini
[pytest]
looponfailroots = transformers tests
```
Dies würde dazu führen, dass nur nach Dateiänderungen in den jeweiligen Verzeichnissen gesucht wird, die relativ zum Verzeichnis der ini-Datei angegeben sind.
[pytest-watch](https://github.com/joeyespo/pytest-watch) ist eine alternative Implementierung dieser Funktionalitรคt.
### รberspringen eines Testmoduls
Wenn Sie alle Testmodule ausführen möchten, mit Ausnahme einiger weniger, können Sie diese ausschließen, indem Sie eine explizite Liste der auszuführenden Tests angeben. Um zum Beispiel alle Tests außer `test_modeling_*.py` auszuführen:
```bash
pytest $(ls -1 tests/*py | grep -v test_modeling)
```
### Status leeren
Bei CI-Builds und wenn Isolation wichtig ist (auf Kosten der Geschwindigkeit), sollte der Cache geleert werden:
```bash
pytest --cache-clear tests
```
### Tests parallel ausfรผhren
Wie bereits erwรคhnt, fรผhrt `make test` รผber das Plugin `pytest-xdist` Tests parallel aus (Argument `-n X`, z.B. `-n 2`
um 2 Jobs parallel laufen zu lassen).
Mit der Option `--dist=` von `pytest-xdist` können Sie steuern, wie die Tests gruppiert werden. Mit `--dist=loadfile` werden Tests, die sich in derselben Datei befinden, demselben Prozess zugewiesen.
Da die Reihenfolge der ausgeführten Tests unterschiedlich und nicht vorhersehbar ist, gilt: Wenn die Ausführung der Testsuite mit `pytest-xdist` zu Fehlern führt (was bedeutet, dass es einige unentdeckte gekoppelte Tests gibt), verwenden Sie [pytest-replay](https://github.com/ESSS/pytest-replay), um die Tests in derselben Reihenfolge abzuspielen; das sollte dabei helfen, die fehlschlagende Sequenz auf ein Minimum zu reduzieren.
### Testreihenfolge und Wiederholung
Es ist gut, die Tests mehrmals zu wiederholen (nacheinander, zufällig oder in Gruppen), um mögliche Abhängigkeiten und zustandsbezogene Fehler (Teardown) zu erkennen. Und die einfache, mehrfache Wiederholung ist ebenfalls nützlich, um Probleme aufzudecken, die durch die Zufälligkeit von DL zutage treten.
#### Wiederholungstests
- [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder):
```bash
pip install pytest-flakefinder
```
Und fรผhren Sie dann jeden Test mehrmals durch (standardmรครig 50):
```bash
pytest --flake-finder --flake-runs=5 tests/test_failing_test.py
```
<Tip>
Dieses Plugin funktioniert nicht mit dem `-n` Flag von `pytest-xdist`.
</Tip>
<Tip>
Es gibt noch ein anderes Plugin `pytest-repeat`, aber es funktioniert nicht mit `unittest`.
</Tip>
#### Tests in zufälliger Reihenfolge ausführen
```bash
pip install pytest-random-order
```
Wichtig: Das Vorhandensein von `pytest-random-order` sorgt automatisch für eine zufällige Reihenfolge der Tests; es sind keine Konfigurationsänderungen oder Befehlszeilenoptionen erforderlich.
Wie bereits erlรคutert, ermรถglicht dies die Erkennung von gekoppelten Tests - bei denen der Zustand eines Tests den Zustand eines anderen beeinflusst. Wenn
`pytest-random-order` installiert ist, gibt es den Zufallswert aus, der fรผr diese Sitzung verwendet wurde, z.B:
```bash
pytest tests
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```
Wenn eine bestimmte Sequenz fehlschlรคgt, kรถnnen Sie sie reproduzieren, indem Sie genau diesen Seed hinzufรผgen, z.B:
```bash
pytest --random-order-seed=573663
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```
Es wird nur dann die exakte Reihenfolge reproduziert, wenn Sie genau dieselbe Liste von Tests (oder gar keine Liste) verwenden. Sobald Sie beginnen, die Liste manuell einzugrenzen, können Sie sich nicht mehr auf den Seed verlassen, sondern müssen die Tests manuell in der genauen Reihenfolge auflisten und pytest anweisen, sie nicht zu randomisieren, indem Sie `--random-order-bucket=none` verwenden, z.B.:
```bash
pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py
```
So deaktivieren Sie das Shuffling fรผr alle Tests:
```bash
pytest --random-order-bucket=none
```
Standardmรครig ist `--random-order-bucket=module` impliziert, wodurch die Dateien auf den Modulebenen gemischt werden. Es kann auch
auf den Ebenen `class`, `package`, `global` und `none` mischen. Die vollstรคndigen Details entnehmen Sie bitte der
[Dokumentation](https://github.com/jbasko/pytest-random-order).
Eine weitere Alternative zur Randomisierung ist [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly). Dieses Modul hat eine sehr ähnliche Funktionalität/Schnittstelle, bietet aber nicht die Bucket-Modi, die in `pytest-random-order` zur Verfügung stehen. Es hat dasselbe Problem, dass es sich nach der Installation aufdrängt.
### Variationen von Aussehen und Bedienung
#### pytest-sugar
[pytest-sugar](https://github.com/Frozenball/pytest-sugar) ist ein Plugin, das das Erscheinungsbild verbessert, einen Fortschrittsbalken hinzufügt und fehlschlagende Tests sowie fehlgeschlagene Assertions sofort anzeigt. Es wird bei der Installation automatisch aktiviert.
```bash
pip install pytest-sugar
```
Um Tests ohne sie durchzufรผhren, fรผhren Sie aus:
```bash
pytest -p no:sugar
```
oder deinstallieren Sie es.
#### Melden Sie den Namen jedes Subtests und seinen Fortschritt
Fรผr einen einzelnen oder eine Gruppe von Tests รผber `pytest` (nach `pip install pytest-pspec`):
```bash
pytest --pspec tests/test_optimization.py
```
#### Zeigt fehlgeschlagene Tests sofort an
[pytest-instafail](https://github.com/pytest-dev/pytest-instafail) zeigt Fehlschlรคge und Fehler sofort an, anstatt
bis zum Ende der Testsitzung zu warten.
```bash
pip install pytest-instafail
```
```bash
pytest --instafail
```
### Zu GPU oder nicht zu GPU
Bei einem GPU-aktivierten Setup fรผgen Sie zum Testen im reinen CPU-Modus `CUDA_VISIBLE_DEVICES=""` hinzu:
```bash
CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py
```
oder wenn Sie mehrere Grafikprozessoren haben, können Sie angeben, welcher von `pytest` verwendet werden soll. Wenn Sie zum Beispiel nur die zweite Grafikkarte verwenden möchten und die Grafikkarten `0` und `1` haben, können Sie Folgendes ausführen:
```bash
CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py
```
Dies ist praktisch, wenn Sie verschiedene Aufgaben auf verschiedenen GPUs ausfรผhren mรถchten.
Einige Tests müssen nur auf der CPU ausgeführt werden, andere entweder auf der CPU, der GPU oder der TPU und wieder andere auf mehreren GPUs. Die folgenden Skip-Dekoratoren werden verwendet, um die Anforderungen von Tests in Bezug auf CPU/GPU/TPU festzulegen:
- `require_torch` - dieser Test wird nur unter Torch ausgefรผhrt
- `require_torch_gpu` - wie `require_torch` plus erfordert mindestens 1 GPU
- `require_torch_multi_gpu` - wie `require_torch` und zusรคtzlich mindestens 2 GPUs erforderlich
- `require_torch_non_multi_gpu` - wie `require_torch` plus benรถtigt 0 oder 1 GPUs
- `require_torch_up_to_2_gpus` - wie `require_torch` plus erfordert 0 oder 1 oder 2 GPUs
- `require_torch_xla` - wie `require_torch` plus erfordert mindestens 1 TPU
Lassen Sie uns die GPU-Anforderungen in der folgenden Tabelle darstellen:
| n gpus | decorator |
|--------|--------------------------------|
| `>= 0` | `@require_torch` |
| `>= 1` | `@require_torch_gpu` |
| `>= 2` | `@require_torch_multi_gpu` |
| `< 2` | `@require_torch_non_multi_gpu` |
| `< 3` | `@require_torch_up_to_2_gpus` |
Hier ist zum Beispiel ein Test, der nur ausgefรผhrt werden muss, wenn 2 oder mehr GPUs verfรผgbar sind und pytorch installiert ist:
```python no-style
@require_torch_multi_gpu
def test_example_with_multi_gpu():
```
Wenn ein Test `tensorflow` benรถtigt, verwenden Sie den Dekorator `require_tf`. Zum Beispiel:
```python no-style
@require_tf
def test_tf_thing_with_tensorflow():
```
Diese Dekoratoren können gestapelt werden. Wenn ein Test zum Beispiel langsam ist und mindestens eine GPU unter PyTorch benötigt, richten Sie ihn wie folgt ein:
```python no-style
@require_torch_gpu
@slow
def test_example_slow_on_gpu():
```
Einige Dekoratoren wie `@parameterized` schreiben Testnamen um, daher müssen `@require_*`-Skip-Dekoratoren als letztes aufgeführt werden, damit sie korrekt funktionieren. Hier ist ein Beispiel für die korrekte Verwendung:
```python no-style
@parameterized.expand(...)
@require_torch_multi_gpu
def test_integration_foo():
```
Dieses Problem mit der Reihenfolge gibt es bei `@pytest.mark.parametrize` nicht: Sie können es an den Anfang oder an den Schluss setzen, und es wird trotzdem funktionieren. Es funktioniert allerdings nur bei Nicht-Unittests.
Innerhalb von Tests:
- Wie viele GPUs sind verfรผgbar:
```python
from transformers.testing_utils import get_gpu_count
n_gpu = get_gpu_count() # works with torch and tf
```
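Darauf aufbauend eine kleine Skizze (Klassenname und Skip-Meldung sind frei gewählt), wie ein Test sich zur Laufzeit selbst überspringen könnte, wenn nicht genügend GPUs vorhanden sind:
```python
import unittest

from transformers.testing_utils import get_gpu_count


class GpuCountExampleTest(unittest.TestCase):
    def test_example_needs_two_gpus(self):
        # zur Laufzeit überspringen, statt einen Dekorator zu verwenden
        if get_gpu_count() < 2:
            self.skipTest("benötigt mindestens 2 GPUs")
        # ... hier folgt der eigentliche Multi-GPU-Testcode
```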
### Testen mit einem bestimmten PyTorch-Backend oder Gerรคt
Um die Testsuite auf einem bestimmten Torch-Gerät auszuführen, fügen Sie `TRANSFORMERS_TEST_DEVICE="$device"` hinzu, wobei `$device` das Ziel-Backend ist. Zum Beispiel, um nur auf der CPU zu testen:
```bash
TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py
```
Diese Variable ist nรผtzlich, um benutzerdefinierte oder weniger verbreitete PyTorch-Backends wie `mps` zu testen. Sie kann auch verwendet werden, um den gleichen Effekt wie `CUDA_VISIBLE_DEVICES` zu erzielen, indem Sie bestimmte GPUs anvisieren oder im reinen CPU-Modus testen.
Bestimmte Gerรคte erfordern einen zusรคtzlichen Import, nachdem Sie `torch` zum ersten Mal importiert haben. Dies kann รผber die Umgebungsvariable `TRANSFORMERS_TEST_BACKEND` festgelegt werden:
```bash
TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py
```
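Die Umgebungsvariable bewirkt sinngemäß nichts anderes als das folgende Muster (Skizze; setzt voraus, dass das genannte Paket, hier `torch_npu`, tatsächlich installiert ist):
```python
import importlib

import torch  # zuerst torch importieren

# TRANSFORMERS_TEST_BACKEND="torch_npu" entspricht sinngemäß diesem zusätzlichen Import:
importlib.import_module("torch_npu")
```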
### Verteiltes Training
`pytest` kann nicht direkt mit verteiltem Training umgehen. Wenn dies versucht wird, tun die Unterprozesse nicht das Richtige
und denken am Ende, sie seien `pytest` und beginnen, die Testsuite in Schleifen auszufรผhren. Es funktioniert jedoch, wenn man
einen normalen Prozess erzeugt, der dann mehrere Worker erzeugt und die IO-Pipes verwaltet.
Hier sind einige Tests, die dies verwenden:
- [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py)
- [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py)
Um direkt mit der Ausfรผhrung zu beginnen, suchen Sie in diesen Tests nach dem Aufruf `execute_subprocess_async`.
Sie benรถtigen mindestens 2 GPUs, um diese Tests in Aktion zu sehen:
```bash
CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py
```
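Der Kern solcher Tests folgt sinngemäß dem folgenden Muster (eine Skizze; das aufgerufene Skript und die Argumente sind hier nur Platzhalter):
```python
import sys

from transformers.testing_utils import TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port


class DistributedExampleTest(TestCasePlus):
    def test_spawn_workers(self):
        # ein normaler Prozess startet hier selbst mehrere Worker und verwaltet deren IO
        cmd = [
            sys.executable,
            "-m",
            "torch.distributed.run",
            "--nproc_per_node=2",
            f"--master_port={get_torch_dist_unique_port()}",
            f"{self.test_file_dir}/_beispiel_distributed_skript.py",  # hypothetisches Skript
        ]
        execute_subprocess_async(cmd, env=self.get_env())
```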
### Erfassung von Ausgaben
Wรคhrend der Testausfรผhrung werden alle Ausgaben, die an `stdout` und `stderr` gesendet werden, aufgezeichnet. Wenn ein Test oder eine Setup-Methode fehlschlรคgt, wird die
entsprechende aufgezeichnete Ausgabe in der Regel zusammen mit dem Fehler-Traceback angezeigt.
Um die Aufzeichnung von Ausgaben zu deaktivieren und `stdout` und `stderr` normal zu erhalten, verwenden Sie `-s` oder `--capture=no`:
```bash
pytest -s tests/utils/test_logging.py
```
So senden Sie Testergebnisse an die JUnit-Formatausgabe:
```bash
py.test tests --junitxml=result.xml
```
### Farbsteuerung
So schalten Sie Farben aus (z.B. ist Gelb auf weißem Hintergrund nicht lesbar):
```bash
pytest --color=no tests/utils/test_logging.py
```
### Testbericht an den Online-Dienst pastebin senden
Erstellen Sie eine URL fรผr jeden Testfehler:
```bash
pytest --pastebin=failed tests/utils/test_logging.py
```
Dadurch werden Informationen รผber den Testlauf an einen entfernten Paste-Dienst รผbermittelt und eine URL fรผr jeden Fehlschlag bereitgestellt. Sie kรถnnen die
Tests wie gewohnt auswรคhlen oder z.B. -x hinzufรผgen, wenn Sie nur einen bestimmten Fehler senden mรถchten.
Erstellen einer URL fรผr ein ganzes Testsitzungsprotokoll:
```bash
pytest --pastebin=all tests/utils/test_logging.py
```
## Tests schreiben
๐ค Die Tests von Transformers basieren auf `unittest`, werden aber von `pytest` ausgefรผhrt, so dass die meiste Zeit Funktionen aus beiden Systemen
verwendet werden kรถnnen.
Sie können [hier](https://docs.pytest.org/en/stable/unittest.html) nachlesen, welche Funktionen dabei unterstützt werden. Wichtig ist vor allem, dass die meisten `pytest`-Fixtures nicht funktionieren. Auch die Parametrisierung nicht, aber wir verwenden das Modul `parameterized`, das auf ähnliche Weise funktioniert.
### Parametrisierung
Oft besteht die Notwendigkeit, denselben Test mehrmals auszuführen, aber mit unterschiedlichen Argumenten. Das könnte innerhalb des Tests geschehen, aber dann gibt es keine Möglichkeit, den Test mit nur einem Satz von Argumenten auszuführen.
```python
# test_this1.py
import math
import unittest

from numpy.testing import assert_equal  # eine mögliche Quelle für das unten verwendete assert_equal
from parameterized import parameterized
class TestMathUnitTest(unittest.TestCase):
@parameterized.expand(
[
("negative", -1.5, -2.0),
("integer", 1, 1.0),
("large fraction", 1.6, 1),
]
)
def test_floor(self, name, input, expected):
assert_equal(math.floor(input), expected)
```
Nun wird dieser Test standardmäßig 3 Mal ausgeführt, wobei jedes Mal die letzten 3 Argumente von `test_floor` den entsprechenden Argumenten in der Parameterliste zugeordnet werden.
Sie können auch nur die Parametersätze `negative` und `integer` ausführen, mit:
```bash
pytest -k "negative and integer" tests/test_mytest.py
```
oder alle Untertests außer `negative`, mit:
```bash
pytest -k "not negative" tests/test_mytest.py
```
Neben der Verwendung des gerade erwรคhnten Filters `-k` kรถnnen Sie auch den genauen Namen jedes Untertests herausfinden und jeden
oder alle unter Verwendung ihrer genauen Namen ausfรผhren.
```bash
pytest test_this1.py --collect-only -q
```
und es wird aufgelistet:
```bash
test_this1.py::TestMathUnitTest::test_floor_0_negative
test_this1.py::TestMathUnitTest::test_floor_1_integer
test_this1.py::TestMathUnitTest::test_floor_2_large_fraction
```
Jetzt kรถnnen Sie also nur 2 spezifische Untertests durchfรผhren:
```bash
pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer
```
Das Modul [parameterized](https://pypi.org/project/parameterized/), das sich bereits in den Entwickler-Abhängigkeiten von `transformers` befindet, funktioniert sowohl für `unittest`- als auch für `pytest`-Tests.
Wenn es sich bei dem Test jedoch nicht um einen `unittest` handelt, können Sie `pytest.mark.parametrize` verwenden (oder Sie können sehen, dass es in einigen bestehenden Tests verwendet wird, meist unter `examples`).
Hier ist das gleiche Beispiel, diesmal unter Verwendung der `parametrize`-Markierung von `pytest`:
```python
# test_this2.py
import math

import pytest
from numpy.testing import assert_equal  # eine mögliche Quelle für das unten verwendete assert_equal
@pytest.mark.parametrize(
"name, input, expected",
[
("negative", -1.5, -2.0),
("integer", 1, 1.0),
("large fraction", 1.6, 1),
],
)
def test_floor(name, input, expected):
assert_equal(math.floor(input), expected)
```
Genau wie bei `parameterized` können Sie mit `pytest.mark.parametrize` genau steuern, welche Untertests ausgeführt werden, wenn der Filter `-k` nicht ausreicht. Allerdings erzeugt diese Parametrisierungsfunktion einen etwas anderen Satz von Namen für die Untertests. Sie sehen folgendermaßen aus:
```bash
pytest test_this2.py --collect-only -q
```
und es wird aufgelistet:
```bash
test_this2.py::test_floor[integer-1-1.0]
test_this2.py::test_floor[negative--1.5--2.0]
test_this2.py::test_floor[large fraction-1.6-1]
```
Jetzt kรถnnen Sie also nur den spezifischen Test durchfรผhren:
```bash
pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0]
```
wie im vorherigen Beispiel.
### Dateien und Verzeichnisse
In Tests mรผssen wir oft wissen, wo sich Dinge relativ zur aktuellen Testdatei befinden, und das ist nicht trivial, da der Test
von mehreren Verzeichnissen aus aufgerufen werden kann oder sich in Unterverzeichnissen mit unterschiedlicher Tiefe befinden kann. Eine Hilfsklasse
`transformers.testing_utils.TestCasePlus` löst dieses Problem, indem sie alle grundlegenden Pfade ermittelt und einfache Zugriffsmöglichkeiten darauf bietet:
- `pathlib`-Objekte (alle vollstรคndig aufgelรถst):
- `test_file_path` - der aktuelle Testdateipfad, d.h. `__file__`
- `test_file_dir` - das Verzeichnis, das die aktuelle Testdatei enthรคlt
- `tests_dir` - das Verzeichnis der `tests` Testreihe
- `examples_dir` - das Verzeichnis der `examples` Test-Suite
- `repo_root_dir` - das Verzeichnis des Repositorys
- `src_dir` - das Verzeichnis von `src` (d.h. wo sich das Unterverzeichnis `transformers` befindet)
- stringifizierte Pfade - wie oben, aber diese geben Pfade als Strings zurรผck, anstatt als `pathlib`-Objekte:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Um diese zu verwenden, müssen Sie lediglich sicherstellen, dass sich der Test in einer Unterklasse von `transformers.testing_utils.TestCasePlus` befindet. Zum Beispiel:
```python
from transformers.testing_utils import TestCasePlus
class PathExampleTest(TestCasePlus):
def test_something_involving_local_locations(self):
data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"
```
Wenn Sie Pfade nicht รผber `pathlib` manipulieren mรผssen oder nur einen Pfad als String benรถtigen, kรถnnen Sie jederzeit
`str()` auf das `pathlib`-Objekt anwenden oder die Accessoren mit der Endung `_str` verwenden. Zum Beispiel:
```python
from transformers.testing_utils import TestCasePlus
class PathExampleTest(TestCasePlus):
def test_something_involving_stringified_locations(self):
examples_dir = self.examples_dir_str
```
### Temporรคre Dateien und Verzeichnisse
Die Verwendung eindeutiger temporärer Dateien und Verzeichnisse ist für die parallele Ausführung von Tests unerlässlich, damit die Tests nicht gegenseitig ihre Daten überschreiben. Außerdem möchten wir, dass die temporären Dateien und Verzeichnisse am Ende jedes Tests gelöscht werden, der sie erstellt hat. Daher ist die Verwendung von Paketen wie `tempfile`, die diese Anforderungen erfüllen, unerlässlich.
Beim Debuggen von Tests müssen Sie jedoch sehen können, was in der temporären Datei oder dem temporären Verzeichnis gespeichert wird, und Sie müssen den genauen Pfad kennen, der sich nicht bei jedem neuen Testdurchlauf zufällig ändern darf.
Für solche Zwecke ist die Hilfsklasse `transformers.testing_utils.TestCasePlus` am besten geeignet. Sie ist eine Unterklasse von `unittest.TestCase`, so dass wir in den Testmodulen einfach von ihr erben können.
Hier ist ein Beispiel fรผr die Verwendung dieser Klasse:
```python
from transformers.testing_utils import TestCasePlus
class ExamplesTests(TestCasePlus):
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
Dieser Code erstellt ein eindeutiges temporรคres Verzeichnis und setzt `tmp_dir` auf dessen Speicherort.
- Erstellen Sie ein eindeutiges temporรคres Verzeichnis:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
tmp_dir" enthรคlt den Pfad zu dem erstellten temporรคren Verzeichnis. Es wird am Ende des Tests automatisch entfernt.
Tests entfernt.
- Erstellen Sie ein temporรคres Verzeichnis meiner Wahl, stellen Sie sicher, dass es leer ist, bevor der Test beginnt, und leeren Sie es nach dem Test nicht.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
Dies ist nützlich für die Fehlersuche, wenn Sie ein bestimmtes Verzeichnis überwachen und sicherstellen möchten, dass die vorherigen Tests dort keine Daten hinterlassen haben.
- Sie können das Standardverhalten außer Kraft setzen, indem Sie die Argumente `before` und `after` direkt überschreiben, was zu einer der folgenden Verhaltensweisen führt (eine kurze Skizze folgt nach dieser Liste):
- `before=True`: das temporรคre Verzeichnis wird immer zu Beginn des Tests gelรถscht.
- `before=False`: wenn das temporรคre Verzeichnis bereits existiert, bleiben alle vorhandenen Dateien dort erhalten.
- `after=True`: das temporรคre Verzeichnis wird immer am Ende des Tests gelรถscht.
- `after=False`: das temporรคre Verzeichnis wird am Ende des Tests immer beibehalten.
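Eine kurze Skizze, die diese Argumente kombiniert (das Verzeichnis `./xxx` ist wie oben nur ein Beispiel):
```python
from transformers.testing_utils import TestCasePlus


class TmpDirDebugTest(TestCasePlus):
    def test_keep_tmp_dir_for_debugging(self):
        # Verzeichnis vor dem Test leeren, nach dem Test aber zur Inspektion behalten
        tmp_dir = self.get_auto_remove_tmp_dir("./xxx", before=True, after=False)
        # ... Testcode, der Dateien nach `tmp_dir` schreibt
```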
<Tip>
Um das Äquivalent von `rm -r` sicher ausführen zu können, sind nur Unterverzeichnisse des Projekt-Checkouts erlaubt, wenn ein explizites `tmp_dir` verwendet wird, damit nicht versehentlich `/tmp` oder ein ähnlich wichtiger Teil des Dateisystems gelöscht wird. Geben Sie also bitte immer Pfade an, die mit `./` beginnen.
</Tip>
<Tip>
Jeder Test kann mehrere temporäre Verzeichnisse registrieren; sie werden alle automatisch entfernt, sofern nichts anderes gewünscht ist.
</Tip>
### Temporรคre รberschreibung von sys.path
Wenn Sie `sys.path` vorรผbergehend รผberschreiben mรผssen, um z.B. von einem anderen Test zu importieren, kรถnnen Sie den
Kontextmanager `ExtendSysPath` verwenden. Beispiel:
```python
import os
from transformers.testing_utils import ExtendSysPath
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/.."):
from test_trainer import TrainerIntegrationCommon # noqa
```
### รberspringen von Tests
Dies ist nützlich, wenn ein Fehler gefunden und ein neuer Test geschrieben wird, der Fehler aber noch nicht behoben ist. Damit wir ihn in das Haupt-Repository übertragen können, müssen wir sicherstellen, dass er bei `make test` übersprungen wird.
Methoden:
- Ein **Skip** bedeutet, dass Sie erwarten, dass Ihr Test nur dann erfolgreich ist, wenn einige Bedingungen erfüllt sind; andernfalls sollte pytest die Ausführung des Tests ganz überspringen. Übliche Beispiele sind das Überspringen von Nur-Windows-Tests auf Nicht-Windows-Plattformen oder das Überspringen von Tests, die von einer externen Ressource abhängen, die im Moment nicht verfügbar ist (z.B. eine Datenbank).
- Ein **xfail** bedeutet, dass Sie erwarten, dass ein Test aus irgendeinem Grund fehlschlägt. Ein gängiges Beispiel ist ein Test für eine noch nicht implementierte Funktion oder einen noch nicht behobenen Fehler. Wenn ein Test trotz eines erwarteten Fehlschlags besteht (markiert mit `pytest.mark.xfail`), ist dies ein xpass und wird in der Testzusammenfassung gemeldet.
Einer der wichtigsten Unterschiede zwischen den beiden ist, dass `skip` den Test nicht ausführt, während `xfail` dies tut. Wenn der fehlerhafte Code also einen schlechten Zustand verursacht, der sich auf andere Tests auswirkt, sollten Sie nicht `xfail` verwenden.
#### Implementierung
- Hier sehen Sie, wie Sie einen ganzen Test bedingungslos รผberspringen kรถnnen:
```python no-style
@unittest.skip(reason="this bug needs to be fixed")
def test_feature_x():
```
oder mit pytest:
```python no-style
@pytest.mark.skip(reason="this bug needs to be fixed")
```
oder mit dem `xfail` Weg:
```python no-style
@pytest.mark.xfail
def test_feature_x():
```
- Hier erfahren Sie, wie Sie einen Test aufgrund einer internen Prรผfung innerhalb des Tests auslassen kรถnnen:
```python
def test_feature_x():
if not has_something():
pytest.skip("unsupported configuration")
```
oder das ganze Modul:
```python
import pytest
if not pytest.config.getoption("--custom-flag"):
pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)
```
oder mit dem `xfail` Weg:
```python
def test_feature_x():
pytest.xfail("expected to fail until bug XYZ is fixed")
```
- Hier erfahren Sie, wie Sie alle Tests in einem Modul รผberspringen kรถnnen, wenn ein Import fehlt:
```python
docutils = pytest.importorskip("docutils", minversion="0.3")
```
- Einen Test aufgrund einer Bedingung รผberspringen:
```python no-style
@pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher")
def test_feature_x():
```
oder:
```python no-style
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_feature_x():
```
oder รผberspringen Sie das ganze Modul:
```python no-style
@pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows")
class TestClass():
def test_feature_x(self):
```
Weitere Details, Beispiele und Mรถglichkeiten finden Sie [hier](https://docs.pytest.org/en/latest/skipping.html).
### Langsame Tests
Die Bibliothek der Tests wächst ständig, und einige der Tests brauchen Minuten, daher können wir es uns nicht leisten, auf CI eine Stunde auf die Fertigstellung der Testsuite zu warten. Daher sollten langsame Tests, mit einigen Ausnahmen für wichtige Tests, wie im folgenden Beispiel markiert werden:
```python no-style
from transformers.testing_utils import slow
@slow
def test_integration_foo():
```
Sobald ein Test als `@slow` markiert ist, setzen Sie die Umgebungsvariable `RUN_SLOW=1`, um solche Tests auszufรผhren, z.B:
```bash
RUN_SLOW=1 pytest tests
```
Einige Dekoratoren wie `@parameterized` schreiben Testnamen um, daher müssen `@slow` und die übrigen Skip-Dekoratoren `@require_*` als letztes aufgeführt werden, damit sie korrekt funktionieren. Hier ist ein Beispiel für die korrekte Verwendung:
```python no-style
@parameterized.expand(...)
@slow
def test_integration_foo():
```
Wie zu Beginn dieses Dokuments erläutert, werden langsame Tests nach einem Zeitplan ausgeführt und nicht in den CI-Prüfungen für PRs. Es ist also möglich, dass einige Probleme bei der Einreichung eines PRs übersehen und zusammengeführt werden. Solche Probleme werden beim nächsten geplanten CI-Job abgefangen. Das bedeutet aber auch, dass es wichtig ist, die langsamen Tests auf Ihrem Rechner auszuführen, bevor Sie den PR einreichen.
Hier ist ein grober Entscheidungsmechanismus fรผr die Auswahl der Tests, die als langsam markiert werden sollen:
Wenn der Test auf eine der internen Komponenten der Bibliothek ausgerichtet ist (z.B. Modellierungsdateien, Tokenisierungsdateien, Pipelines), dann sollten wir diesen Test in der nicht langsamen Testsuite ausführen. Wenn er sich auf einen anderen Aspekt der Bibliothek bezieht, wie z.B. die Dokumentation oder die Beispiele, dann sollten wir diese Tests in der langsamen Testsuite ausführen. Und um diesen Ansatz dann zu verfeinern, sollten wir Ausnahmen einführen:
- Alle Tests, die einen umfangreichen Satz von Gewichten oder einen Datensatz mit einer Größe von mehr als ~50MB herunterladen müssen (z.B. Modell-, Tokenizer- oder Pipeline-Integrationstests), sollten auf langsam gesetzt werden. Wenn Sie ein neues Modell hinzufügen, sollten Sie eine kleine Version des Modells (mit zufälligen Gewichten) für Integrationstests erstellen und in den Hub hochladen. Dies wird in den folgenden Abschnitten erläutert.
- Alle Tests, die ein Training durchfรผhren mรผssen, das nicht speziell auf Schnelligkeit optimiert ist, sollten auf langsam gesetzt werden.
- Wir können Ausnahmen einführen, wenn einige dieser Tests, die eigentlich nicht langsam sein sollten, unerträglich langsam sind, und sie auf `@slow` setzen. Auto-Modellierungstests, die große Dateien auf der Festplatte speichern und laden, sind ein gutes Beispiel für Tests, die als `@slow` markiert sind.
- Wenn ein Test in weniger als 1 Sekunde auf CI abgeschlossen wird (einschlieรlich eventueller Downloads), sollte es sich trotzdem um einen normalen Test handeln.
Insgesamt müssen alle nicht langsamen Tests die verschiedenen Interna abdecken und dabei schnell bleiben. Zum Beispiel kann eine signifikante Abdeckung erreicht werden, indem Sie mit speziell erstellten kleinen Modellen mit zufälligen Gewichten testen. Solche Modelle haben eine sehr geringe Anzahl von Schichten (z.B. 2), eine kleine Vokabulargröße (z.B. 1000), usw. Die `@slow`-Tests können dann große, langsame Modelle verwenden, um qualitative Tests durchzuführen. Um die Verwendung dieser Modelle zu sehen, suchen Sie einfach nach *tiny*-Modellen mit:
```bash
grep tiny tests examples
```
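Zur Veranschaulichung eine Skizze, wie ein solches winziges Zufallsmodell aussehen könnte (die konkreten Werte sind frei gewählt; fertige Tiny-Modelle liegen bereits im Hub):
```python
from transformers import BertConfig, BertModel

# winzige, zufällig initialisierte Konfiguration: kein Download, sehr schnell
tiny_config = BertConfig(
    vocab_size=1000,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
)
tiny_model = BertModel(tiny_config)
```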
Hier ist ein Beispiel für ein [Skript](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py), das das winzige Modell [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de) erstellt hat. Sie können es ganz einfach an die Architektur Ihres eigenen Modells anpassen.
Es ist leicht, die Laufzeit falsch zu messen, wenn zum Beispiel ein groรes Modell heruntergeladen wird, aber wenn
Sie es lokal testen, wรผrden die heruntergeladenen Dateien zwischengespeichert und somit die Download-Zeit nicht gemessen werden. Prรผfen Sie daher den
Ausfรผhrungsgeschwindigkeitsbericht in den CI-Protokollen (die Ausgabe von `pytest --durations=0 tests`).
Dieser Bericht ist auch nรผtzlich, um langsame Ausreiรer zu finden, die nicht als solche gekennzeichnet sind oder die neu geschrieben werden mรผssen, um schnell zu sein.
Wenn Sie bemerken, dass die Testsuite beim CI langsam wird, zeigt die oberste Liste dieses Berichts die langsamsten
Tests.
### Testen der stdout/stderr-Ausgabe
Um Funktionen zu testen, die in `stdout` und/oder `stderr` schreiben, kann der Test auf diese Ströme zugreifen, indem er das [capsys-System](https://docs.pytest.org/en/latest/capture.html) von `pytest` verwendet. So wird dies bewerkstelligt:
```python
import sys
def print_to_stdout(s):
print(s)
def print_to_stderr(s):
sys.stderr.write(s)
def test_result_and_stdout(capsys):
msg = "Hello"
print_to_stdout(msg)
print_to_stderr(msg)
out, err = capsys.readouterr() # consume the captured output streams
# optional: if you want to replay the consumed streams:
sys.stdout.write(out)
sys.stderr.write(err)
# test:
assert msg in out
assert msg in err
```
Und natürlich wird `stderr` in den meisten Fällen als Teil einer Ausnahme auftreten, so dass in einem solchen Fall try/except verwendet werden muss:
```python
def raise_exception(msg):
raise ValueError(msg)
def test_something_exception():
msg = "Not a good value"
error = ""
try:
raise_exception(msg)
except Exception as e:
error = str(e)
assert msg in error, f"{msg} is in the exception:\n{error}"
```
Ein anderer Ansatz zur Erfassung von stdout ist `contextlib.redirect_stdout`:
```python
import sys
from contextlib import redirect_stdout
from io import StringIO
def print_to_stdout(s):
print(s)
def test_result_and_stdout():
msg = "Hello"
buffer = StringIO()
with redirect_stdout(buffer):
print_to_stdout(msg)
out = buffer.getvalue()
# optional: if you want to replay the consumed streams:
sys.stdout.write(out)
# test:
assert msg in out
```
Ein wichtiges potenzielles Problem beim Erfassen von stdout ist, dass es `\r`-Zeichen enthalten kann, die bei normalem `print` alles zurücksetzen, was bisher ausgegeben wurde. Mit `pytest` gibt es kein Problem, aber mit `pytest -s` werden diese Zeichen in den Puffer aufgenommen. Um den Test mit und ohne `-s` laufen zu lassen, müssen Sie die erfasste Ausgabe also zusätzlich bereinigen, indem Sie `re.sub(r'~.*\r', '', buf, 0, re.M)` verwenden.
Aber dann haben wir einen Hilfskontextmanager-Wrapper, der sich automatisch um alles kümmert, unabhängig davon, ob die Ausgabe `\r`-Zeichen enthält oder nicht:
```python
from transformers.testing_utils import CaptureStdout
with CaptureStdout() as cs:
function_that_writes_to_stdout()
print(cs.out)
```
Hier ist ein vollstรคndiges Testbeispiel:
```python
from transformers.testing_utils import CaptureStdout
msg = "Secret message\r"
final = "Hello World"
with CaptureStdout() as cs:
print(msg + final)
assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}"
```
Wenn Sie `stderr` aufzeichnen mรถchten, verwenden Sie stattdessen die Klasse `CaptureStderr`:
```python
from transformers.testing_utils import CaptureStderr
with CaptureStderr() as cs:
function_that_writes_to_stderr()
print(cs.err)
```
Wenn Sie beide Streams auf einmal erfassen mรผssen, verwenden Sie die รผbergeordnete Klasse `CaptureStd`:
```python
from transformers.testing_utils import CaptureStd
with CaptureStd() as cs:
function_that_writes_to_stdout_and_stderr()
print(cs.err, cs.out)
```
Um das Debuggen von Testproblemen zu erleichtern, geben diese Kontextmanager die aufgezeichneten Streams beim Verlassen des Kontexts standardmäßig wieder aus.
### Erfassen von Logger-Streams
Wenn Sie die Ausgabe eines Loggers validieren mรผssen, kรถnnen Sie `CaptureLogger` verwenden:
```python
from transformers import logging
from transformers.testing_utils import CaptureLogger
msg = "Testing 1, 2, 3"
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
with CaptureLogger(logger) as cl:
logger.info(msg)
assert cl.out == msg + "\n"
```
### Testen mit Umgebungsvariablen
Wenn Sie die Auswirkungen von Umgebungsvariablen für einen bestimmten Test testen möchten, können Sie den Hilfsdekorator `transformers.testing_utils.mockenv` verwenden:
```python
from transformers.testing_utils import mockenv
class HfArgumentParserTest(unittest.TestCase):
@mockenv(TRANSFORMERS_VERBOSITY="error")
def test_env_override(self):
env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
```
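Eine vollständige, lauffähige Variante davon als Skizze (Klassenname und Assertion sind frei gewählt):
```python
import os
import unittest

from transformers.testing_utils import mockenv


class MockEnvExampleTest(unittest.TestCase):
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # innerhalb des Tests ist die Variable gesetzt, danach wird der alte Zustand wiederhergestellt
        self.assertEqual(os.getenv("TRANSFORMERS_VERBOSITY"), "error")
```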
Manchmal muss ein externes Programm aufgerufen werden, was die Einstellung von `PYTHONPATH` in `os.environ` erfordert, um mehrere lokale Pfade einzuschließen. Die Hilfsklasse `transformers.testing_utils.TestCasePlus` hilft Ihnen dabei:
```python
from transformers.testing_utils import TestCasePlus
class EnvExampleTest(TestCasePlus):
def test_external_prog(self):
env = self.get_env()
# now call the external program, passing `env` to it
```
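Der Aufruf des externen Programms könnte dann zum Beispiel so aussehen (Skizze; das aufgerufene Skript ist nur ein hypothetischer Platzhalter):
```python
import subprocess
import sys

from transformers.testing_utils import TestCasePlus


class EnvExampleTest(TestCasePlus):
    def test_external_prog(self):
        env = self.get_env()
        # hypothetischer Platzhalter für ein echtes Beispielskript
        cmd = [sys.executable, f"{self.examples_dir}/irgendein_skript.py", "--help"]
        subprocess.run(cmd, env=env, check=True)
```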
Je nachdem, ob sich die Testdatei in der Testsuite `tests` oder in `examples` befindet, setzt diese Methode `env[PYTHONPATH]` korrekt auf eines dieser beiden Verzeichnisse sowie auf das Verzeichnis `src`, damit der Test gegen das aktuelle Projektarchiv läuft, und schließlich auf das, was in `env[PYTHONPATH]` bereits gesetzt war, bevor der Test aufgerufen wurde (sofern vorhanden).
Diese Hilfsmethode erstellt eine Kopie des Objekts `os.environ`, so dass das Original intakt bleibt.
### Reproduzierbare Ergebnisse erhalten
In manchen Situationen möchten Sie vielleicht die Zufälligkeit Ihrer Tests beseitigen. Um identische, reproduzierbare Ergebnisse zu erhalten, müssen Sie den Seed festlegen:
```python
seed = 42
# python RNG
import random
random.seed(seed)
# pytorch RNGs
import torch
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
# numpy RNG
import numpy as np
np.random.seed(seed)
# tf RNG
import tensorflow as tf

tf.random.set_seed(seed)
```
### Tests debuggen
Um einen Debugger an der Stelle zu starten, an der die Warnung auftritt, gehen Sie wie folgt vor:
```bash
pytest tests/utils/test_logging.py -W error::UserWarning --pdb
```
## Arbeiten mit Github-Aktionen-Workflows
Um einen CI-Job fรผr einen Self-Push-Workflow auszulรถsen, mรผssen Sie:
1. Erstellen Sie einen neuen Zweig auf dem `transformers`-Ursprung (keine Gabelung!).
2. Der Name des Zweigs muss entweder mit `ci_` oder `ci-` beginnen (`main` löst ihn auch aus, aber wir können keine PRs auf `main` erstellen). Er wird auch nur für bestimmte Pfade ausgelöst; die aktuelle Definition finden Sie, falls sie sich seit der Erstellung dieses Dokuments geändert hat, [hier](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) unter *push:*
3. Erstellen Sie einen PR von diesem Zweig.
4. Dann können Sie [hier](https://github.com/huggingface/transformers/actions/workflows/self-push.yml) sehen, wie der Job erscheint. Er wird möglicherweise nicht sofort ausgeführt, wenn ein Backlog vorhanden ist.
## Testen experimenteller CI-Funktionen
Das Testen von CI-Funktionen kann potenziell problematisch sein, da es die normale CI-Funktion beeintrรคchtigen kann. Wenn also eine
neue CI-Funktion hinzugefรผgt werden soll, sollte dies wie folgt geschehen.
1. Erstellen Sie einen neuen Auftrag, der die zu testende Funktion testet.
2. Der neue Job muss immer erfolgreich sein, so dass er uns ein grünes ✓ gibt (Details unten).
3. Lassen Sie ihn einige Tage lang laufen, um zu sehen, dass eine Vielzahl verschiedener PR-Typen darauf läuft (Benutzer-Fork-Zweige, nicht geforkte Zweige, Zweige aus der direkten Dateibearbeitung in der github.com-UI, verschiedene erzwungene Pushes usw.; es gibt sehr viele), während Sie die Protokolle des experimentellen Jobs überwachen (nicht den gesamten Job, der absichtlich immer grün ist).
4. Wenn klar ist, dass alles in Ordnung ist, fรผgen Sie die neuen รnderungen in die bestehenden Jobs ein.
Auf diese Weise wird der normale Arbeitsablauf nicht durch Experimente mit der CI-Funktionalitรคt selbst beeintrรคchtigt.
Wie kรถnnen wir nun dafรผr sorgen, dass der Auftrag immer erfolgreich ist, wรคhrend die neue CI-Funktion entwickelt wird?
Einige CIs, wie TravisCI, unterstรผtzen ignore-step-failure und melden den gesamten Job als erfolgreich, aber CircleCI und
Github Actions unterstรผtzen dies zum jetzigen Zeitpunkt nicht.
Sie kรถnnen also die folgende Abhilfe verwenden:
1. Setzen Sie `set +euo pipefail` am Anfang des Ausfรผhrungsbefehls, um die meisten potenziellen Fehler im Bash-Skript zu unterdrรผcken.
2. Der letzte Befehl muss ein Erfolg sein: `echo "done"` oder einfach `true` reicht aus.
Hier ist ein Beispiel:
```yaml
- run:
name: run CI experiment
command: |
set +euo pipefail
echo "setting run-all-despite-any-errors-mode"
this_command_will_fail
echo "but bash continues to run"
# emulate another failure
false
# but the last command must be a success
echo "during experiment do not remove: reporting success to CI, even if there were failures"
```
Fรผr einfache Befehle kรถnnen Sie auch Folgendes tun:
```bash
cmd_that_may_fail || true
```
Wenn Sie mit den Ergebnissen zufrieden sind, integrieren Sie den experimentellen Schritt oder Job natürlich in den Rest der normalen Jobs und entfernen dabei `set +euo pipefail` oder andere Dinge, die Sie eventuell hinzugefügt haben, damit der experimentelle Auftrag den normalen CI-Betrieb nicht beeinträchtigt.
Dieser ganze Prozess wäre viel einfacher gewesen, wenn wir nur etwas wie `allow-failure` für den experimentellen Schritt festlegen könnten und ihn scheitern lassen würden, ohne den Gesamtstatus der PRs zu beeinträchtigen. Aber wie bereits erwähnt, unterstützen CircleCI und Github Actions dies im Moment nicht.
Sie kรถnnen in diesen CI-spezifischen Threads fรผr diese Funktion stimmen und sehen, wo sie steht:
- [Github Actions:](https://github.com/actions/toolkit/issues/399)
- [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344)
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# General Utilities
This page lists all of Transformers general utility functions that are found in the file `utils.py`.
Most of those are only useful if you are studying the general code in the library.
## Enums and namedtuples
[[autodoc]] utils.ExplicitEnum
[[autodoc]] utils.PaddingStrategy
[[autodoc]] utils.TensorType
## Special Decorators
[[autodoc]] utils.add_start_docstrings
[[autodoc]] utils.add_start_docstrings_to_model_forward
[[autodoc]] utils.add_end_docstrings
[[autodoc]] utils.add_code_sample_docstrings
[[autodoc]] utils.replace_return_docstrings
## Special Properties
[[autodoc]] utils.cached_property
## Other Utilities
[[autodoc]] utils._LazyModule
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Data Collator
Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of
the same type as the elements of `train_dataset` or `eval_dataset`.
To be able to build batches, data collators may apply some processing (like padding). Some of them (like
[`DataCollatorForLanguageModeling`]) also apply some random data augmentation (like random masking)
on the formed batch.
Examples of use can be found in the [example scripts](../examples) or [example notebooks](../notebooks).
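As a quick illustration, here is a minimal sketch (assuming a PyTorch backend and using the `bert-base-uncased` checkpoint as an example) of dynamic padding with [`DataCollatorWithPadding`]:
```python
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

# two examples of different lengths; the collator pads them to the longest one in the batch
features = [tokenizer("Hello world"), tokenizer("A slightly longer example sentence")]
batch = data_collator(features)
print(batch["input_ids"].shape)
```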
## Default data collator
[[autodoc]] data.data_collator.default_data_collator
## DefaultDataCollator
[[autodoc]] data.data_collator.DefaultDataCollator
## DataCollatorWithPadding
[[autodoc]] data.data_collator.DataCollatorWithPadding
## DataCollatorForTokenClassification
[[autodoc]] data.data_collator.DataCollatorForTokenClassification
## DataCollatorForSeq2Seq
[[autodoc]] data.data_collator.DataCollatorForSeq2Seq
## DataCollatorForLanguageModeling
[[autodoc]] data.data_collator.DataCollatorForLanguageModeling
- numpy_mask_tokens
- tf_mask_tokens
- torch_mask_tokens
## DataCollatorForWholeWordMask
[[autodoc]] data.data_collator.DataCollatorForWholeWordMask
- numpy_mask_tokens
- tf_mask_tokens
- torch_mask_tokens
## DataCollatorForPermutationLanguageModeling
[[autodoc]] data.data_collator.DataCollatorForPermutationLanguageModeling
- numpy_mask_tokens
- tf_mask_tokens
- torch_mask_tokens
## DataCollatorWithFlattening
[[autodoc]] data.data_collator.DataCollatorWithFlattening
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ALBERT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=albert">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-albert-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/albert-base-v2">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
The ALBERT model was proposed in [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942) by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma,
Radu Soricut. It presents two parameter-reduction techniques to lower memory consumption and increase the training
speed of BERT:
- Splitting the embedding matrix into two smaller matrices.
- Using repeating layers split among groups.
The abstract from the paper is the following:
*Increasing model size when pretraining natural language representations often results in improved performance on
downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations,
longer training times, and unexpected model degradation. To address these problems, we present two parameter-reduction
techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows
that our proposed methods lead to models that scale much better compared to the original BERT. We also use a
self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks
with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and
SQuAD benchmarks while having fewer parameters compared to BERT-large.*
This model was contributed by [lysandre](https://huggingface.co/lysandre). This model jax version was contributed by
[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT).
## Usage tips
- ALBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather
than the left.
- ALBERT uses repeating layers which results in a small memory footprint, however the computational cost remains
similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same
number of (repeating) layers.
- Embedding size E is different from hidden size H justified because the embeddings are context independent (one embedding vector represents one token), whereas hidden states are context dependent (one hidden state represents a sequence of tokens) so it's more logical to have H >> E. Also, the embedding matrix is large since it's V x E (V being the vocab size). If E < H, it has less parameters.
- Layers are split in groups that share parameters (to save memory).
- Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. The model must predict if they have been swapped or not.
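As a brief illustration of the tips above, here is a minimal sketch (using the `albert/albert-base-v2` checkpoint as an example):
```python
import torch
from transformers import AlbertModel, AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertModel.from_pretrained("albert/albert-base-v2")

# inputs are padded on the right, which is recommended for absolute position embeddings
inputs = tokenizer(["Hello ALBERT", "A longer second sentence"], padding=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```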
## Resources
The resources provided in the following sections consist of a list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ALBERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-classification"/>
- [`AlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification).
- [`TFAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification).
- [`FlaxAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb).
- Check the [Text classification task guide](../tasks/sequence_classification) on how to use the model.
<PipelineTag pipeline="token-classification"/>
- [`AlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification).
- [`TFAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).
- [`FlaxAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification).
- [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the ๐ค Hugging Face Course.
- Check the [Token classification task guide](../tasks/token_classification) on how to use the model.
<PipelineTag pipeline="fill-mask"/>
- [`AlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb).
- [`TFAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
- [`FlaxAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb).
- [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the ๐ค Hugging Face Course.
- Check the [Masked language modeling task guide](../tasks/masked_language_modeling) on how to use the model.
<PipelineTag pipeline="question-answering"/>
- [`AlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb).
- [`TFAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb).
- [`FlaxAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering).
- [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the ๐ค Hugging Face Course.
- Check the [Question answering task guide](../tasks/question_answering) on how to use the model.
**Multiple choice**
- [`AlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb).
- [`TFAlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb).
- Check the [Multiple choice task guide](../tasks/multiple_choice) on how to use the model.
## AlbertConfig
[[autodoc]] AlbertConfig
## AlbertTokenizer
[[autodoc]] AlbertTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## AlbertTokenizerFast
[[autodoc]] AlbertTokenizerFast
## Albert specific outputs
[[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput
[[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput
<frameworkcontent>
<pt>
## AlbertModel
[[autodoc]] AlbertModel
- forward
## AlbertForPreTraining
[[autodoc]] AlbertForPreTraining
- forward
## AlbertForMaskedLM
[[autodoc]] AlbertForMaskedLM
- forward
## AlbertForSequenceClassification
[[autodoc]] AlbertForSequenceClassification
- forward
## AlbertForMultipleChoice
[[autodoc]] AlbertForMultipleChoice
## AlbertForTokenClassification
[[autodoc]] AlbertForTokenClassification
- forward
## AlbertForQuestionAnswering
[[autodoc]] AlbertForQuestionAnswering
- forward
</pt>
<tf>
## TFAlbertModel
[[autodoc]] TFAlbertModel
- call
## TFAlbertForPreTraining
[[autodoc]] TFAlbertForPreTraining
- call
## TFAlbertForMaskedLM
[[autodoc]] TFAlbertForMaskedLM
- call
## TFAlbertForSequenceClassification
[[autodoc]] TFAlbertForSequenceClassification
- call
## TFAlbertForMultipleChoice
[[autodoc]] TFAlbertForMultipleChoice
- call
## TFAlbertForTokenClassification
[[autodoc]] TFAlbertForTokenClassification
- call
## TFAlbertForQuestionAnswering
[[autodoc]] TFAlbertForQuestionAnswering
- call
</tf>
<jax>
## FlaxAlbertModel
[[autodoc]] FlaxAlbertModel
- __call__
## FlaxAlbertForPreTraining
[[autodoc]] FlaxAlbertForPreTraining
- __call__
## FlaxAlbertForMaskedLM
[[autodoc]] FlaxAlbertForMaskedLM
- __call__
## FlaxAlbertForSequenceClassification
[[autodoc]] FlaxAlbertForSequenceClassification
- __call__
## FlaxAlbertForMultipleChoice
[[autodoc]] FlaxAlbertForMultipleChoice
- __call__
## FlaxAlbertForTokenClassification
[[autodoc]] FlaxAlbertForTokenClassification
- __call__
## FlaxAlbertForQuestionAnswering
[[autodoc]] FlaxAlbertForQuestionAnswering
- __call__
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/albert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/albert.md",
"repo_id": "transformers",
"token_count": 3405
} | 267 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Data2Vec
## Overview
The Data2Vec model was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.
Data2Vec proposes a unified framework for self-supervised learning across different data modalities - text, audio and images.
Importantly, predicted targets for pre-training are contextualized latent representations of the inputs, rather than modality-specific, context-independent targets.
The abstract from the paper is the following:
*While the general idea of self-supervised learning is identical across modalities, the actual algorithms and
objectives differ widely because they were developed with a single modality in mind. To get us closer to general
self-supervised learning, we present data2vec, a framework that uses the same learning method for either speech,
NLP or computer vision. The core idea is to predict latent representations of the full input data based on a
masked view of the input in a self-distillation setup using a standard Transformer architecture.
Instead of predicting modality-specific targets such as words, visual tokens or units of human speech which
are local in nature, data2vec predicts contextualized latent representations that contain information from
the entire input. Experiments on the major benchmarks of speech recognition, image classification, and
natural language understanding demonstrate a new state of the art or competitive performance to predominant approaches.
Models and code are available at www.github.com/pytorch/fairseq/tree/master/examples/data2vec.*
This model was contributed by [edugp](https://huggingface.co/edugp) and [patrickvonplaten](https://huggingface.co/patrickvonplaten).
[sayakpaul](https://github.com/sayakpaul) and [Rocketknight1](https://github.com/Rocketknight1) contributed Data2Vec for vision in TensorFlow.
The original code (for NLP and Speech) can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/data2vec).
The original code for vision can be found [here](https://github.com/facebookresearch/data2vec_vision/tree/main/beit).
## Usage tips
- Data2VecAudio, Data2VecText, and Data2VecVision have all been trained using the same self-supervised learning method.
- For Data2VecAudio, preprocessing is identical to [`Wav2Vec2Model`], including feature extraction.
- For Data2VecText, preprocessing is identical to [`RobertaModel`], including tokenization.
- For Data2VecVision, preprocessing is identical to [`BeitModel`], including feature extraction. A short loading sketch is shown after this list.
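Since the checkpoints follow the preprocessing of their modality-specific counterparts, they can be loaded with the standard Auto classes. Below is a minimal sketch for the text variant, assuming the `facebook/data2vec-text-base` checkpoint:

```python
from transformers import AutoTokenizer, AutoModel

# Data2VecText reuses RoBERTa-style byte-level BPE tokenization
tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
model = AutoModel.from_pretrained("facebook/data2vec-text-base")

inputs = tokenizer("Hello, data2vec!", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```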
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Data2Vec.
<PipelineTag pipeline="image-classification"/>
- [`Data2VecVisionForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- To fine-tune [`TFData2VecVisionForImageClassification`] on a custom dataset, see [this notebook](https://colab.research.google.com/github/sayakpaul/TF-2.0-Hacks/blob/master/data2vec_vision_image_classification.ipynb).
**Data2VecText documentation resources**
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
**Data2VecAudio documentation resources**
- [Audio classification task guide](../tasks/audio_classification)
- [Automatic speech recognition task guide](../tasks/asr)
**Data2VecVision documentation resources**
- [Image classification](../tasks/image_classification)
- [Semantic segmentation](../tasks/semantic_segmentation)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## Data2VecTextConfig
[[autodoc]] Data2VecTextConfig
## Data2VecAudioConfig
[[autodoc]] Data2VecAudioConfig
## Data2VecVisionConfig
[[autodoc]] Data2VecVisionConfig
<frameworkcontent>
<pt>
## Data2VecAudioModel
[[autodoc]] Data2VecAudioModel
- forward
## Data2VecAudioForAudioFrameClassification
[[autodoc]] Data2VecAudioForAudioFrameClassification
- forward
## Data2VecAudioForCTC
[[autodoc]] Data2VecAudioForCTC
- forward
## Data2VecAudioForSequenceClassification
[[autodoc]] Data2VecAudioForSequenceClassification
- forward
## Data2VecAudioForXVector
[[autodoc]] Data2VecAudioForXVector
- forward
## Data2VecTextModel
[[autodoc]] Data2VecTextModel
- forward
## Data2VecTextForCausalLM
[[autodoc]] Data2VecTextForCausalLM
- forward
## Data2VecTextForMaskedLM
[[autodoc]] Data2VecTextForMaskedLM
- forward
## Data2VecTextForSequenceClassification
[[autodoc]] Data2VecTextForSequenceClassification
- forward
## Data2VecTextForMultipleChoice
[[autodoc]] Data2VecTextForMultipleChoice
- forward
## Data2VecTextForTokenClassification
[[autodoc]] Data2VecTextForTokenClassification
- forward
## Data2VecTextForQuestionAnswering
[[autodoc]] Data2VecTextForQuestionAnswering
- forward
## Data2VecVisionModel
[[autodoc]] Data2VecVisionModel
- forward
## Data2VecVisionForImageClassification
[[autodoc]] Data2VecVisionForImageClassification
- forward
## Data2VecVisionForSemanticSegmentation
[[autodoc]] Data2VecVisionForSemanticSegmentation
- forward
</pt>
<tf>
## TFData2VecVisionModel
[[autodoc]] TFData2VecVisionModel
- call
## TFData2VecVisionForImageClassification
[[autodoc]] TFData2VecVisionForImageClassification
- call
## TFData2VecVisionForSemanticSegmentation
[[autodoc]] TFData2VecVisionForSemanticSegmentation
- call
</tf>
</frameworkcontent>
| transformers/docs/source/en/model_doc/data2vec.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/data2vec.md",
"repo_id": "transformers",
"token_count": 2027
} | 268 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DiT
## Overview
DiT was proposed in [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
DiT applies the self-supervised objective of [BEiT](beit) (BERT pre-training of Image Transformers) to 42 million document images, allowing for state-of-the-art results on tasks including:
- document image classification: the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset (a collection of
400,000 images belonging to one of 16 classes).
- document layout analysis: the [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) dataset (a collection of more
than 360,000 document images constructed by automatically parsing PubMed XML files).
- table detection: the [ICDAR 2019 cTDaR](https://github.com/cndplab-founder/ICDAR2019_cTDaR) dataset (a collection of
600 training images and 240 testing images).
The abstract from the paper is the following:
*Image Transformer has recently achieved significant progress for natural image understanding, either using supervised (ViT, DeiT, etc.) or self-supervised (BEiT, MAE, etc.) pre-training techniques. In this paper, we propose DiT, a self-supervised pre-trained Document Image Transformer model using large-scale unlabeled text images for Document AI tasks, which is essential since no supervised counterparts ever exist due to the lack of human labeled document images. We leverage DiT as the backbone network in a variety of vision-based Document AI tasks, including document image classification, document layout analysis, as well as table detection. Experiment results have illustrated that the self-supervised pre-trained DiT model achieves new state-of-the-art results on these downstream tasks, e.g. document image classification (91.11 → 92.69), document layout analysis (91.0 → 94.9) and table detection (94.23 → 96.55).*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dit_architecture.jpg"
alt="drawing" width="600"/>
<small> Summary of the approach. Taken from the [original paper](https://arxiv.org/abs/2203.02378). </small>
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/dit).
## Usage tips
One can directly use the weights of DiT with the AutoModel API:
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("microsoft/dit-base")
```
This will load the model pre-trained on masked image modeling. Note that this won't include the language modeling head on top, used to predict visual tokens.
To include the head, you can load the weights into a `BeitForMaskedImageModeling` model, like so:
```python
from transformers import BeitForMaskedImageModeling
model = BeitForMaskedImageModeling.from_pretrained("microsoft/dit-base")
```
You can also load a fine-tuned model from the [hub](https://huggingface.co/models?other=dit), like so:
```python
from transformers import AutoModelForImageClassification
model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
```
This particular checkpoint was fine-tuned on [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/), an important benchmark for document image classification.
A notebook that illustrates inference for document image classification can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DiT/Inference_with_DiT_(Document_Image_Transformer)_for_document_image_classification.ipynb).
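If you just want to try the fine-tuned classifier without the notebook, a short sketch using the `image-classification` pipeline could look like the following (the image path is a placeholder you need to replace):

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="microsoft/dit-base-finetuned-rvlcdip")
# "document.png" is a placeholder for a scanned document image on disk
predictions = classifier("document.png")
print(predictions[0])  # the most likely of the 16 RVL-CDIP classes
```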
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DiT.
<PipelineTag pipeline="image-classification"/>
- [`BeitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<Tip>
As DiT's architecture is equivalent to that of BEiT, one can refer to [BEiT's documentation page](beit) for all tips, code examples and notebooks.
</Tip>
| transformers/docs/source/en/model_doc/dit.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/dit.md",
"repo_id": "transformers",
"token_count": 1429
} | 269 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# FLAN-UL2
## Overview
Flan-UL2 is an encoder decoder model based on the T5 architecture. It uses the same configuration as the [UL2](ul2) model released earlier last year.
It was fine-tuned using the "Flan" prompt tuning and dataset collection. Similar to `Flan-T5`, one can directly use FLAN-UL2 weights without finetuning the model.
According to the original blog, these are the notable improvements:
- The original UL2 model was only trained with a receptive field of 512, which made it non-ideal for N-shot prompting where N is large.
- The Flan-UL2 checkpoint uses a receptive field of 2048, which makes it more usable for few-shot in-context learning.
- The original UL2 model also had mode switch tokens that were rather mandatory to get good performance. However, they were a little cumbersome as they often required changes during inference or finetuning. In this update/change, we continue training UL2 20B for an additional 100k steps (with a small batch) to forget "mode tokens" before applying Flan instruction tuning. This Flan-UL2 checkpoint does not require mode tokens anymore.
Google has released the following variants:
The original checkpoints can be found [here](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints).
## Running on low resource devices
The model is pretty heavy (~40GB in half precision) so if you just want to run the model, make sure you load your model in 8bit, and use `device_map="auto"` to make sure you don't have any OOM issue!
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-ul2", load_in_8bit=True, device_map="auto")
>>> tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
>>> inputs = tokenizer("A step by step recipe to make bolognese pasta:", return_tensors="pt")
>>> outputs = model.generate(**inputs)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['In a large skillet, brown the ground beef and onion over medium heat. Add the garlic']
```
<Tip>
Refer to [T5's documentation page](t5) for API reference, tips, code examples and notebooks.
</Tip>
| transformers/docs/source/en/model_doc/flan-ul2.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/flan-ul2.md",
"repo_id": "transformers",
"token_count": 785
} | 270 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# GPT-NeoX
## Overview
We introduce GPT-NeoX-20B, a 20 billion parameter autoregressive language model trained on the Pile, whose weights will
be made freely and openly available to the public through a permissive license. It is, to the best of our knowledge,
the largest dense autoregressive model that has publicly available weights at the time of submission. In this work,
we describe GPT-NeoX-20B's architecture and training and evaluate its performance on a range of language-understanding,
mathematics, and knowledge-based tasks. We find that GPT-NeoX-20B is a particularly powerful few-shot reasoner and
gains far more in performance when evaluated five-shot than similarly sized GPT-3 and FairSeq models. We open-source
the training and evaluation code, as well as the model weights, at [https://github.com/EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox).
Development of the model was led by Sid Black, Stella Biderman and Eric Hallahan, and the model was trained with
generous the support of [CoreWeave](https://www.coreweave.com/).
GPT-NeoX-20B was trained with fp16, thus it is recommended to initialize the model as follows:
```python
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b").half().cuda()
```
GPT-NeoX-20B also has a different tokenizer from the one used in GPT-J-6B and GPT-Neo. The new tokenizer allocates
additional tokens to whitespace characters, making the model more suitable for certain tasks like code generation.
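As an illustration (not taken from the original release), one way to see the effect of the whitespace handling is to compare token counts with the GPT-2 tokenizer on an indented code snippet; the exact numbers depend on the respective vocabularies:

```python
from transformers import AutoTokenizer

neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")

code = "def hello():\n    print('hello world')\n"
# fewer tokens usually means the whitespace runs are captured by dedicated tokens
print("GPT-NeoX tokens:", len(neox_tokenizer.tokenize(code)))
print("GPT-2 tokens:", len(gpt2_tokenizer.tokenize(code)))
```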
## Usage example
The `generate()` method can be used to generate text using the GPT-NeoX model.
```python
>>> from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast
>>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b")
>>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
>>> prompt = "GPTNeoX20B is a 20B-parameter autoregressive Transformer model developed by EleutherAI."
>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids
>>> gen_tokens = model.generate(
... input_ids,
... do_sample=True,
... temperature=0.9,
... max_length=100,
... )
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0]
```
## Using Flash Attention 2
Flash Attention 2 is a faster, optimized attention implementation that can be used with the model.
### Installation
First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [above](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).
Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:
```bash
pip install -U flash-attn --no-build-isolation
```
### Usage
To load a model using Flash Attention 2, we can pass the argument `attn_implementation="flash_attention_2"` to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). We'll also load the model in half-precision (e.g. `torch.float16`), since it results in almost no degradation in generation quality but significantly lower memory usage and faster inference:
```python
>>> import torch
>>> from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast

>>> device = "cuda"  # the device to load the model onto
>>> model = GPTNeoXForCausalLM.from_pretrained(
...     "EleutherAI/gpt-neox-20b", torch_dtype=torch.float16, attn_implementation="flash_attention_2"
... ).to(device)
```
### Expected speedups
Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using the `stockmark/gpt-neox-japanese-1.4b` checkpoint and the Flash Attention 2 version of the model, using a sequence length of 2048.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/gpt-neox-1.8b-speedup.jpg">
</div>
## Using Scaled Dot Product Attention (SDPA)
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
page for more information.
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
```python
import torch
from transformers import GPTNeoXForCausalLM

model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", torch_dtype=torch.float16, attn_implementation="sdpa")
...
```
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
On a local benchmark (rtx3080ti-16GB, PyTorch 2.2.1, OS Ubuntu 22.04) using `float16` with
[pythia-410m-deduped](https://huggingface.co/EleutherAI/pythia-410m-deduped), we saw the
following speedups during training and inference.
### Training
| Batch size | Seq len | Time per batch (Eager - s) | Time per batch (SDPA - s) | Speedup (%) | Eager peak mem (MB) | SDPA peak mem (MB) | Mem saving (%) |
|-----------:|-----------:|---------------------------:|-----------------------------:|------------:|--------------------:|-------------------:|------------------:|
| 1 | 128 | 0.024 | 0.019 | 28.945 | 1789.95 | 1789.95 | 0 |
| 1 | 256 | 0.039 | 0.031 | 23.18 | 1845.83 | 1844.84 | 0.053 |
| 1 | 512 | 0.08 | 0.055 | 45.524 | 2278.38 | 1953.76 | 16.615 |
| 1 | 1024 | 0.19 | 0.102 | 86.777 | 4772.36 | 2408.35 | 98.159 |
| 1 | 2048 | 0.565 | 0.204 | 177.098 | 13484.1 | 3882.01 | 247.348 |
| 2 | 128 | 0.037 | 0.032 | 15.121 | 1843.86 | 1844.78 | -0.05 |
| 2 | 256 | 0.067 | 0.055 | 21.706 | 1999.72 | 1951.67 | 2.462 |
| 2 | 512 | 0.144 | 0.096 | 50.046 | 3613.16 | 2406.77 | 50.125 |
| 2 | 1024 | 0.366 | 0.193 | 89.666 | 8707.55 | 3878.86 | 124.487 |
| 2 | 2048 | OOM | 0.379 | / | OOM | 6825.13 | SDPA does not OOM |
| 4 | 128 | 0.06 | 0.054 | 11.539 | 1947.6 | 1952.06 | -0.228 |
| 4 | 256 | 0.119 | 0.093 | 28.072 | 3008.39 | 2405.99 | 25.038 |
| 4 | 512 | 0.275 | 0.187 | 47.145 | 6290.58 | 3877.29 | 62.242 |
| 4 | 1024 | OOM | 0.36 | / | OOM | 6821.98 | SDPA does not OOM |
| 4 | 2048 | OOM | 0.731 | / | OOM | 12705.1 | SDPA does not OOM |
### Inference
| Batch size | Seq len | Per token latency Eager (ms) | Per token latency SDPA (ms) | Speedup (%) | Mem Eager (MB) | Mem SDPA (MB) | Mem saved (%) |
|--------------:|-------------:|--------------------------------:|-------------------------------:|---------------:|------------------:|----------------:|-----------------:|
| 1 | 128 | 6.569 | 5.858 | 12.14 | 974.831 | 974.826 | 0 |
| 1 | 256 | 7.009 | 5.863 | 19.542 | 1029.01 | 1028.08 | 0.09 |
| 1 | 512 | 7.157 | 5.965 | 19.983 | 1137.54 | 1137.52 | 0.001 |
| 1 | 1024 | 7.523 | 6.506 | 15.637 | 1329.3 | 1329.26 | 0.003 |
| 1 | 2048 | 9.271 | 9.205 | 0.713 | 1752.47 | 1734.51 | 1.036 |
| 2 | 128 | 7.239 | 5.959 | 21.493 | 1044.8 | 1028.37 | 1.597 |
| 2 | 256 | 7.228 | 6.036 | 19.757 | 1167.32 | 1137.73 | 2.601 |
| 2 | 512 | 7.538 | 6.693 | 12.628 | 1352.93 | 1329.55 | 1.758 |
| 2 | 1024 | 8.916 | 8.632 | 3.291 | 1752.56 | 1734.62 | 1.034 |
| 2 | 2048 | 12.628 | 12.606 | 0.181 | 2558.72 | 2545.8 | 0.508 |
| 4 | 128 | 7.278 | 6.046 | 20.373 | 1168.41 | 1137.79 | 2.691 |
| 4 | 256 | 7.614 | 6.588 | 15.574 | 1353.1 | 1329.79 | 1.753 |
| 4 | 512 | 8.798 | 8.144 | 8.028 | 1752.76 | 1734.85 | 1.032 |
| 4 | 1024 | 11.765 | 11.303 | 4.09 | 2558.96 | 2546.04 | 0.508 |
| 4 | 2048 | 19.568 | 17.735 | 10.33 | 4175.5 | 4165.26 | 0.246 |
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
## GPTNeoXConfig
[[autodoc]] GPTNeoXConfig
## GPTNeoXTokenizerFast
[[autodoc]] GPTNeoXTokenizerFast
## GPTNeoXModel
[[autodoc]] GPTNeoXModel
- forward
## GPTNeoXForCausalLM
[[autodoc]] GPTNeoXForCausalLM
- forward
## GPTNeoXForQuestionAnswering
[[autodoc]] GPTNeoXForQuestionAnswering
- forward
## GPTNeoXForSequenceClassification
[[autodoc]] GPTNeoXForSequenceClassification
- forward
## GPTNeoXForTokenClassification
[[autodoc]] GPTNeoXForTokenClassification
- forward
| transformers/docs/source/en/model_doc/gpt_neox.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/gpt_neox.md",
"repo_id": "transformers",
"token_count": 6392
} | 271 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LLaVa
## Overview
LLaVa is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on GPT-generated multimodal instruction-following data. It is an auto-regressive language model based on the transformer architecture. In other words, it is a multi-modal version of LLMs fine-tuned for chat / instructions.
The LLaVa model was proposed in [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) and improved in [Improved Baselines with Visual Instruction Tuning](https://arxiv.org/pdf/2310.03744) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee.
The abstract from the paper is the following:
*Large multimodal models (LMM) have recently shown encouraging progress with visual instruction tuning. In this note, we show that the fully-connected vision-language cross-modal connector in LLaVA is surprisingly powerful and data-efficient. With simple modifications to LLaVA, namely, using CLIP-ViT-L-336px with an MLP projection and adding academic-task-oriented VQA data with simple response formatting prompts, we establish stronger baselines that achieve state-of-the-art across 11 benchmarks. Our final 13B checkpoint uses merely 1.2M publicly available data, and finishes full training in ~1 day on a single 8-A100 node. We hope this can make state-of-the-art LMM research more accessible. Code and model will be publicly available*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llava_architecture.jpg"
alt="drawing" width="600"/>
<small> LLaVa architecture. Taken from the <a href="https://arxiv.org/abs/2304.08485">original paper.</a> </small>
This model was contributed by [ArthurZ](https://huggingface.co/ArthurZ) and [ybelkada](https://huggingface.co/ybelkada).
The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/main/llava).
## Usage tips
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating.
- Note that the model has not been explicitly trained to process multiple images in the same prompt; although this is technically possible, you may experience inaccurate results.
- For better results, we recommend users to use the processor's `apply_chat_template()` method to format your prompt correctly. For that you need to construct a conversation history, passing in a plain string will not format your prompt. Each message in the conversation history for chat templates is a dictionary with keys "role" and "content". The "content" should be a list of dictionaries, for "text" and "image" modalities, as follows:
```python
from transformers import AutoProcessor
processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
conversation = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What's shown in this image?"},
],
},
{
"role": "assistant",
"content": [{"type": "text", "text": "This image shows a red stop sign."},]
},
{
"role": "user",
"content": [
{"type": "text", "text": "Describe the image in more details."},
],
},
]
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
# Note that the template simply formats your prompt, you still have to tokenize it and obtain pixel values for your images
print(text_prompt)
>>> "USER: <image>\nWhat's shown in this image? ASSISTANT: This image shows a red stop sign.</s>USER: Describe the image in more details. ASSISTANT:"
```
- If you want to construct a chat prompt yourself, below is a list of prompt formats accepted by each llava checkpoint (a full end-to-end inference sketch follows these formats):
[llava-interleave models](https://huggingface.co/collections/llava-hf/llava-interleave-668e19a97da0036aad4a2f19) requires the following format:
```bash
"<|im_start|>user <image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant"
```
For multiple turns conversation:
```bash
"<|im_start|>user <image>\n<prompt1><|im_end|><|im_start|>assistant <answer1><|im_end|><|im_start|>user <image>\n<prompt1><|im_end|><|im_start|>assistant "
```
[llava-1.5 models](https://huggingface.co/collections/llava-hf/llava-15-65f762d5b6941db5c2ba07e0) requires the following format:
```bash
"USER: <image>\n<prompt> ASSISTANT:"
```
For multiple turns conversation:
```bash
"USER: <image>\n<prompt1> ASSISTANT: <answer1></s>USER: <prompt2> ASSISTANT: <answer2></s>USER: <prompt3> ASSISTANT:"
```
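Putting these pieces together, a minimal end-to-end inference sketch with the `llava-hf/llava-1.5-7b-hf` checkpoint might look like this (the image URL is only an example):

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "llava-hf/llava-1.5-7b-hf"
model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)

# example image from the COCO validation set
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

prompt = "USER: <image>\nWhat is shown in this image? ASSISTANT:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, torch.float16)

output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))
```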
### Using Flash Attention 2
Flash Attention 2 is a faster, optimized attention implementation; please refer to the [Flash Attention 2 section of the performance docs](https://huggingface.co/docs/transformers/perf_infer_gpu_one).
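A loading sketch with Flash Attention 2 enabled, assuming `flash-attn` is installed and a compatible GPU is available:

```python
import torch
from transformers import LlavaForConditionalGeneration

# load the model in half precision with the Flash Attention 2 kernels
model = LlavaForConditionalGeneration.from_pretrained(
    "llava-hf/llava-1.5-7b-hf",
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
```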
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaVa.
<PipelineTag pipeline="image-to-text"/>
- A [Google Colab demo](https://colab.research.google.com/drive/1qsl6cd2c8gGtEW1xV5io7S8NHh-Cp1TV?usp=sharing) on how to run Llava on a free-tier Google colab instance leveraging 4-bit inference.
- A [similar notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LLaVa/Inference_with_LLaVa_for_multimodal_generation.ipynb) showcasing batched inference. 🌎
## LlavaConfig
[[autodoc]] LlavaConfig
## LlavaProcessor
[[autodoc]] LlavaProcessor
## LlavaForConditionalGeneration
[[autodoc]] LlavaForConditionalGeneration
- forward
| transformers/docs/source/en/model_doc/llava.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/llava.md",
"repo_id": "transformers",
"token_count": 1981
} | 272 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# MBart and MBart-50
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=mbart">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-mbart-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/mbart-large-50-one-to-many-mmt">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview of MBart
The MBart model was presented in [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan
Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
According to the abstract, MBART is a sequence-to-sequence denoising auto-encoder pretrained on large-scale monolingual
corpora in many languages using the BART objective. mBART is one of the first methods for pretraining a complete
sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only
on the encoder, decoder, or reconstructing parts of the text.
This model was contributed by [valhalla](https://huggingface.co/valhalla). The Authors' code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/mbart)
### Training of MBart
MBart is a multilingual encoder-decoder (sequence-to-sequence) model primarily intended for translation tasks. As the
model is multilingual it expects the sequences in a different format. A special language id token is added in both the
source and target text. The source text format is `X [eos, src_lang_code]` where `X` is the source text. The
target text format is `[tgt_lang_code] X [eos]`. `bos` is never used.
The regular [`~MBartTokenizer.__call__`] will encode the source text format passed as the first argument or with the `text`
keyword, and the target text format passed with the `text_target` keyword argument.
- Supervised training
```python
>>> from transformers import MBartForConditionalGeneration, MBartTokenizer
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
>>> example_english_phrase = "UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
>>> # forward pass
>>> model(**inputs)
```
- Generation
While generating the target text, set the `decoder_start_token_id` to the target language id. The following
example shows how to translate English to Romanian using the *facebook/mbart-large-en-ro* model.
```python
>>> from transformers import MBartForConditionalGeneration, MBartTokenizer
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
>>> article = "UN Chief Says There Is No Military Solution in Syria"
>>> inputs = tokenizer(article, return_tensors="pt")
>>> translated_tokens = model.generate(**inputs, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
>>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
"Şeful ONU declară că nu există o soluţie militară în Siria"
```
## Overview of MBart-50
MBart-50 was introduced in the [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) paper by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav
Chaudhary, Jiatao Gu, Angela Fan. MBart-50 is created using the original *mbart-large-cc25* checkpoint by extending
its embedding layers with randomly initialized vectors for an extra set of 25 language tokens and then pretrained on 50
languages.
According to the abstract
*Multilingual translation models can be created through multilingual finetuning. Instead of finetuning on one
direction, a pretrained model is finetuned on many directions at the same time. It demonstrates that pretrained models
can be extended to incorporate additional languages without loss of performance. Multilingual finetuning improves on
average 1 BLEU over the strongest baselines (being either multilingual from scratch or bilingual finetuning) while
improving 9.3 BLEU on average over bilingual baselines from scratch.*
### Training of MBart-50
The text format for MBart-50 is slightly different from mBART. For MBart-50 the language id token is used as a prefix
for both source and target text, i.e. the text format is `[lang_code] X [eos]`, where `lang_code` is source
language id for source text and target language id for target text, with `X` being the source or target text
respectively.
MBart-50 has its own tokenizer [`MBart50Tokenizer`].
- Supervised training
```python
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
src_text = " UN Chief Says There Is No Military Solution in Syria"
tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
model(**model_inputs) # forward pass
```
- Generation
To generate using the mBART-50 multilingual translation models, `eos_token_id` is used as the
`decoder_start_token_id` and the target language id is forced as the first generated token. To force the
target language id as the first generated token, pass the *forced_bos_token_id* parameter to the *generate* method.
The following example shows how to translate from Hindi to French and from Arabic to English using the
*facebook/mbart-large-50-many-to-many-mmt* checkpoint.
```python
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
article_hi = "संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है"
article_ar = "الأمين العام للأمم المتحدة يقول إنه لا يوجد حل عسكري في سوريا."
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
# translate Hindi to French
tokenizer.src_lang = "hi_IN"
encoded_hi = tokenizer(article_hi, return_tensors="pt")
generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.lang_code_to_id["fr_XX"])
tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
# => "Le chef de l 'ONU affirme qu 'il n 'y a pas de solution militaire en Syria."
# translate Arabic to English
tokenizer.src_lang = "ar_AR"
encoded_ar = tokenizer(article_ar, return_tensors="pt")
generated_tokens = model.generate(**encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
# => "The Secretary-General of the United Nations says there is no military solution in Syria."
```
## Documentation resources
- [Text classification task guide](../tasks/sequence_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Translation task guide](../tasks/translation)
- [Summarization task guide](../tasks/summarization)
## MBartConfig
[[autodoc]] MBartConfig
## MBartTokenizer
[[autodoc]] MBartTokenizer
- build_inputs_with_special_tokens
## MBartTokenizerFast
[[autodoc]] MBartTokenizerFast
## MBart50Tokenizer
[[autodoc]] MBart50Tokenizer
## MBart50TokenizerFast
[[autodoc]] MBart50TokenizerFast
<frameworkcontent>
<pt>
## MBartModel
[[autodoc]] MBartModel
## MBartForConditionalGeneration
[[autodoc]] MBartForConditionalGeneration
## MBartForQuestionAnswering
[[autodoc]] MBartForQuestionAnswering
## MBartForSequenceClassification
[[autodoc]] MBartForSequenceClassification
## MBartForCausalLM
[[autodoc]] MBartForCausalLM
- forward
</pt>
<tf>
## TFMBartModel
[[autodoc]] TFMBartModel
- call
## TFMBartForConditionalGeneration
[[autodoc]] TFMBartForConditionalGeneration
- call
</tf>
<jax>
## FlaxMBartModel
[[autodoc]] FlaxMBartModel
- __call__
- encode
- decode
## FlaxMBartForConditionalGeneration
[[autodoc]] FlaxMBartForConditionalGeneration
- __call__
- encode
- decode
## FlaxMBartForSequenceClassification
[[autodoc]] FlaxMBartForSequenceClassification
- __call__
- encode
- decode
## FlaxMBartForQuestionAnswering
[[autodoc]] FlaxMBartForQuestionAnswering
- __call__
- encode
- decode
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/mbart.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/mbart.md",
"repo_id": "transformers",
"token_count": 3130
} | 273 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# MPT
## Overview
The MPT model was proposed by the [MosaicML](https://www.mosaicml.com/) team and released with multiple sizes and finetuned variants. The MPT models are a series of open source and commercially usable LLMs pre-trained on 1T tokens.
MPT models are GPT-style decoder-only transformers with several improvements: performance-optimized layer implementations, architecture changes that provide greater training stability, and the elimination of context length limits by replacing positional embeddings with ALiBi.
- MPT base: MPT base pre-trained models on next token prediction
- MPT instruct: MPT base models fine-tuned on instruction based tasks
- MPT storywriter: MPT base models fine-tuned for 2500 steps on 65k-token excerpts of fiction books contained in the books3 corpus, this enables the model to handle very long sequences
The original code is available at the [`llm-foundry`](https://github.com/mosaicml/llm-foundry/tree/main) repository.
Read more about it [in the release blogpost](https://www.mosaicml.com/blog/mpt-7b)
## Usage tips
- Learn more about some techniques behind training of the model [in this section of llm-foundry repository](https://github.com/mosaicml/llm-foundry/blob/main/TUTORIAL.md#faqs)
- If you want to use the advanced version of the model (triton kernels, direct flash attention integration), you can still use the original model implementation by adding `trust_remote_code=True` when calling `from_pretrained`. A basic loading sketch with the native implementation is shown below.
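The sketch below shows basic loading and generation with the native implementation, assuming the `mosaicml/mpt-7b` checkpoint:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b")
model = AutoModelForCausalLM.from_pretrained("mosaicml/mpt-7b", torch_dtype=torch.bfloat16, device_map="auto")

inputs = tokenizer("MosaicML released MPT-7B, a model that", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```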
## Resources
- [Fine-tuning Notebook](https://colab.research.google.com/drive/1HCpQkLL7UXW8xJUJJ29X7QAeNJKO0frZ?usp=sharing) on how to fine-tune MPT-7B on a free Google Colab instance to turn the model into a Chatbot.
## MptConfig
[[autodoc]] MptConfig
- all
## MptModel
[[autodoc]] MptModel
- forward
## MptForCausalLM
[[autodoc]] MptForCausalLM
- forward
## MptForSequenceClassification
[[autodoc]] MptForSequenceClassification
- forward
## MptForTokenClassification
[[autodoc]] MptForTokenClassification
- forward
## MptForQuestionAnswering
[[autodoc]] MptForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/mpt.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/mpt.md",
"repo_id": "transformers",
"token_count": 824
} | 274 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# OpenAI GPT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=openai-gpt">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-openai--gpt-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/openai-gpt">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
OpenAI GPT model was proposed in [Improving Language Understanding by Generative Pre-Training](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf)
by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. It's a causal (unidirectional) transformer
pre-trained using language modeling on a large corpus with long range dependencies, the Toronto Book Corpus.
The abstract from the paper is the following:
*Natural language understanding comprises a wide range of diverse tasks such as textual entailment, question answering,
semantic similarity assessment, and document classification. Although large unlabeled text corpora are abundant,
labeled data for learning these specific tasks is scarce, making it challenging for discriminatively trained models to
perform adequately. We demonstrate that large gains on these tasks can be realized by generative pretraining of a
language model on a diverse corpus of unlabeled text, followed by discriminative fine-tuning on each specific task. In
contrast to previous approaches, we make use of task-aware input transformations during fine-tuning to achieve
effective transfer while requiring minimal changes to the model architecture. We demonstrate the effectiveness of our
approach on a wide range of benchmarks for natural language understanding. Our general task-agnostic model outperforms
discriminatively trained models that use architectures specifically crafted for each task, significantly improving upon
the state of the art in 9 out of the 12 tasks studied.*
[Write With Transformer](https://transformer.huggingface.co/doc/gpt) is a webapp created and hosted by Hugging Face
showcasing the generative capabilities of several models. GPT is one of them.
This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/openai/finetune-transformer-lm).
## Usage tips
- GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
the left.
- GPT was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next
token in a sequence. Leveraging this feature allows GPT to generate syntactically coherent text, as can be
observed in the *run_generation.py* example script and in the short sketch below.
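For example, a quick generation sketch with the `text-generation` pipeline, assuming the `openai-gpt` checkpoint on the Hub:

```python
from transformers import pipeline, set_seed

set_seed(42)
generator = pipeline("text-generation", model="openai-gpt")
print(generator("Natural language processing is", max_new_tokens=20, num_return_sequences=1))
```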
Note:
If you want to reproduce the original tokenization process of the *OpenAI GPT* paper, you will need to install `ftfy`
and `SpaCy`:
```bash
pip install spacy ftfy==4.4.3
python -m spacy download en
```
If you don't install `ftfy` and `SpaCy`, the [`OpenAIGPTTokenizer`] will default to tokenize
using BERT's `BasicTokenizer` followed by Byte-Pair Encoding (which should be fine for most usage, don't worry).
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with OpenAI GPT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-classification"/>
- A blog post on [outperforming OpenAI GPT-3 with SetFit for text-classification](https://www.philschmid.de/getting-started-setfit).
- See also: [Text classification task guide](../tasks/sequence_classification)
<PipelineTag pipeline="text-generation"/>
- A blog on how to [Finetune a non-English GPT-2 Model with Hugging Face](https://www.philschmid.de/fine-tune-a-non-english-gpt-2-model-with-huggingface).
- A blog on [How to generate text: using different decoding methods for language generation with Transformers](https://huggingface.co/blog/how-to-generate) with GPT-2.
- A blog on [Training CodeParrot 🦜 from Scratch](https://huggingface.co/blog/codeparrot), a large GPT-2 model.
- A blog on [Faster Text Generation with TensorFlow and XLA](https://huggingface.co/blog/tf-xla-generate) with GPT-2.
- A blog on [How to train a Language Model with Megatron-LM](https://huggingface.co/blog/megatron-training) with a GPT-2 model.
- A notebook on how to [finetune GPT2 to generate lyrics in the style of your favorite artist](https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb). 🌎
- A notebook on how to [finetune GPT2 to generate tweets in the style of your favorite Twitter user](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb). 🌎
- [Causal language modeling](https://huggingface.co/course/en/chapter7/6?fw=pt#training-a-causal-language-model-from-scratch) chapter of the 🤗 Hugging Face Course.
- [`OpenAIGPTLMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#gpt-2gpt-and-causal-language-modeling), [text generation example script](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-generation/run_generation.py) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb).
- [`TFOpenAIGPTLMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_clmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
- See also: [Causal language modeling task guide](../tasks/language_modeling)
<PipelineTag pipeline="token-classification"/>
- A course material on [Byte-Pair Encoding tokenization](https://huggingface.co/course/en/chapter6/5).
## OpenAIGPTConfig
[[autodoc]] OpenAIGPTConfig
## OpenAIGPTTokenizer
[[autodoc]] OpenAIGPTTokenizer
- save_vocabulary
## OpenAIGPTTokenizerFast
[[autodoc]] OpenAIGPTTokenizerFast
## OpenAI specific outputs
[[autodoc]] models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput
[[autodoc]] models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput
<frameworkcontent>
<pt>
## OpenAIGPTModel
[[autodoc]] OpenAIGPTModel
- forward
## OpenAIGPTLMHeadModel
[[autodoc]] OpenAIGPTLMHeadModel
- forward
## OpenAIGPTDoubleHeadsModel
[[autodoc]] OpenAIGPTDoubleHeadsModel
- forward
## OpenAIGPTForSequenceClassification
[[autodoc]] OpenAIGPTForSequenceClassification
- forward
</pt>
<tf>
## TFOpenAIGPTModel
[[autodoc]] TFOpenAIGPTModel
- call
## TFOpenAIGPTLMHeadModel
[[autodoc]] TFOpenAIGPTLMHeadModel
- call
## TFOpenAIGPTDoubleHeadsModel
[[autodoc]] TFOpenAIGPTDoubleHeadsModel
- call
## TFOpenAIGPTForSequenceClassification
[[autodoc]] TFOpenAIGPTForSequenceClassification
- call
</tf>
</frameworkcontent>
| transformers/docs/source/en/model_doc/openai-gpt.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/openai-gpt.md",
"repo_id": "transformers",
"token_count": 2422
} | 275 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ResNet
## Overview
The ResNet model was proposed in [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. Our implementation follows the small changes made by [Nvidia](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/resnet_50_v1_5_for_pytorch): we apply `stride=2` for downsampling in the bottleneck's `3x3` conv rather than in the first `1x1`. This is generally known as "ResNet v1.5".
ResNet introduced residual connections, which allow training networks with a previously unseen number of layers (up to 1000). ResNet won the 2015 ILSVRC & COCO competition, an important milestone in deep computer vision.
The abstract from the paper is the following:
*Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers.
The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.*
The figure below illustrates the architecture of ResNet. Taken from the [original paper](https://arxiv.org/abs/1512.03385).
<img width="600" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/resnet_architecture.png"/>
This model was contributed by [Francesco](https://huggingface.co/Francesco). The TensorFlow version of this model was added by [amyeroberts](https://huggingface.co/amyeroberts). The original code can be found [here](https://github.com/KaimingHe/deep-residual-networks).
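A minimal image classification sketch, assuming the `microsoft/resnet-50` checkpoint and the 🤗 Datasets library for a sample image:

```python
import torch
from datasets import load_dataset
from transformers import AutoImageProcessor, ResNetForImageClassification

# load a single sample image
dataset = load_dataset("huggingface/cats-image", split="test")
image = dataset[0]["image"]

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```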
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ResNet.
<PipelineTag pipeline="image-classification"/>
- [`ResNetForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## ResNetConfig
[[autodoc]] ResNetConfig
<frameworkcontent>
<pt>
## ResNetModel
[[autodoc]] ResNetModel
- forward
## ResNetForImageClassification
[[autodoc]] ResNetForImageClassification
- forward
</pt>
<tf>
## TFResNetModel
[[autodoc]] TFResNetModel
- call
## TFResNetForImageClassification
[[autodoc]] TFResNetForImageClassification
- call
</tf>
<jax>
## FlaxResNetModel
[[autodoc]] FlaxResNetModel
- __call__
## FlaxResNetForImageClassification
[[autodoc]] FlaxResNetForImageClassification
- __call__
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/resnet.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/resnet.md",
"repo_id": "transformers",
"token_count": 1253
} | 276 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Speech Encoder Decoder Models
The [`SpeechEncoderDecoderModel`] can be used to initialize a speech-to-text model
with any pretrained speech autoencoding model as the encoder (*e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert)) and any pretrained autoregressive model as the decoder.
The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech
recognition and speech translation has *e.g.* been shown in [Large-Scale Self- and Semi-Supervised Learning for Speech
Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli,
Alexis Conneau.
An example of how to use a [`SpeechEncoderDecoderModel`] for inference can be seen in [Speech2Text2](speech_to_text_2).
## Randomly initializing `SpeechEncoderDecoderModel` from model configurations.
[`SpeechEncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`Wav2Vec2Model`] configuration for the encoder
and the default [`BertForCausalLM`] configuration for the decoder.
```python
>>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel
>>> config_encoder = Wav2Vec2Config()
>>> config_decoder = BertConfig()
>>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> model = SpeechEncoderDecoderModel(config=config)
```
## Initialising `SpeechEncoderDecoderModel` from a pretrained encoder and a pretrained decoder.
[`SpeechEncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained Transformer-based speech model, *e.g.* [Wav2Vec2](wav2vec2) or [Hubert](hubert), can serve as the encoder, while pretrained auto-encoding models (*e.g.* BERT), pretrained causal language models (*e.g.* GPT2), and the pretrained decoder part of sequence-to-sequence models (*e.g.* the decoder of BART) can all be used as the decoder.
Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized.
Initializing [`SpeechEncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder).
To do so, the `SpeechEncoderDecoderModel` class provides a [`SpeechEncoderDecoderModel.from_encoder_decoder_pretrained`] method.
```python
>>> from transformers import SpeechEncoderDecoderModel
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
... "facebook/hubert-large-ll60k", "google-bert/bert-base-uncased"
... )
```
## Loading an existing `SpeechEncoderDecoderModel` checkpoint and performing inference.
To load fine-tuned checkpoints of the `SpeechEncoderDecoderModel` class, [`SpeechEncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.
To perform inference, one uses the [`generate`] method, which allows you to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search and multinomial sampling.
```python
>>> from transformers import Wav2Vec2Processor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset
>>> import torch
>>> # load a fine-tuned speech translation model and corresponding processor
>>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> # let's perform inference on a piece of English speech (which we'll translate to German)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values
>>> # autoregressively generate transcription (uses greedy decoding by default)
>>> generated_ids = model.generate(input_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.
```
## Training
Once the model is created, it can be fine-tuned similarly to BART, T5 or any other encoder-decoder model on a dataset of (speech, text) pairs.
As you can see, only 2 inputs are required for the model in order to compute a loss: `input_values` (which are the
speech inputs) and `labels` (which are the `input_ids` of the encoded target sequence).
```python
>>> from transformers import AutoTokenizer, AutoFeatureExtractor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset
>>> encoder_id = "facebook/wav2vec2-base-960h" # acoustic model encoder
>>> decoder_id = "google-bert/bert-base-uncased" # text decoder
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
>>> tokenizer = AutoTokenizer.from_pretrained(decoder_id)
>>> # Combine pre-trained encoder and pre-trained decoder to form a Seq2Seq model
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id)
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> # load an audio input and pre-process (normalise mean/std to 0/1)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values
>>> # load its corresponding transcription and tokenize to generate labels
>>> labels = tokenizer(ds[0]["text"], return_tensors="pt").input_ids
>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(input_values=input_values, labels=labels).loss
>>> loss.backward()
```
## SpeechEncoderDecoderConfig
[[autodoc]] SpeechEncoderDecoderConfig
## SpeechEncoderDecoderModel
[[autodoc]] SpeechEncoderDecoderModel
- forward
- from_encoder_decoder_pretrained
## FlaxSpeechEncoderDecoderModel
[[autodoc]] FlaxSpeechEncoderDecoderModel
- __call__
- from_encoder_decoder_pretrained
| transformers/docs/source/en/model_doc/speech-encoder-decoder.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/speech-encoder-decoder.md",
"repo_id": "transformers",
"token_count": 2092
} | 277 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Table Transformer
## Overview
The Table Transformer model was proposed in [PubTables-1M: Towards comprehensive table extraction from unstructured documents](https://arxiv.org/abs/2110.00061) by
Brandon Smock, Rohith Pesala, Robin Abraham. The authors introduce a new dataset, PubTables-1M, to benchmark progress in table extraction from unstructured documents,
as well as table structure recognition and functional analysis. The authors train 2 [DETR](detr) models, one for table detection and one for table structure recognition, dubbed Table Transformers.
The abstract from the paper is the following:
*Recently, significant progress has been made applying machine learning to the problem of table structure inference and extraction from unstructured documents.
However, one of the greatest challenges remains the creation of datasets with complete, unambiguous ground truth at scale. To address this, we develop a new, more
comprehensive dataset for table extraction, called PubTables-1M. PubTables-1M contains nearly one million tables from scientific articles, supports multiple input
modalities, and contains detailed header and location information for table structures, making it useful for a wide variety of modeling approaches. It also addresses a significant
source of ground truth inconsistency observed in prior datasets called oversegmentation, using a novel canonicalization procedure. We demonstrate that these improvements lead to a
significant increase in training performance and a more reliable estimate of model performance at evaluation for table structure recognition. Further, we show that transformer-based
object detection models trained on PubTables-1M produce excellent results for all three tasks of detection, structure recognition, and functional analysis without the need for any
special customization for these tasks.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/table_transformer_architecture.jpeg"
alt="drawing" width="600"/>
<small> Table detection and table structure recognition clarified. Taken from the <a href="https://arxiv.org/abs/2110.00061">original paper</a>. </small>
The authors released 2 models, one for [table detection](https://huggingface.co/microsoft/table-transformer-detection) in
documents, one for [table structure recognition](https://huggingface.co/microsoft/table-transformer-structure-recognition)
(the task of recognizing the individual rows, columns etc. in a table).
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be
found [here](https://github.com/microsoft/table-transformer).
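The sketch below shows a minimal table-detection pass with the released detection checkpoint. The input file name is a placeholder for any page image containing a table, and the confidence threshold is an arbitrary choice:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

# placeholder path: substitute any document page image that contains a table
image = Image.open("document_page.png").convert("RGB")

image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# convert raw outputs to (xmin, ymin, xmax, ymax) boxes at the original image resolution
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score.item():.3f} at {box.tolist()}")
```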
## Resources
<PipelineTag pipeline="object-detection"/>
- A demo notebook for the Table Transformer can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Table%20Transformer).
- It turns out padding of images is quite important for detection. An interesting Github thread with replies from the authors can be found [here](https://github.com/microsoft/table-transformer/issues/68).
## TableTransformerConfig
[[autodoc]] TableTransformerConfig
## TableTransformerModel
[[autodoc]] TableTransformerModel
- forward
## TableTransformerForObjectDetection
[[autodoc]] TableTransformerForObjectDetection
- forward
| transformers/docs/source/en/model_doc/table-transformer.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/table-transformer.md",
"repo_id": "transformers",
"token_count": 978
} | 278 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# UPerNet
## Overview
The UPerNet model was proposed in [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221)
by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. UPerNet is a general framework to effectively segment
a wide range of concepts from images, leveraging any vision backbone like [ConvNeXt](convnext) or [Swin](swin).
The abstract from the paper is the following:
*Humans recognize the visual world at multiple levels: we effortlessly categorize scenes and detect objects inside, while also identifying the textures and surfaces of the objects along with their different compositional parts. In this paper, we study a new task called Unified Perceptual Parsing, which requires the machine vision systems to recognize as many visual concepts as possible from a given image. A multi-task framework called UPerNet and a training strategy are developed to learn from heterogeneous image annotations. We benchmark our framework on Unified Perceptual Parsing and show that it is able to effectively segment a wide range of concepts from images. The trained networks are further applied to discover visual knowledge in natural scenes.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/upernet_architecture.jpg"
alt="drawing" width="600"/>
<small> UPerNet framework. Taken from the <a href="https://arxiv.org/abs/1807.10221">original paper</a>. </small>
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code is based on OpenMMLab's mmsegmentation [here](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/uper_head.py).
## Usage examples
UPerNet is a general framework for semantic segmentation. It can be used with any vision backbone, like so:
```py
from transformers import SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config)
model = UperNetForSemanticSegmentation(config)
```
To use another vision backbone, like [ConvNeXt](convnext), simply instantiate the model with the appropriate backbone:
```py
from transformers import ConvNextConfig, UperNetConfig, UperNetForSemanticSegmentation
backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config)
model = UperNetForSemanticSegmentation(config)
```
Note that this will randomly initialize all the weights of the model.
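To run inference with pretrained weights instead, you can load one of the UPerNet checkpoints released on the Hub. The sketch below is a minimal example; the `openmmlab/upernet-convnext-tiny` checkpoint and the example image URL are assumptions, not prescribed by this page:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

# example image (assumption)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# resize the logits back to the input resolution and take the per-pixel argmax
segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)  # (height, width) tensor of class indices
```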
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with UPerNet.
- Demo notebooks for UPerNet can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UPerNet).
- [`UperNetForSemanticSegmentation`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/semantic-segmentation) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb).
- See also: [Semantic segmentation task guide](../tasks/semantic_segmentation)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## UperNetConfig
[[autodoc]] UperNetConfig
## UperNetForSemanticSegmentation
[[autodoc]] UperNetForSemanticSegmentation
- forward | transformers/docs/source/en/model_doc/upernet.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/upernet.md",
"repo_id": "transformers",
"token_count": 1188
} | 279 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Video Vision Transformer (ViViT)
## Overview
The Vivit model was proposed in [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
The paper proposes one of the first successful sets of pure-transformer-based models for video understanding.
The abstract from the paper is the following:
*We present pure-transformer based models for video classification, drawing upon the recent success of such models in image classification. Our model extracts spatio-temporal tokens from the input video, which are then encoded by a series of transformer layers. In order to handle the long sequences of tokens encountered in video, we propose several, efficient variants of our model which factorise the spatial- and temporal-dimensions of the input. Although transformer-based models are known to only be effective when large training datasets are available, we show how we can effectively regularise the model during training and leverage pretrained image models to be able to train on comparatively small datasets. We conduct thorough ablation studies, and achieve state-of-the-art results on multiple video classification benchmarks including Kinetics 400 and 600, Epic Kitchens, Something-Something v2 and Moments in Time, outperforming prior methods based on deep 3D convolutional networks.*
This model was contributed by [jegormeister](https://huggingface.co/jegormeister). The original code (written in JAX) can be found [here](https://github.com/google-research/scenic/tree/main/scenic/projects/vivit).
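A minimal classification sketch is shown below. It assumes the `google/vivit-b-16x2-kinetics400` checkpoint and uses random frames in place of a real video clip, so the prediction itself is meaningless; substitute 32 frames sampled from your own video:

```python
import numpy as np
import torch
from transformers import VivitImageProcessor, VivitForVideoClassification

checkpoint = "google/vivit-b-16x2-kinetics400"  # assumption: a Kinetics-400 checkpoint
image_processor = VivitImageProcessor.from_pretrained(checkpoint)
model = VivitForVideoClassification.from_pretrained(checkpoint)

# 32 random RGB frames stand in for a real video clip
video = list(np.random.randint(0, 256, (32, 224, 224, 3), dtype=np.uint8))

inputs = image_processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```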
## VivitConfig
[[autodoc]] VivitConfig
## VivitImageProcessor
[[autodoc]] VivitImageProcessor
- preprocess
## VivitModel
[[autodoc]] VivitModel
- forward
## VivitForVideoClassification
[[autodoc]] transformers.VivitForVideoClassification
- forward
| transformers/docs/source/en/model_doc/vivit.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/vivit.md",
"repo_id": "transformers",
"token_count": 618
} | 280 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# XLSR-Wav2Vec2
## Overview
The XLSR-Wav2Vec2 model was proposed in [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael
Auli.
The abstract from the paper is the following:
*This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw
waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over
masked latent speech representations and jointly learns a quantization of the latents shared across languages. The
resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly
outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction
of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to
a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong
individual models. Analysis shows that the latent discrete speech representations are shared across languages with
increased sharing for related languages. We hope to catalyze research in low-resource speech understanding by releasing
XLSR-53, a large model pretrained in 53 languages.*
The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec).
Note: Meta (FAIR) released a new version of [Wav2Vec2-BERT 2.0](https://huggingface.co/docs/transformers/en/model_doc/wav2vec2-bert) - it's pretrained on 4.5M hours of audio. We especially recommend using it for fine-tuning tasks, e.g. as per [this guide](https://huggingface.co/blog/fine-tune-w2v2-bert).
## Usage tips
- XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal.
- XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be
decoded using [`Wav2Vec2CTCTokenizer`].
<Tip>
XLSR-Wav2Vec2's architecture is based on the Wav2Vec2 model, so one can refer to [Wav2Vec2's documentation page](wav2vec2).
</Tip>
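For reference, the sketch below shows the CTC decoding flow described in the usage tips above. The checkpoint name is a placeholder: substitute any XLSR-Wav2Vec2 model that has been fine-tuned with a CTC head for your target language:

```python
import torch
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# placeholder: replace with a CTC fine-tuned XLSR checkpoint of your choice
checkpoint = "your-username/wav2vec2-large-xlsr-53-finetuned"

processor = Wav2Vec2Processor.from_pretrained(checkpoint)
model = Wav2Vec2ForCTC.from_pretrained(checkpoint)

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# CTC decoding: take the per-frame argmax and let the tokenizer collapse repeats/blanks
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)[0]
print(transcription)
```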
| transformers/docs/source/en/model_doc/xlsr_wav2vec2.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/xlsr_wav2vec2.md",
"repo_id": "transformers",
"token_count": 813
} | 281 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Efficient Training on CPU
This guide focuses on training large models efficiently on CPU.
## Mixed precision with IPEX
Mixed precision uses single (fp32) and half-precision (bf16/fp16) data types in a model to accelerate training or inference while still preserving much of the single-precision accuracy. Modern CPUs such as 3rd and 4th Gen Intel® Xeon® Scalable processors natively support bf16, so you should get more performance out of the box by enabling mixed precision training with bf16.
To further maximize training performance, you can use Intel® Extension for PyTorch (IPEX), which is a library built on PyTorch and adds additional CPU instruction set architecture (ISA) level support such as Intel® Advanced Vector Extensions 512 Vector Neural Network Instructions (Intel® AVX512-VNNI), and Intel® Advanced Matrix Extensions (Intel® AMX) for an extra performance boost on Intel CPUs. However, CPUs with only AVX2 (e.g., AMD or older Intel CPUs) are not guaranteed to have better performance under IPEX.
Auto Mixed Precision (AMP) for CPU backends has been enabled since PyTorch 1.10. AMP support for bf16 on CPUs and bf16 operator optimization is also supported in IPEX and partially upstreamed to the main PyTorch branch. You can get better performance and user experience with IPEX AMP.
Check more detailed information for [Auto Mixed Precision](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/amp.html).
### IPEX installation:
IPEX releases follow PyTorch releases. To install it via pip:
| PyTorch Version | IPEX version |
| :---------------: | :----------: |
| 2.1.x | 2.1.100+cpu |
| 2.0.x | 2.0.100+cpu |
| 1.13 | 1.13.0+cpu |
| 1.12 | 1.12.300+cpu |
Run `pip list | grep torch` to find your PyTorch version so you can pick the matching IPEX version name.
```bash
pip install intel_extension_for_pytorch==<version_name> -f https://developer.intel.com/ipex-whl-stable-cpu
```
You can check the latest versions in [ipex-whl-stable-cpu](https://developer.intel.com/ipex-whl-stable-cpu) if needed.
Check more approaches for [IPEX installation](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).
### Usage in Trainer
To enable auto mixed precision with IPEX in Trainer, users should add `use_ipex`, `bf16` and `use_cpu` to the training command arguments.
Take the use case on [Transformers question-answering](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) as an example.
- Training with IPEX using BF16 auto mixed precision on CPU:
<pre> python run_qa.py \
--model_name_or_path google-bert/bert-base-uncased \
--dataset_name squad \
--do_train \
--do_eval \
--per_device_train_batch_size 12 \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir /tmp/debug_squad/ \
<b>--use_ipex</b> \
<b>--bf16</b> \
<b>--use_cpu</b></pre>
If you want to enable `use_ipex` and `bf16` in your script, add these parameters to `TrainingArguments` like this:
```diff
training_args = TrainingArguments(
output_dir=args.output_path,
+ bf16=True,
+ use_ipex=True,
+ use_cpu=True,
**kwargs
)
```
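If you write your own training loop instead of using the `Trainer`, the same bf16 auto mixed precision can be enabled directly with IPEX. The sketch below is a minimal example with a toy model rather than a recipe from this guide, and it assumes IPEX is already installed:

```python
import torch
import intel_extension_for_pytorch as ipex  # assumes IPEX is installed

# toy model and optimizer for illustration only
model = torch.nn.Linear(128, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
criterion = torch.nn.CrossEntropyLoss()

# apply IPEX operator-level optimizations for bf16 training
model.train()
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=torch.bfloat16)

x = torch.randn(32, 128)
y = torch.randint(0, 2, (32,))

# run the forward pass under CPU autocast so eligible ops execute in bf16
with torch.cpu.amp.autocast(dtype=torch.bfloat16):
    loss = criterion(model(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```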
### Practice example
Blog: [Accelerating PyTorch Transformers with Intel Sapphire Rapids](https://huggingface.co/blog/intel-sapphire-rapids)
| transformers/docs/source/en/perf_train_cpu.md/0 | {
"file_path": "transformers/docs/source/en/perf_train_cpu.md",
"repo_id": "transformers",
"token_count": 1270
} | 282 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Image Segmentation
[[open-in-colab]]
<Youtube id="dKE8SIt9C-w"/>
Image segmentation models separate the regions corresponding to different areas of interest in an image. These models work by assigning a label to each pixel. There are several types of segmentation: semantic segmentation, instance segmentation, and panoptic segmentation.
In this guide, we will:
1. [Take a look at different types of segmentation](#types-of-segmentation).
2. [Have an end-to-end fine-tuning example for semantic segmentation](#fine-tuning-a-model-for-segmentation).
Before you begin, make sure you have all the necessary libraries installed:
```py
# uncomment to install the necessary libraries
!pip install -q datasets transformers evaluate accelerate
```
We encourage you to log in to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to log in:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Types of Segmentation
Semantic segmentation assigns a label or class to every single pixel in an image. Let's take a look at a semantic segmentation model output. It will assign the same class to every instance of an object it comes across in an image, for example, all cats will be labeled as "cat" instead of "cat-1", "cat-2".
We can use transformers' image segmentation pipeline to quickly infer a semantic segmentation model. Let's take a look at the example image.
```python
from transformers import pipeline
from PIL import Image
import requests
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/segmentation_input.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/segmentation_input.jpg" alt="Segmentation Input"/>
</div>
We will use [nvidia/segformer-b1-finetuned-cityscapes-1024-1024](https://huggingface.co/nvidia/segformer-b1-finetuned-cityscapes-1024-1024).
```python
semantic_segmentation = pipeline("image-segmentation", "nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
results = semantic_segmentation(image)
results
```
The segmentation pipeline output includes a mask for every predicted class.
```bash
[{'score': None,
'label': 'road',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'sidewalk',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'building',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'wall',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'pole',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'traffic sign',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'vegetation',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'terrain',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'sky',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': None,
'label': 'car',
'mask': <PIL.Image.Image image mode=L size=612x415>}]
```
Taking a look at the mask for the car class, we can see every car is classified with the same mask.
```python
results[-1]["mask"]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/semantic_segmentation_output.png" alt="Semantic Segmentation Output"/>
</div>
In instance segmentation, the goal is not to classify every pixel, but to predict a mask for **every instance of an object** in a given image. It works very similarly to object detection, but instead of a bounding box there is a segmentation mask for every instance. We will use [facebook/mask2former-swin-large-cityscapes-instance](https://huggingface.co/facebook/mask2former-swin-large-cityscapes-instance) for this.
```python
instance_segmentation = pipeline("image-segmentation", "facebook/mask2former-swin-large-cityscapes-instance")
results = instance_segmentation(image)
results
```
As you can see below, there are multiple cars classified, and there's no classification for pixels other than pixels that belong to car and person instances.
```bash
[{'score': 0.999944,
'label': 'car',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.999945,
'label': 'car',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.999652,
'label': 'car',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.903529,
'label': 'person',
'mask': <PIL.Image.Image image mode=L size=612x415>}]
```
Checking out one of the car masks below.
```python
results[2]["mask"]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/instance_segmentation_output.png" alt="Semantic Segmentation Output"/>
</div>
Panoptic segmentation combines semantic segmentation and instance segmentation, where every pixel is classified into a class and an instance of that class, and there are multiple masks for each instance of a class. We can use [facebook/mask2former-swin-large-cityscapes-panoptic](https://huggingface.co/facebook/mask2former-swin-large-cityscapes-panoptic) for this.
```python
panoptic_segmentation = pipeline("image-segmentation", "facebook/mask2former-swin-large-cityscapes-panoptic")
results = panoptic_segmentation(image)
results
```
As you can see below, we have more classes. As we will illustrate later, every pixel is classified into one of the classes.
```bash
[{'score': 0.999981,
'label': 'car',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.999958,
'label': 'car',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.99997,
'label': 'vegetation',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.999575,
'label': 'pole',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.999958,
'label': 'building',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.999634,
'label': 'road',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.996092,
'label': 'sidewalk',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.999221,
'label': 'car',
'mask': <PIL.Image.Image image mode=L size=612x415>},
{'score': 0.99987,
'label': 'sky',
'mask': <PIL.Image.Image image mode=L size=612x415>}]
```
Let's have a side by side comparison for all types of segmentation.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/segmentation-comparison.png" alt="Segmentation Maps Compared"/>
</div>
Having seen all types of segmentation, let's take a deep dive into fine-tuning a model for semantic segmentation.
Common real-world applications of semantic segmentation include training self-driving cars to identify pedestrians and important traffic information, identifying cells and abnormalities in medical imagery, and monitoring environmental changes from satellite imagery.
## Fine-tuning a Model for Segmentation
We will now:
1. Finetune [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer) on the [SceneParse150](https://huggingface.co/datasets/scene_parse_150) dataset.
2. Use your fine-tuned model for inference.
<Tip>
To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/image-segmentation)
</Tip>
### Load SceneParse150 dataset
Start by loading a smaller subset of the SceneParse150 dataset from the 🤗 Datasets library. This'll give you a chance to experiment and make sure everything works before spending more time training on the full dataset.
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("scene_parse_150", split="train[:50]")
```
Split the dataset's `train` split into a train and test set with the [`~datasets.Dataset.train_test_split`] method:
```py
>>> ds = ds.train_test_split(test_size=0.2)
>>> train_ds = ds["train"]
>>> test_ds = ds["test"]
```
Then take a look at an example:
```py
>>> train_ds[0]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x683 at 0x7F9B0C201F90>,
'annotation': <PIL.PngImagePlugin.PngImageFile image mode=L size=512x683 at 0x7F9B0C201DD0>,
'scene_category': 368}
# view the image
>>> train_ds[0]["image"]
```
- `image`: a PIL image of the scene.
- `annotation`: a PIL image of the segmentation map, which is also the model's target.
- `scene_category`: a category id that describes the image scene like "kitchen" or "office". In this guide, you'll only need `image` and `annotation`, both of which are PIL images.
You'll also want to create a dictionary that maps a label id to a label class which will be useful when you set up the model later. Download the mappings from the Hub and create the `id2label` and `label2id` dictionaries:
```py
>>> import json
>>> from pathlib import Path
>>> from huggingface_hub import hf_hub_download
>>> repo_id = "huggingface/label-files"
>>> filename = "ade20k-id2label.json"
>>> id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
>>> id2label = {int(k): v for k, v in id2label.items()}
>>> label2id = {v: k for k, v in id2label.items()}
>>> num_labels = len(id2label)
```
#### Custom dataset
You could also create and use your own dataset if you prefer to train with the [run_semantic_segmentation.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py) script instead of a notebook instance. The script requires:
1. a [`~datasets.DatasetDict`] with two [`~datasets.Image`] columns, "image" and "label"
```py
from datasets import Dataset, DatasetDict, Image
image_paths_train = ["path/to/image_1.jpg", "path/to/image_2.jpg", ..., "path/to/image_n.jpg"]
label_paths_train = ["path/to/annotation_1.png", "path/to/annotation_2.png", ..., "path/to/annotation_n.png"]
image_paths_validation = [...]
label_paths_validation = [...]
def create_dataset(image_paths, label_paths):
dataset = Dataset.from_dict({"image": sorted(image_paths),
"label": sorted(label_paths)})
dataset = dataset.cast_column("image", Image())
dataset = dataset.cast_column("label", Image())
return dataset
# step 1: create Dataset objects
train_dataset = create_dataset(image_paths_train, label_paths_train)
validation_dataset = create_dataset(image_paths_validation, label_paths_validation)
# step 2: create DatasetDict
dataset = DatasetDict({
"train": train_dataset,
"validation": validation_dataset,
}
)
# step 3: push to Hub (assumes you have run the huggingface-cli login command in a terminal/notebook)
dataset.push_to_hub("your-name/dataset-repo")
# optionally, you can push to a private repo on the Hub
# dataset.push_to_hub("name of repo on the hub", private=True)
```
2. an id2label dictionary mapping the class integers to their class names
```py
import json
# simple example
id2label = {0: 'cat', 1: 'dog'}
with open('id2label.json', 'w') as fp:
json.dump(id2label, fp)
```
As an example, take a look at this [example dataset](https://huggingface.co/datasets/nielsr/ade20k-demo) which was created with the steps shown above.
### Preprocess
The next step is to load a SegFormer image processor to prepare the images and annotations for the model. Some datasets, like this one, use the zero-index as the background class. However, the background class isn't actually included in the 150 classes, so you'll need to set `do_reduce_labels=True` to subtract one from all the labels. The zero-index is replaced by `255` so it's ignored by SegFormer's loss function:
```py
>>> from transformers import AutoImageProcessor
>>> checkpoint = "nvidia/mit-b0"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)
```
<frameworkcontent>
<pt>
It is common to apply some data augmentations to an image dataset to make a model more robust against overfitting. In this guide, you'll use the [`ColorJitter`](https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html) function from [torchvision](https://pytorch.org/vision/stable/index.html) to randomly change the color properties of an image, but you can also use any image library you like.
```py
>>> from torchvision.transforms import ColorJitter
>>> jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
```
Now create two preprocessing functions to prepare the images and annotations for the model. These functions convert the images into `pixel_values` and annotations to `labels`. For the training set, `jitter` is applied before providing the images to the image processor. For the test set, the image processor crops and normalizes the `images`, and only crops the `labels` because no data augmentation is applied during testing.
```py
>>> def train_transforms(example_batch):
... images = [jitter(x) for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
>>> def val_transforms(example_batch):
... images = [x for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
```
To apply the `jitter` over the entire dataset, use the 🤗 Datasets [`~datasets.Dataset.set_transform`] function. The transform is applied on the fly which is faster and consumes less disk space:
```py
>>> train_ds.set_transform(train_transforms)
>>> test_ds.set_transform(val_transforms)
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
It is common to apply some data augmentations to an image dataset to make a model more robust against overfitting.
In this guide, you'll use [`tf.image`](https://www.tensorflow.org/api_docs/python/tf/image) to randomly change the color properties of an image, but you can also use any image
library you like.
Define two separate transformation functions:
- training data transformations that include image augmentation
- validation data transformations that only transpose the images, since computer vision models in 🤗 Transformers expect channels-first layout
```py
>>> import tensorflow as tf
>>> def aug_transforms(image):
... image = tf.keras.utils.img_to_array(image)
... image = tf.image.random_brightness(image, 0.25)
... image = tf.image.random_contrast(image, 0.5, 2.0)
... image = tf.image.random_saturation(image, 0.75, 1.25)
... image = tf.image.random_hue(image, 0.1)
... image = tf.transpose(image, (2, 0, 1))
... return image
>>> def transforms(image):
... image = tf.keras.utils.img_to_array(image)
... image = tf.transpose(image, (2, 0, 1))
... return image
```
Next, create two preprocessing functions to prepare batches of images and annotations for the model. These functions apply
the image transformations and use the earlier loaded `image_processor` to convert the images into `pixel_values` and
annotations to `labels`. `ImageProcessor` also takes care of resizing and normalizing the images.
```py
>>> def train_transforms(example_batch):
... images = [aug_transforms(x.convert("RGB")) for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
>>> def val_transforms(example_batch):
... images = [transforms(x.convert("RGB")) for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
```
To apply the preprocessing transformations over the entire dataset, use the 🤗 Datasets [`~datasets.Dataset.set_transform`] function.
The transform is applied on the fly which is faster and consumes less disk space:
```py
>>> train_ds.set_transform(train_transforms)
>>> test_ds.set_transform(val_transforms)
```
</tf>
</frameworkcontent>
### Evaluate
Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [mean Intersection over Union](https://huggingface.co/spaces/evaluate-metric/mean_iou) (IoU) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):
```py
>>> import evaluate
>>> metric = evaluate.load("mean_iou")
```
Then create a function to [`~evaluate.EvaluationModule.compute`] the metrics. Your predictions need to be converted to
logits first, and then reshaped to match the size of the labels before you can call [`~evaluate.EvaluationModule.compute`]:
<frameworkcontent>
<pt>
```py
>>> import numpy as np
>>> import torch
>>> from torch import nn
>>> def compute_metrics(eval_pred):
... with torch.no_grad():
... logits, labels = eval_pred
... logits_tensor = torch.from_numpy(logits)
... logits_tensor = nn.functional.interpolate(
... logits_tensor,
... size=labels.shape[-2:],
... mode="bilinear",
... align_corners=False,
... ).argmax(dim=1)
... pred_labels = logits_tensor.detach().cpu().numpy()
... metrics = metric.compute(
... predictions=pred_labels,
... references=labels,
... num_labels=num_labels,
... ignore_index=255,
... reduce_labels=False,
... )
... for key, value in metrics.items():
... if isinstance(value, np.ndarray):
... metrics[key] = value.tolist()
... return metrics
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
```py
>>> def compute_metrics(eval_pred):
... logits, labels = eval_pred
... logits = tf.transpose(logits, perm=[0, 2, 3, 1])
... logits_resized = tf.image.resize(
... logits,
... size=tf.shape(labels)[1:],
... method="bilinear",
... )
... pred_labels = tf.argmax(logits_resized, axis=-1)
... metrics = metric.compute(
... predictions=pred_labels,
... references=labels,
... num_labels=num_labels,
... ignore_index=-1,
... reduce_labels=image_processor.do_reduce_labels,
... )
... per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
... per_category_iou = metrics.pop("per_category_iou").tolist()
... metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
... metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})
... return {"val_" + k: v for k, v in metrics.items()}
```
</tf>
</frameworkcontent>
Your `compute_metrics` function is ready to go now, and you'll return to it when you set up your training.
### Train
<frameworkcontent>
<pt>
<Tip>
If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#finetune-with-trainer)!
</Tip>
You're ready to start training your model now! Load SegFormer with [`AutoModelForSemanticSegmentation`], and pass the model the mapping between label ids and label classes:
```py
>>> from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer
>>> model = AutoModelForSemanticSegmentation.from_pretrained(checkpoint, id2label=id2label, label2id=label2id)
```
At this point, only three steps remain:
1. Define your training hyperparameters in [`TrainingArguments`]. It is important you don't remove unused columns because this'll drop the `image` column. Without the `image` column, you can't create `pixel_values`. Set `remove_unused_columns=False` to prevent this behavior! The only other required parameter is `output_dir` which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [`Trainer`] will evaluate the IoU metric and save the training checkpoint.
2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.
3. Call [`~Trainer.train`] to finetune your model.
```py
>>> training_args = TrainingArguments(
... output_dir="segformer-b0-scene-parse-150",
... learning_rate=6e-5,
... num_train_epochs=50,
... per_device_train_batch_size=2,
... per_device_eval_batch_size=2,
... save_total_limit=3,
... eval_strategy="steps",
... save_strategy="steps",
... save_steps=20,
... eval_steps=20,
... logging_steps=1,
... eval_accumulation_steps=5,
... remove_unused_columns=False,
... push_to_hub=True,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=train_ds,
... eval_dataset=test_ds,
... compute_metrics=compute_metrics,
... )
>>> trainer.train()
```
Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model:
```py
>>> trainer.push_to_hub()
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
<Tip>
If you are unfamiliar with fine-tuning a model with Keras, check out the [basic tutorial](./training#train-a-tensorflow-model-with-keras) first!
</Tip>
To fine-tune a model in TensorFlow, follow these steps:
1. Define the training hyperparameters, and set up an optimizer and a learning rate schedule.
2. Instantiate a pretrained model.
3. Convert a 🤗 Dataset to a `tf.data.Dataset`.
4. Compile your model.
5. Add callbacks to calculate metrics and upload your model to the 🤗 Hub
6. Use the `fit()` method to run the training.
Start by defining the hyperparameters, optimizer and learning rate schedule:
```py
>>> from transformers import create_optimizer
>>> batch_size = 2
>>> num_epochs = 50
>>> num_train_steps = len(train_ds) * num_epochs
>>> learning_rate = 6e-5
>>> weight_decay_rate = 0.01
>>> optimizer, lr_schedule = create_optimizer(
... init_lr=learning_rate,
... num_train_steps=num_train_steps,
... weight_decay_rate=weight_decay_rate,
... num_warmup_steps=0,
... )
```
Then, load SegFormer with [`TFAutoModelForSemanticSegmentation`] along with the label mappings, and compile it with the
optimizer. Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:
```py
>>> from transformers import TFAutoModelForSemanticSegmentation
>>> model = TFAutoModelForSemanticSegmentation.from_pretrained(
... checkpoint,
... id2label=id2label,
... label2id=label2id,
... )
>>> model.compile(optimizer=optimizer) # No loss argument!
```
Convert your datasets to the `tf.data.Dataset` format using the [`~datasets.Dataset.to_tf_dataset`] and the [`DefaultDataCollator`]:
```py
>>> from transformers import DefaultDataCollator
>>> data_collator = DefaultDataCollator(return_tensors="tf")
>>> tf_train_dataset = train_ds.to_tf_dataset(
... columns=["pixel_values", "label"],
... shuffle=True,
... batch_size=batch_size,
... collate_fn=data_collator,
... )
>>> tf_eval_dataset = test_ds.to_tf_dataset(
... columns=["pixel_values", "label"],
... shuffle=True,
... batch_size=batch_size,
... collate_fn=data_collator,
... )
```
To compute the accuracy from the predictions and push your model to the 🤗 Hub, use [Keras callbacks](../main_classes/keras_callbacks).
Pass your `compute_metrics` function to [`KerasMetricCallback`],
and use the [`PushToHubCallback`] to upload the model:
```py
>>> from transformers.keras_callbacks import KerasMetricCallback, PushToHubCallback
>>> metric_callback = KerasMetricCallback(
... metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, batch_size=batch_size, label_cols=["labels"]
... )
>>> push_to_hub_callback = PushToHubCallback(output_dir="scene_segmentation", tokenizer=image_processor)
>>> callbacks = [metric_callback, push_to_hub_callback]
```
Finally, you are ready to train your model! Call `fit()` with your training and validation datasets, the number of epochs,
and your callbacks to fine-tune the model:
```py
>>> model.fit(
... tf_train_dataset,
... validation_data=tf_eval_dataset,
... callbacks=callbacks,
... epochs=num_epochs,
... )
```
Congratulations! You have fine-tuned your model and shared it on the 🤗 Hub. You can now use it for inference!
</tf>
</frameworkcontent>
### Inference
Great, now that you've finetuned a model, you can use it for inference!
Reload the dataset and load an image for inference.
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("scene_parse_150", split="train[:50]")
>>> ds = ds.train_test_split(test_size=0.2)
>>> test_ds = ds["test"]
>>> image = ds["test"][0]["image"]
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" alt="Image of bedroom"/>
</div>
<frameworkcontent>
<pt>
We will now see how to infer without a pipeline. Process the image with an image processor and place the `pixel_values` on a GPU:
```py
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use GPU if available, otherwise use a CPU
>>> encoding = image_processor(image, return_tensors="pt")
>>> pixel_values = encoding.pixel_values.to(device)
```
Pass your input to the model and return the `logits`:
```py
>>> outputs = model(pixel_values=pixel_values)
>>> logits = outputs.logits.cpu()
```
Next, rescale the logits to the original image size:
```py
>>> upsampled_logits = nn.functional.interpolate(
... logits,
... size=image.size[::-1],
... mode="bilinear",
... align_corners=False,
... )
>>> pred_seg = upsampled_logits.argmax(dim=1)[0]
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
Load an image processor to preprocess the image and return the input as TensorFlow tensors:
```py
>>> from transformers import AutoImageProcessor
>>> image_processor = AutoImageProcessor.from_pretrained("MariaK/scene_segmentation")
>>> inputs = image_processor(image, return_tensors="tf")
```
Pass your input to the model and return the `logits`:
```py
>>> from transformers import TFAutoModelForSemanticSegmentation
>>> model = TFAutoModelForSemanticSegmentation.from_pretrained("MariaK/scene_segmentation")
>>> logits = model(**inputs).logits
```
Next, rescale the logits to the original image size and apply argmax on the class dimension:
```py
>>> logits = tf.transpose(logits, [0, 2, 3, 1])
>>> upsampled_logits = tf.image.resize(
... logits,
... # We reverse the shape of `image` because `image.size` returns width and height.
... image.size[::-1],
... )
>>> pred_seg = tf.math.argmax(upsampled_logits, axis=-1)[0]
```
</tf>
</frameworkcontent>
To visualize the results, load the [dataset color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51) as `ade_palette()` that maps each class to their RGB values.
```py
def ade_palette():
return np.asarray([
[0, 0, 0],
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
])
```
Then you can combine and plot your image and the predicted segmentation map:
```py
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8)
>>> palette = np.array(ade_palette())
>>> for label, color in enumerate(palette):
... color_seg[pred_seg == label, :] = color
>>> color_seg = color_seg[..., ::-1] # convert to BGR
>>> img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map
>>> img = img.astype(np.uint8)
>>> plt.figure(figsize=(15, 10))
>>> plt.imshow(img)
>>> plt.show()
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-preds.png" alt="Image of bedroom overlaid with segmentation map"/>
</div>
| transformers/docs/source/en/tasks/semantic_segmentation.md/0 | {
"file_path": "transformers/docs/source/en/tasks/semantic_segmentation.md",
"repo_id": "transformers",
"token_count": 12038
} | 283 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Export to TorchScript
<Tip>
This is the very beginning of our experiments with TorchScript and we are still
exploring its capabilities with variable-input-size models. It is a focus of interest to
us and we will deepen our analysis in upcoming releases, with more code examples, a more
flexible implementation, and benchmarks comparing Python-based code with compiled
TorchScript.
</Tip>
According to the [TorchScript documentation](https://pytorch.org/docs/stable/jit.html):
> TorchScript is a way to create serializable and optimizable models from PyTorch code.
There are two PyTorch modules, [JIT and
TRACE](https://pytorch.org/docs/stable/jit.html), that allow developers to export their
models to be reused in other programs like efficiency-oriented C++ programs.
We provide an interface that allows you to export ๐ค Transformers models to TorchScript
so they can be reused in a different environment than PyTorch-based Python programs.
Here, we explain how to export and use our models using TorchScript.
Exporting a model requires two things:
- model instantiation with the `torchscript` flag
- a forward pass with dummy inputs
These necessities imply several things developers should be careful about as detailed
below.
## TorchScript flag and tied weights
The `torchscript` flag is necessary because most of the ๐ค Transformers language models
have tied weights between their `Embedding` layer and their `Decoding` layer.
TorchScript does not allow you to export models that have tied weights, so it is
necessary to untie and clone the weights beforehand.
Models instantiated with the `torchscript` flag have their `Embedding` layer and
`Decoding` layer separated, which means that they should not be trained down the line.
Training would desynchronize the two layers, leading to unexpected results.
This is not the case for models that do not have a language model head, as those do not
have tied weights. These models can be safely exported without the `torchscript` flag.
## Dummy inputs and standard lengths
The dummy inputs are used for a model's forward pass. While the inputs' values are
propagated through the layers, PyTorch keeps track of the different operations executed
on each tensor. These recorded operations are then used to create the *trace* of the
model.
The trace is created relative to the inputs' dimensions. It is therefore constrained by
the dimensions of the dummy input, and will not work for any other sequence length or
batch size. When trying with a different size, the following error is raised:
```
`The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2`
```
We recommend you trace the model with a dummy input size at least as large as the
largest input that will be fed to the model during inference. Padding can help fill the
missing values. However, since the model is traced with a larger input size, the
dimensions of the matrix will also be large, resulting in more calculations.
Be careful of the total number of operations done on each input and follow the
performance closely when exporting varying sequence-length models.
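As a minimal sketch of this padding strategy (reusing the same checkpoint as the example below; the sentence and `max_length` value are arbitrary, not part of the tracing API):
```python
from transformers import BertTokenizer, BertModel
import torch

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True)
model.eval()

# Pad the dummy input up to the longest sequence you expect at inference time.
dummy = tokenizer("Who was Jim Henson?", padding="max_length", max_length=512, return_tensors="pt")
traced_model = torch.jit.trace(model, [dummy["input_ids"], dummy["attention_mask"]])
```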
## Using TorchScript in Python
This section demonstrates how to save and load models as well as how to use the trace
for inference.
### Saving a model
To export a `BertModel` with TorchScript, instantiate `BertModel` from the `BertConfig`
class and then save it to disk under the filename `traced_bert.pt`:
```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch
enc = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
# Tokenizing input text
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = enc.tokenize(text)
# Masking one of the input tokens
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
indexed_tokens = enc.convert_tokens_to_ids(tokenized_text)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
# Creating a dummy input
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
dummy_input = [tokens_tensor, segments_tensors]
# Initializing the model with the torchscript flag
# Flag set to True even though it is not necessary as this model does not have an LM Head.
config = BertConfig(
vocab_size_or_config_json_file=32000,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
torchscript=True,
)
# Instantiating the model
model = BertModel(config)
# The model needs to be in evaluation mode
model.eval()
# If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag
model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True)
# Creating the trace
traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(traced_model, "traced_bert.pt")
```
### Loading a model
Now you can load the previously saved `BertModel`, `traced_bert.pt`, from disk and use
it on the previously initialised `dummy_input`:
```python
loaded_model = torch.jit.load("traced_bert.pt")
loaded_model.eval()
all_encoder_layers, pooled_output = loaded_model(*dummy_input)
```
### Using a traced model for inference
Use the traced model for inference by using its `__call__` dunder method:
```python
traced_model(tokens_tensor, segments_tensors)
```
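As an optional sanity check (not required by the guide), you can verify that the traced module reproduces the original model's outputs on the dummy input defined earlier:
```python
import torch

with torch.no_grad():
    original_output = model(tokens_tensor, segments_tensors)[0]
    traced_output = traced_model(tokens_tensor, segments_tensors)[0]

# Tracing should not change the numerics beyond small floating point differences.
print(torch.allclose(original_output, traced_output, atol=1e-5))
```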
## Deploy Hugging Face TorchScript models to AWS with the Neuron SDK
AWS introduced the [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/)
instance family for low cost, high performance machine learning inference in the cloud.
The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware
accelerator, specializing in deep learning inferencing workloads. [AWS
Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) is the SDK for
Inferentia that supports tracing and optimizing transformers models for deployment on
Inf1. The Neuron SDK provides:
1. Easy-to-use API with one line of code change to trace and optimize a TorchScript
model for inference in the cloud.
2. Out of the box performance optimizations for [improved
cost-performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/).
3. Support for Hugging Face transformers models built with either
[PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html)
or
[TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html).
### Implications
Transformers models based on the [BERT (Bidirectional Encoder Representations from
Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert)
architecture, or its variants such as
[distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) and
[roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta) run best on
Inf1 for non-generative tasks such as extractive question answering, sequence
classification, and token classification. However, text generation tasks can still be
adapted to run on Inf1 according to this [AWS Neuron MarianMT
tutorial](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html).
More information about models that can be converted out of the box on Inferentia can be
found in the [Model Architecture
Fit](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia)
section of the Neuron documentation.
### Dependencies
Using AWS Neuron to convert models requires a [Neuron SDK
environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide)
which comes preconfigured on [AWS Deep Learning
AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html).
### Converting a model for AWS Neuron
Convert a model for AWS Neuron using the same code from [Using TorchScript in
Python](torchscript#using-torchscript-in-python) to trace a `BertModel`. Import the
`torch.neuron` framework extension to access the components of the Neuron SDK through a
Python API:
```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch
import torch.neuron
```
You only need to modify the following line:
```diff
- torch.jit.trace(model, [tokens_tensor, segments_tensors])
+ torch.neuron.trace(model, [tokens_tensor, segments_tensors])
```
This enables the Neuron SDK to trace the model and optimize it for Inf1 instances.
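Putting these pieces together, a minimal sketch could look like the following. It assumes a working Neuron SDK environment and reuses `model`, `tokens_tensor` and `segments_tensors` from the BERT example above; the output filename is arbitrary:
```python
import torch
import torch.neuron

# Trace with the Neuron compiler instead of torch.jit.trace.
neuron_model = torch.neuron.trace(model, [tokens_tensor, segments_tensors])

# The traced result behaves like a TorchScript module, so it can be saved and reloaded as before.
torch.jit.save(neuron_model, "traced_bert_neuron.pt")
```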
To learn more about AWS Neuron SDK features, tools, example tutorials and latest
updates, please see the [AWS NeuronSDK
documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html).
| transformers/docs/source/en/torchscript.md/0 | {
"file_path": "transformers/docs/source/en/torchscript.md",
"repo_id": "transformers",
"token_count": 2740
} | 284 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Debugging
## Debug de problemas de Network multi-GPU
Cuando entrenas o infieres con `DistributedDataParallel` y varias GPUs, si encuentras problemas de intercomunicaciรณn entre procesos y/o nodos, puedes usar el siguiente script para diagnosticar problemas de red.
```bash
wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py
```
Por ejemplo, para probar cรณmo interactรบan 2 GPUs, haz lo siguiente:
```bash
python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
```
Si ambos procesos pueden hablar entre sรญ y asignar la memoria de la GPU, cada uno imprimirรก un status OK.
Para mรกs GPUs o nodos, ajusta los argumentos en el script.
Encontrarรกs muchos mรกs detalles dentro del script de diagnรณstico e incluso una receta de cรณmo ejecutarlo en un entorno SLURM.
Un nivel adicional de debug es agregar la variable de entorno `NCCL_DEBUG=INFO` de la siguiente manera:
```bash
NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
```
Esto mostrarรก mucha informaciรณn de debug relacionada con NCCL, que luego puedes buscar online si encuentras que reporta algรบn problema. O si no estรกs seguro de cรณmo interpretar el output, puedes compartir el archivo de log en un Issue.
## Detecciรณn de Underflow y Overflow
<Tip>
Esta funciรณn estรก disponible actualmente sรณlo para PyTorch.
</Tip>
<Tip>
Para el entrenamiento multi-GPU, requiere DDP (`torch.distributed.launch`).
</Tip>
<Tip>
Esta funciรณn puede utilizarse con cualquier modelo basado en `nn.Module`.
</Tip>
Si empiezas a obtener `loss=NaN` o el modelo muestra algรบn otro comportamiento anormal debido a `inf` o `nan` en
activations o weights hay que descubrir dรณnde se produce el primer underflow o overflow y quรฉ lo ha provocado. Por suerte
puedes lograrlo fรกcilmente activando un mรณdulo especial que harรก la detecciรณn automรกticamente.
Si estรกs usando [`Trainer`], solo necesitas aรฑadir:
```bash
--debug underflow_overflow
```
a los argumentos normales de la lรญnea de comandos, o pasar `debug="underflow_overflow"` al crear el objeto [`TrainingArguments`].
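Por ejemplo, un esquema mínimo con [`TrainingArguments`] (asumiendo que `model` y `train_dataset` ya están definidos; `output_dir` es solo ilustrativo) sería:
```python
from transformers import Trainer, TrainingArguments

# Activa el detector de underflow/overflow directamente desde los argumentos de entrenamiento.
training_args = TrainingArguments(output_dir="salida", debug="underflow_overflow")
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
trainer.train()
```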
Si estรกs usando tu propio bucle de entrenamiento u otro Trainer puedes lograr lo mismo con:
```python
from transformers.debug_utils import DebugUnderflowOverflow
debug_overflow = DebugUnderflowOverflow(model)
```
[`~debug_utils.DebugUnderflowOverflow`] inserta hooks en el modelo que inmediatamente despuรฉs de cada forward
testearรก las variables de input y output y tambiรฉn los weights del mรณdulo correspondiente. Tan pronto como se detecte `inf` o
`nan` se detecta en al menos un elemento de las activations o weights, el programa afirmarรก e imprimirรก un informe
como este (esto fue capturado con `google/mt5-small` bajo fp16 mixed precision):
```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min abs max metadata
encoder.block.1.layer.1.DenseReluDense.dropout Dropout
0.00e+00 2.57e+02 input[0]
0.00e+00 2.85e+02 output
[...]
encoder.block.2.layer.0 T5LayerSelfAttention
6.78e-04 3.15e+03 input[0]
2.65e-04 3.42e+03 output[0]
None output[1]
2.25e-01 1.00e+04 output[2]
encoder.block.2.layer.1.layer_norm T5LayerNorm
8.69e-02 4.18e-01 weight
2.65e-04 3.42e+03 input[0]
1.79e-06 4.65e+00 output
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
encoder.block.2.layer.1.DenseReluDense.dropout Dropout
0.00e+00 8.76e+03 input[0]
0.00e+00 9.74e+03 output
encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00 inf output
```
El output del ejemplo se ha recortado en el centro por razones de brevedad.
La segunda columna muestra el valor del elemento mรกs grande en tรฉrminos absolutos, por lo que si observas con detenimiento los รบltimos fotogramas,
los inputs y outputs estaban en el rango de `1e4`. Asรญ que cuando este entrenamiento se hizo con fp16 mixed precision,
el รบltimo paso sufriรณ overflow (ya que bajo `fp16` el mayor nรบmero antes de `inf` es `64e3`). Para evitar overflows en
`fp16` las activations deben permanecer muy por debajo de `1e4`, porque `1e4 * 1e4 = 1e8` por lo que cualquier matrix multiplication con
grandes activations va a llevar a una condiciรณn de overflow numรฉrico.
Al principio del output puedes descubrir en quรฉ nรบmero de batch se produjo el problema (aquรญ `Detected inf/nan during batch_number=0` significa que el problema se produjo en el primer batch).
Cada frame del informe comienza declarando la entrada completamente calificada para el mรณdulo correspondiente que este frame estรก reportando.
Si nos fijamos sรณlo en este frame:
```
encoder.block.2.layer.1.layer_norm T5LayerNorm
8.69e-02 4.18e-01 weight
2.65e-04 3.42e+03 input[0]
1.79e-06 4.65e+00 output
```
Aquรญ, `encoder.block.2.layer.1.layer_norm` indica que era una layer norm para la primera capa, del segundo
block del encoder. Y la call especรญfica del `forward` es `T5LayerNorm`.
Veamos los รบltimos frames de ese informe:
```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min abs max metadata
[...]
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00 inf output
```
El รบltimo frame informa para la funciรณn `Dropout.forward` con la primera entrada para el รบnico input y la segunda para el
รบnico output. Puedes ver que fue llamada desde un atributo `dropout` dentro de la clase `DenseReluDense`. Podemos ver
que ocurriรณ durante la primera capa, del segundo block, durante el primer batch. Por รบltimo, el mayor absoluto
elementos de input fue `6.27e+04` y el mismo para el output fue `inf`.
Puedes ver aquรญ, que `T5DenseGatedGeluDense.forward` resultรณ en output activations, cuyo valor mรกximo absoluto fue
alrededor de 62.7K, que estรก muy cerca del lรญmite mรกximo de fp16 de 64K. En el siguiente frame tenemos `Dropout`, el cual renormaliza
los weights, despuรฉs de poner a cero algunos de los elementos, lo que empuja el valor mรกximo absoluto a mรกs de 64K, y obtenemos un
overflow (`inf`).
Como puedes ver son los frames anteriores los que tenemos que mirar cuando los nรบmeros empiezan a ser muy grandes para nรบmeros fp16.
Combinemos el informe con el cรณdigo de `models/t5/modeling_t5.py`:
```python
class T5DenseGatedGeluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.gelu_act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
```
Ahora es fรกcil ver la call `dropout`, y tambiรฉn todas las calls anteriores.
Dado que la detecciรณn se produce en un forward hook, estos informes se imprimen inmediatamente despuรฉs de que cada `forward`
responda.
Volviendo al informe completo, para actuar sobre รฉl y arreglar el problema, tenemos que subir unos cuantos frames donde los nรบmeros
empezaron a subir y probablemente cambiar al modo `fp32` aquรญ, para que los nรบmeros no sufran overflow cuando se multipliquen
o al sumarlos. Por supuesto, puede haber otras soluciones. Por ejemplo, podrรญamos desactivar `amp` temporalmente si estรก
activado, despuรฉs de mover el original `forward` dentro de un helper wrapper, asรญ:
```python
def _forward(self, hidden_states):
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
import torch
def forward(self, hidden_states):
if torch.is_autocast_enabled():
with torch.cuda.amp.autocast(enabled=False):
return self._forward(hidden_states)
else:
return self._forward(hidden_states)
```
Como el detector automรกtico sรณlo informa de los inputs y outputs de los frames completos, una vez que sepas dรณnde buscar, puedes
analizar tambiรฉn las etapas intermedias de una funciรณn especรญfica de `forward`. En este caso, puede utilizar la funciรณn
funciรณn de ayuda `detect_overflow` para inyectar el detector donde quieras, por ejemplo:
```python
from transformers.debug_utils import detect_overflow
class T5LayerFF(nn.Module):
[...]
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
detect_overflow(forwarded_states, "after layer_norm")
forwarded_states = self.DenseReluDense(forwarded_states)
detect_overflow(forwarded_states, "after DenseReluDense")
return hidden_states + self.dropout(forwarded_states)
```
Puedes ver que hemos aรฑadido 2 de estos y ahora se trackea si `inf` o `nan` para `forwarded_states` fue detectado
en algรบn punto intermedio.
De hecho, el detector ya informa de esto porque cada una de las llamadas en el ejemplo anterior es un `nn.Module`, pero
digamos que si tuvieras algunos cรกlculos directos locales, asรญ es como lo harรญas.
Ademรกs, si estรกs instanciando el debugger en tu propio cรณdigo, puedes ajustar el nรบmero de frames impresos de
su valor por defecto, por ejemplo:
```python
from transformers.debug_utils import DebugUnderflowOverflow
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
```
### Rastreo de valores mรญnimos y mรกximos absolutos de batches especรญficos
La misma clase de debugging se puede utilizar para el rastreo por batches con la funciรณn de detecciรณn de underflow/overflow desactivada.
Digamos que quieres ver los valores mรญnimos y mรกximos absolutos de todos los ingredientes de cada call `forward` de un determinado
batch, y sรณlo hacerlo para los batches 1 y 3. Entonces instancias esta clase como:
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
```
Y ahora los batches 1 y 3 completos serรกn rastreados usando el mismo formato que el detector de underflow/overflow.
Los batches son 0-index.
Esto es muy รบtil si sabes que el programa empieza a comportarse mal despuรฉs de un determinado nรบmero de batch, para que puedas avanzar rรกpidamente
hasta esa รกrea. Aquรญ hay un ejemplo de output recortado para tal configuraciรณn:
```
*** Starting batch number=1 ***
abs min abs max metadata
shared Embedding
1.01e-06 7.92e+02 weight
0.00e+00 2.47e+04 input[0]
5.36e-05 7.92e+02 output
[...]
decoder.dropout Dropout
1.60e-07 2.27e+01 input[0]
0.00e+00 2.52e+01 output
decoder T5Stack
not a tensor output
lm_head Linear
1.01e-06 7.92e+02 weight
0.00e+00 1.11e+00 input[0]
6.06e-02 8.39e+01 output
T5ForConditionalGeneration
not a tensor output
*** Starting batch number=3 ***
abs min abs max metadata
shared Embedding
1.01e-06 7.92e+02 weight
0.00e+00 2.78e+04 input[0]
5.36e-05 7.92e+02 output
[...]
```
Aquรญ obtendrรกs un gran nรบmero de frames mostrados - tantos como forward calls haya en tu modelo, por lo que puede o no ser lo que quieras, pero a veces puede ser mรกs fรกcil de usar para debug que un debugger normal.
Por ejemplo, si un problema comienza a ocurrir en el batch 150. Entonces puedes mostrar las trazas de los batches 149 y 150 y comparar dรณnde
los nรบmeros empezaron a divergir.
Tambiรฉn puedes especificar el nรบmero de batch despuรฉs del cual se debe detener el entrenamiento, con:
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
```
| transformers/docs/source/es/debugging.md/0 | {
"file_path": "transformers/docs/source/es/debugging.md",
"repo_id": "transformers",
"token_count": 5532
} | 285 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Tour rรกpido
[[open-in-colab]]
ยกEntra en marcha con los ๐ค Transformers! Comienza usando [`pipeline`] para una inferencia veloz, carga un modelo preentrenado y un tokenizador con una [AutoClass](./model_doc/auto) para resolver tu tarea de texto, visiรณn o audio.
<Tip>
Todos los ejemplos de cรณdigo presentados en la documentaciรณn tienen un botรณn arriba a la derecha para elegir si quieres ocultar o mostrar el cรณdigo en Pytorch o TensorFlow.
Si no fuese asรญ, se espera que el cรณdigo funcione para ambos backends sin ningรบn cambio.
</Tip>
## Pipeline
[`pipeline`] es la forma mรกs fรกcil de usar un modelo preentrenado para una tarea dada.
<Youtube id="tiZFewofSLM"/>
El [`pipeline`] soporta muchas tareas comunes listas para usar:
**Texto**:
* Anรกlisis de Sentimiento (Sentiment Analysis, en inglรฉs): clasifica la polaridad de un texto dado.
* Generaciรณn de Texto (Text Generation, en inglรฉs): genera texto a partir de un input dado.
* Reconocimiento de Entidades (Name Entity Recognition o NER, en inglรฉs): etiqueta cada palabra con la entidad que representa (persona, fecha, ubicaciรณn, etc.).
* Responder Preguntas (Question answering, en inglรฉs): extrae la respuesta del contexto dado un contexto y una pregunta.
* Rellenar Mรกscara (Fill-mask, en inglรฉs): rellena el espacio faltante dado un texto con palabras enmascaradas.
* Resumir (Summarization, en inglรฉs): genera un resumen de una secuencia larga de texto o un documento.
* Traducciรณn (Translation, en inglรฉs): traduce un texto a otro idioma.
* Extracciรณn de Caracterรญsticas (Feature Extraction, en inglรฉs): crea una representaciรณn tensorial del texto.
**Imagen**:
* Clasificaciรณn de Imรกgenes (Image Classification, en inglรฉs): clasifica una imagen.
* Segmentaciรณn de Imรกgenes (Image Segmentation, en inglรฉs): clasifica cada pixel de una imagen.
* Detecciรณn de Objetos (Object Detection, en inglรฉs): detecta objetos dentro de una imagen.
**Audio**:
* Clasificaciรณn de Audios (Audio Classification, en inglรฉs): asigna una etiqueta a un segmento de audio.
* Reconocimiento de Voz Automรกtico (Automatic Speech Recognition o ASR, en inglรฉs): transcribe datos de audio a un texto.
<Tip>
Para mรกs detalles acerca del [`pipeline`] y tareas asociadas, consulta la documentaciรณn [aquรญ](./main_classes/pipelines).
</Tip>
### Uso del Pipeline
En el siguiente ejemplo, usarรกs el [`pipeline`] para anรกlisis de sentimiento.
Instala las siguientes dependencias si aรบn no lo has hecho:
<frameworkcontent>
<pt>
```bash
pip install torch
```
</pt>
<tf>
```bash
pip install tensorflow
```
</tf>
</frameworkcontent>
Importa [`pipeline`] y especifica la tarea que deseas completar:
```py
>>> from transformers import pipeline
>>> clasificador = pipeline("sentiment-analysis", model="pysentimiento/robertuito-sentiment-analysis")
```
El pipeline descarga y almacena en cachรฉ el [modelo preentrenado](https://huggingface.co/pysentimiento/robertuito-sentiment-analysis) y tokeniza para anรกlisis de sentimiento. Si no hubieramos elegido un modelo el pipeline habrรญa elegido uno por defecto. Ahora puedes usar `clasificador` en tu texto objetivo:
```py
>>> clasificador("Estamos muy felices de mostrarte la biblioteca de ๐ค Transformers.")
[{'label': 'POS', 'score': 0.9320}]
```
Para mรกs de un enunciado, entrega una lista al [`pipeline`] que devolverรก una lista de diccionarios:
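Por ejemplo, un pequeño esquema (las frases son solo ilustrativas):
```py
>>> frases = ["Nos encanta esta biblioteca.", "Esperamos que no la odies."]
>>> resultados = clasificador(frases)  # devuelve una lista con un diccionario por frase
```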
El [`pipeline`] tambiรฉn puede iterar sobre un dataset entero. Comienza instalando la biblioteca [๐ค Datasets](https://huggingface.co/docs/datasets/):
```bash
pip install datasets
```
Crea un [`pipeline`] con la tarea que deseas resolver y el modelo que quieres usar. Coloca el parรกmetro `device` a `0` para poner los tensores en un dispositivo CUDA:
```py
>>> import torch
>>> from transformers import pipeline
>>> reconocedor_de_voz = pipeline(
... "automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish", device=0
... )
```
A continuaciรณn, carga el dataset (ve ๐ค Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) para mรกs detalles) sobre el que quisieras iterar. Por ejemplo, vamos a cargar el dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14):
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", name="es-ES", split="train") # doctest: +IGNORE_RESULT
```
Debemos asegurarnos de que la frecuencia de muestreo del conjunto de datos coincide con la frecuencia de muestreo con la que se entrenรณ `jonatasgrosman/wav2vec2-large-xlsr-53-spanish`.
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=reconocedor_de_voz.feature_extractor.sampling_rate))
```
Los archivos de audio se cargan y remuestrean automรกticamente cuando llamamos a la columna `"audio"`.
Extraigamos las matrices de onda cruda (raw waveform, en inglรฉs) de las primeras 4 muestras y pasรฉmosla como una lista al pipeline:
```py
>>> resultado = reconocedor_de_voz(dataset[:4]["audio"])
>>> print([d["text"] for d in resultado])
['ahora buenas eh a ver tengo un problema con vuestra aplicaciรณn resulta que que quiero hacer una transferencia bancaria a una cuenta conocida pero me da error la aplicaciรณn a ver que a ver que puede ser', 'la aplicaciรณn no cargue saldo de mi nueva cuenta', 'hola tengo un problema con la aplicaciรณn no carga y y tampoco veo que carga el saldo de mi cuenta nueva dice que la aplicaciรณn estรก siendo reparada y ahora no puedo acceder a mi cuenta no necesito inmediatamente', 'hora buena la aplicaciรณn no se carga la vida no carga el saldo de mi cuenta nueva dice que la villadenta siendo reparada y oro no puedo hacer a mi cuenta']
```
Para un dataset mรกs grande, donde los inputs son de mayor tamaรฑo (como en habla/audio o visiรณn), querrรกs pasar un generador en lugar de una lista que carga todos los inputs en memoria. Ve la [documentaciรณn del pipeline](./main_classes/pipelines) para mรกs informaciรณn.
### Usa otro modelo y otro tokenizador en el pipeline
El [`pipeline`] puede acomodarse a cualquier modelo del [Model Hub](https://huggingface.co/models) haciendo mรกs fรกcil adaptar el [`pipeline`] para otros casos de uso. Por ejemplo, si quisieras un modelo capaz de manejar texto en francรฉs, usa los tags en el Model Hub para filtrar entre los modelos apropiados. El resultado mejor filtrado devuelve un [modelo BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) multilingual fine-tuned para el anรกlisis de sentimiento. Genial, ยกvamos a usar este modelo!
```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```
<frameworkcontent>
<pt>
Usa [`AutoModelForSequenceClassification`] y ['AutoTokenizer'] para cargar un modelo preentrenado y un tokenizador asociado (mรกs en un `AutoClass` debajo):
```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</pt>
<tf>
Usa [`TFAutoModelForSequenceClassification`] y ['AutoTokenizer'] para cargar un modelo preentrenado y un tokenizador asociado (mรกs en un `TFAutoClass` debajo):
```py
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</tf>
</frameworkcontent>
Despuรฉs puedes especificar el modelo y el tokenizador en el [`pipeline`], y aplicar el `classifier` en tu texto objetivo:
```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes trรจs heureux de vous prรฉsenter la bibliothรจque ๐ค Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```
Si no pudieras encontrar el modelo para tu caso respectivo de uso necesitarรกs ajustar un modelo preentrenado a tus datos. Mira nuestro [tutorial de fine-tuning](./training) para aprender cรณmo. Finalmente, despuรฉs de que has ajustado tu modelo preentrenado, ยกpor favor considera compartirlo (ve el tutorial [aquรญ](./model_sharing)) con la comunidad en el Model Hub para democratizar el NLP! ๐ค
## AutoClass
<Youtube id="AhChOFRegn4"/>
Por debajo, las clases [`AutoModelForSequenceClassification`] y [`AutoTokenizer`] trabajan juntas para dar poder al [`pipeline`]. Una [AutoClass](./model_doc/auto) es un atajo que automรกticamente recupera la arquitectura de un modelo preentrenado con su nombre o el path. Sรณlo necesitarรกs seleccionar el `AutoClass` apropiado para tu tarea y tu tokenizador asociado con [`AutoTokenizer`].
Regresemos a nuestro ejemplo y veamos cรณmo puedes usar el `AutoClass` para reproducir los resultados del [`pipeline`].
### AutoTokenizer
Un tokenizador es responsable de procesar el texto a un formato que sea entendible para el modelo. Primero, el tokenizador separarรก el texto en palabras llamadas *tokens*. Hay mรบltiples reglas que gobiernan el proceso de tokenizaciรณn incluyendo el cรณmo separar una palabra y en quรฉ nivel (aprende mรกs sobre tokenizaciรณn [aquรญ](./tokenizer_summary)). Lo mรกs importante es recordar que necesitarรกs instanciar el tokenizador con el mismo nombre del modelo para asegurar que estรกs usando las mismas reglas de tokenizaciรณn con las que el modelo fue preentrenado.
Carga un tokenizador con [`AutoTokenizer`]:
```py
>>> from transformers import AutoTokenizer
>>> nombre_del_modelo = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(nombre_del_modelo)
```
Despuรฉs, el tokenizador convierte los tokens a nรบmeros para construir un tensor que servirรก como input para el modelo. Esto es conocido como el *vocabulario* del modelo.
Pasa tu texto al tokenizador:
```py
>>> encoding = tokenizer("Estamos muy felices de mostrarte la biblioteca de ๐ค Transformers.")
>>> print(encoding)
{'input_ids': [101, 10602, 14000, 13653, 43353, 10107, 10102, 47201, 10218, 10106, 18283, 10102, 100, 58263, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
El tokenizador devolverรก un diccionario conteniendo:
* [input_ids](./glossary#input-ids): representaciones numรฉricas de los tokens.
* [atttention_mask](.glossary#attention-mask): indica cuรกles tokens deben ser atendidos.
Como con el [`pipeline`], el tokenizador aceptarรก una lista de inputs. Ademรกs, el tokenizador tambiรฉn puede rellenar (pad, en inglรฉs) y truncar el texto para devolver un lote (batch, en inglรฉs) de longitud uniforme:
<frameworkcontent>
<pt>
```py
>>> pt_batch = tokenizer(
... ["We are very happy to show you the ๐ค Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="pt",
... )
```
</pt>
<tf>
```py
>>> tf_batch = tokenizer(
... ["We are very happy to show you the ๐ค Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="tf",
... )
```
</tf>
</frameworkcontent>
Lee el tutorial de [preprocessing](./preprocessing) para mรกs detalles acerca de la tokenizaciรณn.
### AutoModel
<frameworkcontent>
<pt>
๐ค Transformers provee una forma simple y unificada de cargar tus instancias preentrenadas. Esto significa que puedes cargar un [`AutoModel`] como cargarรญas un [`AutoTokenizer`]. La รบnica diferencia es seleccionar el [`AutoModel`] correcto para la tarea. Ya que estรกs clasificando texto, o secuencias, carga [`AutoModelForSequenceClassification`]:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
Ve el [task summary](./task_summary) para revisar quรฉ clase del [`AutoModel`] deberรญas usar para cada tarea.
</Tip>
Ahora puedes pasar tu lote (batch) preprocesado de inputs directamente al modelo. Solo tienes que desempacar el diccionario aรฑadiendo `**`:
```py
>>> pt_outputs = pt_model(**pt_batch)
```
El modelo producirรก las activaciones finales en el atributo `logits`. Aplica la funciรณn softmax a `logits` para obtener las probabilidades:
```py
>>> from torch import nn
>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
[0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>)
```
</pt>
<tf>
๐ค Transformers provee una forma simple y unificada de cargar tus instancias preentrenadas. Esto significa que puedes cargar un [`TFAutoModel`] como cargarรญas un [`AutoTokenizer`]. La รบnica diferencia es seleccionar el [`TFAutoModel`] correcto para la tarea. Ya que estรกs clasificando texto, o secuencias, carga [`TFAutoModelForSequenceClassification`]:
```py
>>> from transformers import TFAutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
Ve el [task summary](./task_summary) para revisar quรฉ clase del [`AutoModel`]
deberรญas usar para cada tarea.
</Tip>
Ahora puedes pasar tu lote preprocesado de inputs directamente al modelo pasando las llaves del diccionario directamente a los tensores:
```py
>>> tf_outputs = tf_model(tf_batch)
```
El modelo producirรก las activaciones finales en el atributo `logits`. Aplica la funciรณn softmax a `logits` para obtener las probabilidades:
```py
>>> import tensorflow as tf
>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
>>> print(tf.math.round(tf_predictions * 10**4) / 10**4)
tf.Tensor(
[[0.0021 0.0018 0.0116 0.2121 0.7725]
[0.2084 0.1826 0.1969 0.1755 0.2365]], shape=(2, 5), dtype=float32)
```
</tf>
</frameworkcontent>
<Tip>
Todos los modelos de ๐ค Transformers (PyTorch o TensorFlow) producirรกn los tensores *antes* de la funciรณn de activaciรณn
final (como softmax) porque la funciรณn de activaciรณn final es comรบnmente fusionada con la pรฉrdida.
</Tip>
Los modelos son [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) o [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) estรกndares asรญ que podrรกs usarlos en tu training loop usual. Sin embargo, para facilitar las cosas, ๐ค Transformers provee una clase [`Trainer`] para PyTorch que aรฑade funcionalidades para entrenamiento distribuido, preciciรณn mixta, y mรกs. Para TensorFlow, puedes usar el mรฉtodo `fit` desde [Keras](https://keras.io/). Consulta el [tutorial de entrenamiento](./training) para mรกs detalles.
<Tip>
Los outputs del modelo de ๐ค Transformers son dataclasses especiales por lo que sus atributos pueden ser completados en un IDE.
Los outputs del modelo tambiรฉn se comportan como tuplas o diccionarios (e.g., puedes indexar con un entero, un slice o una cadena) en cuyo caso los atributos que son `None` son ignorados.
</Tip>
### Guarda un modelo
<frameworkcontent>
<pt>
Una vez que se haya hecho fine-tuning a tu modelo puedes guardarlo con tu tokenizador usando [`PreTrainedModel.save_pretrained`]:
```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```
Cuando quieras usar el modelo otra vez cรกrgalo con [`PreTrainedModel.from_pretrained`]:
```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Una vez que se haya hecho fine-tuning a tu modelo puedes guardarlo con tu tokenizador usando [`TFPreTrainedModel.save_pretrained`]:
```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```
Cuando quieras usar el modelo otra vez cรกrgalo con [`TFPreTrainedModel.from_pretrained`]:
```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>
Una caracterรญstica particularmente interesante de ๐ค Transformers es la habilidad de guardar el modelo y cargarlo como un modelo de PyTorch o TensorFlow. El parรกmetro `from_pt` o `from_tf` puede convertir el modelo de un framework al otro:
<frameworkcontent>
<pt>
```py
>>> from transformers import AutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</pt>
<tf>
```py
>>> from transformers import TFAutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</tf>
</frameworkcontent>
| transformers/docs/source/es/quicktour.md/0 | {
"file_path": "transformers/docs/source/es/quicktour.md",
"repo_id": "transformers",
"token_count": 6360
} | 286 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Come creare una pipeline personalizzata?
In questa guida, scopriremo come creare una pipeline personalizzata e condividerla sull' [Hub](https://hf.co/models) o aggiungerla nella libreria
Transformers.
Innanzitutto, รจ necessario decidere gli input grezzi che la pipeline sarร in grado di accettare. Possono essere strings, raw bytes,
dictionaries o qualsiasi cosa sia l'input desiderato piรน probabile. Cerca di mantenere questi input il piรน possibile in Python
in quanto facilita la compatibilitร (anche con altri linguaggi tramite JSON). Questi saranno gli `inputs` della
pipeline (`preprocess`).
Poi definire gli `outputs`. Stessa strategia degli `inputs`. Piรน รจ seplice e meglio รจ. Questi saranno gli output del metodo
`postprocess`.
Si parte ereditando la classe base `Pipeline`. con i 4 metodi che bisogna implementare `preprocess`,
`_forward`, `postprocess` e `_sanitize_parameters`.
```python
from transformers import Pipeline
class MyPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
return preprocess_kwargs, {}, {}
def preprocess(self, inputs, maybe_arg=2):
model_input = Tensor(inputs["input_ids"])
return {"model_input": model_input}
def _forward(self, model_inputs):
# model_inputs == {"model_input": model_input}
outputs = self.model(**model_inputs)
# Maybe {"logits": Tensor(...)}
return outputs
def postprocess(self, model_outputs):
best_class = model_outputs["logits"].softmax(-1)
return best_class
```
La struttura di questa suddivisione consiste nel supportare in modo relativamente continuo CPU/GPU, supportando allo stesso tempo l'esecuzione di
pre/postelaborazione sulla CPU su thread diversi.
`preprocess` prenderร gli input originariamente definiti e li trasformerร in qualcosa di alimentabile dal modello. Potrebbe
contenere piรน informazioni e di solito รจ un `Dict`.
`_forward` รจ il dettaglio dell'implementazione e non รจ destinato a essere chiamato direttamente. `forward` รจ il metodo preferito per assicurarsi che tutto funzioni correttamente perchรจ contiene delle slavaguardie. Se qualcosa รจ
รจ collegato a un modello reale, appartiene al metodo `_forward`, tutto il resto รจ nel preprocess/postprocess.
`postprocess` prende l'otput di `_forward` e lo trasforma nell'output finale che era stato deciso in precedenza.
`_sanitize_parameters` esiste per consentire agli utenti di passare i parametri ogni volta che desiderano sia a inizialization time `pipeline(...., maybe_arg=4)` che al call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.
`_sanitize_parameters` ritorna 3 dicts di kwargs che vengono passati direttamente a `preprocess`,
`_forward` e `postprocess`. Non riempire nulla se il chiamante non ha chiamato con alcun parametro aggiuntivo. Questo
consente di mantenere gli argomenti predefiniti nella definizione della funzione, che รจ sempre piรน "naturale".
Un esempio classico potrebbe essere l'argomento `top_k` nel post processing dei classification tasks.
```python
>>> pipe = pipeline("my-new-task")
>>> pipe("This is a test")
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05}
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]
>>> pipe("This is a test", top_k=2)
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
```
In order to achieve that, we'll update our `postprocess` method with a default parameter set to `5`, and edit
`_sanitize_parameters` to allow this new parameter.
```python
def postprocess(self, model_outputs, top_k=5):
best_class = model_outputs["logits"].softmax(-1)
# Add logic to handle top_k
return best_class
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
postprocess_kwargs = {}
if "top_k" in kwargs:
postprocess_kwargs["top_k"] = kwargs["top_k"]
return preprocess_kwargs, {}, postprocess_kwargs
```
Cercare di mantenere gli input/output molto semplici e idealmente serializzabili in JSON, in quanto ciรฒ rende l'uso della pipeline molto facile
senza richiedere agli utenti di comprendere nuovi tipi di oggetti. ร anche relativamente comune supportare molti tipi di argomenti
per facilitarne l'uso (ad esempio file audio, possono essere nomi di file, URL o byte puri).
## Aggiungilo alla lista dei tasks supportati
Per registrar il tuo `new-task` alla lista dei tasks supportati, devi aggiungerlo al `PIPELINE_REGISTRY`:
```python
from transformers.pipelines import PIPELINE_REGISTRY
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
)
```
Puoi specificare il modello di default che desideri, in questo caso dovrebbe essere accompagnato da una revisione specifica (che puรฒ essere il nome di un branch o l'hash di un commit, in questo caso abbiamo preso `"abcdef"`) e anche dal type:
```python
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
default={"pt": ("user/awesome_model", "abcdef")},
type="text", # current support type: text, audio, image, multimodal
)
```
## Condividi la tua pipeline sull'Hub
Per condividere la tua pipeline personalizzata sull'Hub, devi solo salvare il codice della tua sottoclasse `Pipeline` in un file
python. Per esempio, supponiamo di voler utilizzare una pipeline personalizzata per la classificazione delle coppie di frasi come la seguente:
```py
import numpy as np
from transformers import Pipeline
def softmax(outputs):
maxes = np.max(outputs, axis=-1, keepdims=True)
shifted_exp = np.exp(outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs["second_text"] = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def preprocess(self, text, second_text=None):
return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs):
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits)
best_class = np.argmax(probabilities)
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
```
L'implementazione รจ agnostica al framework, e lavorerร sia con modelli PyTorch che con TensorFlow. Se l'abbiamo salvato in un file chiamato `pair_classification.py`, puรฒ essere successivamente importato e registrato in questo modo:
```py
from pair_classification import PairClassificationPipeline
from transformers.pipelines import PIPELINE_REGISTRY
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification,
tf_model=TFAutoModelForSequenceClassification,
)
```
Una volta fatto, possiamo usarla con un modello pretrained. L'istanza `sgugger/finetuned-bert-mrpc` รจ stata
fine-tuned sul dataset MRPC, che classifica le coppie di frasi come parafrasi o no.
```py
from transformers import pipeline
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
```
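Ad esempio, uno schema minimo di utilizzo (le frasi sono solo illustrative):
```py
result = classifier("I like you", second_text="I love you")
# `result` è un dizionario con "label", "score" e "logits", come definito in `postprocess`
```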
Successivamente possiamo condividerlo sull'Hub usando il metodo `push_to_hub`
```py
classifier.push_to_hub("test-dynamic-pipeline")
```
Questo codice copierร il file dove รจ stato definitp `PairClassificationPipeline` all'interno della cartella `"test-dynamic-pipeline"`,
insieme al salvataggio del modello e del tokenizer della pipeline, prima di pushare il tutto nel repository
`{your_username}/test-dynamic-pipeline`. Dopodichรฉ chiunque potrร utilizzarlo, purchรฉ fornisca l'opzione
`trust_remote_code=True`:
```py
from transformers import pipeline
classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True)
```
## Aggiungere la pipeline a Transformers
Se vuoi contribuire con la tua pipeline a Transformers, dovrai aggiungere un modulo nel sottomodulo `pipelines`
con il codice della tua pipeline, quindi aggiungilo all'elenco dei tasks definiti in `pipelines/__init__.py`.
Poi hai bisogno di aggiungere i test. Crea un nuovo file `tests/test_pipelines_MY_PIPELINE.py` con esempi ed altri test.
La funzione `run_pipeline_test` sarร molto generica e su piccoli modelli casuali su ogni possibile
architettura, come definito da `model_mapping` e `tf_model_mapping`.
Questo รจ molto importante per testare la compatibilitร futura, nel senso che se qualcuno aggiunge un nuovo modello di
`XXXForQuestionAnswering` allora il test della pipeline tenterร di essere eseguito su di esso. Poichรฉ i modelli sono casuali, รจ
รจ impossibile controllare i valori effettivi, per questo esiste un aiuto `ANY` che tenterร solamente di far corrispondere l'output della pipeline TYPE.
Hai anche *bisogno* di implementare 2 (idealmente 4) test.
- `test_small_model_pt` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso)
e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_tf`.
- `test_small_model_tf` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso)
e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_pt`.
- `test_large_model_pt` (`optional`): Testare la pipeline su una pipeline reale in cui i risultati dovrebbero avere
senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo รจ mostrare la pipeline e assicurarsi che non ci siano derive nelle versioni future
- `test_large_model_tf` (`optional`): Testare la pipeline su una pipeline reale in cui i risultati dovrebbero avere
senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo รจ mostrare la pipeline e assicurarsi
che non ci siano derive nelle versioni future | transformers/docs/source/it/add_new_pipeline.md/0 | {
"file_path": "transformers/docs/source/it/add_new_pipeline.md",
"repo_id": "transformers",
"token_count": 4072
} | 287 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Inferenza efficiente su GPU singola
Questo documento sarร presto completato con informazioni su come effetture l'inferenza su una singola GPU. Nel frattempo รจ possibile consultare [la guida per l'addestramento su una singola GPU](perf_train_gpu_one) e [la guida per l'inferenza su CPU](perf_infer_cpu).
## `BetterTransformer` per l'inferenza piรน veloce
Abbiamo recentemente integrato `BetterTransformer` per velocizzare l'inferenza su GPU per modelli di testo, immagini e audio. Per maggiori dettagli, consultare la documentazione su questa integrazione [qui](https://huggingface.co/docs/optimum/bettertransformer/overview).
## Integrazione di `bitsandbytes` per Int8 mixed-precision matrix decomposition
<Tip>
Nota che questa funzione puรฒ essere utilizzata anche nelle configurazioni multi GPU.
</Tip>
Dal paper [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339), noi supportiamo l'integrazione di Hugging Face per tutti i modelli dell'Hub con poche righe di codice.
Il metodo `nn.Linear` riduce la dimensione di 2 per i pesi `float16` e `bfloat16` e di 4 per i pesi `float32`, con un impatto quasi nullo sulla qualitร , operando sugli outlier in half-precision.

Il metodo Int8 mixed-precision matrix decomposition funziona separando la moltiplicazione tra matrici in due flussi: (1) una matrice di flusso di outlier di caratteristiche sistematiche moltiplicata in fp16, (2) in flusso regolare di moltiplicazione di matrici int8 (99,9%). Con questo metodo, รจ possibile effettutare inferenza int8 per modelli molto grandi senza degrado predittivo.
Per maggiori dettagli sul metodo, consultare il [paper](https://arxiv.org/abs/2208.07339) o il nostro [blogpost sull'integrazione](https://huggingface.co/blog/hf-bitsandbytes-integration).

Nota che รจ necessaria una GPU per eseguire modelli di tipo mixed-8bit, poichรฉ i kernel sono stati compilati solo per le GPU. Prima di utilizzare questa funzione, assicurarsi di disporre di memoria sufficiente sulla GPU per memorizzare un quarto del modello (o la metร se i pesi del modello sono in mezza precisione).
Di seguito sono riportate alcune note per aiutarvi a utilizzare questo modulo, oppure seguite le dimostrazioni su [Google colab](#colab-demos).
### Requisiti
- Se si dispone di `bitsandbytes<0.37.0`, assicurarsi di eseguire su GPU NVIDIA che supportano tensor cores a 8 bit (Turing, Ampere o architetture piรน recenti - ad esempio T4, RTX20s RTX30s, A40-A100). Per `bitsandbytes>=0.37.0`, tutte le GPU dovrebbero essere supportate.
- Installare la versione corretta di `bitsandbytes` eseguendo:
`pip install bitsandbytes>=0.31.5`.
- Installare `accelerate`
`pip install accelerate>=0.12.0`
### Esecuzione di modelli mixed-Int8 - configurazione per singola GPU
Dopo aver installato le librerie necessarie, per caricare il tuo modello mixed 8-bit รจ il seguente:
```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
```
Per la generazione di testo, si consiglia di:
* utilizzare il metodo `generate()` del modello invece della funzione `pipeline()`. Sebbene l'inferenza sia possibile con la funzione `pipeline()`, essa non è ottimizzata per i modelli mixed-8bit e sarà più lenta rispetto all'uso del metodo `generate()`. Inoltre, alcune strategie di campionamento, come il campionamento nucleus, non sono supportate dalla funzione `pipeline()` per i modelli mixed-8bit.
* collocare tutti gli ingressi sullo stesso dispositivo del modello.
Ecco un semplice esempio:
```py
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
model_name = "bigscience/bloom-2b5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
prompt = "Hello, my llama is cute"
>>> inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
>>> generated_ids = model_8bit.generate(**inputs)
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
```
### Esecuzione di modelli mixed-8bit - configurazione multi GPU
Usa il seguente codice per caricare il modello mixed-8bit su piรน GPU (รจ lo stesso comando della configurazione a GPU singola):
```py
model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
```
Puoi controllare la memoria GPU da allocare su ogni GPU usando `accelerate`. Utilizza l'argomento `max_memory` come segue:
```py
max_memory_mapping = {0: "1GB", 1: "2GB"}
model_name = "bigscience/bloom-3b"
model_8bit = AutoModelForCausalLM.from_pretrained(
model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping
)
```
In questo esempio, la prima GPU utilizzerร 1 GB di memoria e la seconda 2 GB.
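Per verificare come i moduli sono stati distribuiti tra le GPU puoi, ad esempio, ispezionare l'attributo `hf_device_map` del modello caricato (disponibile quando il modello viene caricato con una device map tramite `accelerate`):
```py
# stampa la mappa modulo -> dispositivo risultante
print(model_8bit.hf_device_map)
```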
### Colab demos
Con questo metodo รจ possibile eseguire l'inferenza con modelli che prima non era possibile eseguire su Google Colab.
Guardate la demo per l'esecuzione di T5-11b (42GB in fp32) con la quantizzazione a 8 bit su Google Colab:
[](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing)
Oppure questa demo di BLOOM-3B:
[](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) | transformers/docs/source/it/perf_infer_gpu_one.md/0 | {
"file_path": "transformers/docs/source/it/perf_infer_gpu_one.md",
"repo_id": "transformers",
"token_count": 2331
} | 288 |
<!--
Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ ใใฎใใกใคใซใฏMarkdownๅฝขๅผใงใใใใใญใฅใกใณใใผใทใงใณใใซใใผ็จใฎ็นๅฎใฎๆงๆใๅซใใงใใใMarkdownใใฅใผใขใผใงใฏๆญฃใใ่กจ็คบใใใชใใใจใซๆณจๆใใฆใใ ใใใ
-->
# Attention mechanism
ใปใจใใฉใฎTransformerใขใใซใฏใใขใใณใทใงใณ่กๅใๆญฃๆนๅฝขใงใใใจใใๆๅณใงๅฎๅจใชใขใใณใทใงใณใไฝฟ็จใใพใใ
ใใใฏใ้ทใใใญในใใๆฑใๅ ดๅใซ่จ็ฎใฎใใใซใใใฏใจใชใใใจใใใใพใใLongformerใReformerใฏใใใๅน็็ใงใใฌใผใใณใฐใ้ซ้ๅใใใใใซใขใใณใทใงใณ่กๅใฎในใใผในใใผใธใงใณใไฝฟ็จใใใใจใใใขใใซใงใใ
## LSH attention
[Reformer](model_doc/reformer)ใฏLSH๏ผๅฑๆ็ใซๆฃๅจใใใทใฅ๏ผใขใใณใทใงใณใไฝฟ็จใใพใใ
ใฝใใใใใฏใน(QK^t)ใงใฏใ่กๅQK^tใฎไธญใง๏ผใฝใใใใใฏในๆฌกๅใง๏ผๆใๅคงใใช่ฆ็ด ใฎใฟใๆ็จใชๅฏไธใๆไพใใพใใ
ใใใใฃใฆใๅใฏใจใชqใซใคใใฆใใฏใจใชqใซ่ฟใใญใผkใฎใฟใ่ๆฎใงใใพใใ
qใจkใ่ฟใใใฉใใใๆฑบๅฎใใใใใซใใใใทใฅ้ขๆฐใไฝฟ็จใใใพใใ
ใขใใณใทใงใณใในใฏใฏๅคๆดใใใ็พๅจใฎใใผใฏใณใใในใฏๅใใพใ๏ผๆๅใฎไฝ็ฝฎใ้คใ๏ผใ
ใชใใชใใใใใฏใฏใจใชใจใญใผใ็ญใใ๏ผใคใพใ้ๅธธใซไผผใฆใใ๏ผใฏใจใชใจใญใผใๆไพใใใใใงใใ
ใใใทใฅใฏๅคๅฐใฉใณใใ ใใใใใชใใใใๅฎ้ใซใฏใใใคใใฎใใใทใฅ้ขๆฐใไฝฟ็จใใ๏ผn_roundsใใฉใกใผใฟใงๆฑบๅฎใใใพใ๏ผใใใใใๅนณๅๅใใใพใใ
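As a rough illustration of the bucketing idea, here is an assumed, simplified sketch (not Reformer's actual implementation):
```python
import torch


def lsh_buckets(x: torch.Tensor, n_buckets: int, seed: int = 0) -> torch.Tensor:
    # Angular LSH via random projections: argmax over [xR, -xR] assigns each
    # vector to one of n_buckets buckets; similar vectors tend to share buckets.
    generator = torch.Generator().manual_seed(seed)
    R = torch.randn(x.shape[-1], n_buckets // 2, generator=generator)
    h = x @ R
    return torch.argmax(torch.cat([h, -h], dim=-1), dim=-1)


qk = torch.randn(16, 64)  # shared query/key vectors (Reformer ties q and k)
buckets = lsh_buckets(qk, n_buckets=8)
# attention is then only computed between tokens that fall in the same bucket
```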
## Local attention
[Longformer](model_doc/longformer)ใฏใญใผใซใซใขใใณใทใงใณใไฝฟ็จใใพใใ
ใใฐใใฐใใญใผใซใซใณใณใใญในใ๏ผไพ๏ผๅทฆๅณใฎ2ใคใฎใใผใฏใณใฏไฝใงใใ๏ผ๏ผใฏใ็นๅฎใฎใใผใฏใณใซๅฏพใใฆ่กๅใ่ตทใใใฎใซๅๅใงใใ
ใพใใๅฐใใชใฆใฃใณใใฆใๆใคใขใใณใทใงใณใฌใคใคใผใ็ฉใฟ้ใญใใใจใงใๆๅพใฎใฌใคใคใผใฏใฆใฃใณใใฆๅใฎใใผใฏใณใ ใใงใชใใใฆใฃใณใใฆๅใฎใใผใฏใณใ่ถใใฆๅๅฎน้ใๆใคใใใซใชใใๆๅจไฝใฎ่กจ็พใๆง็ฏใงใใพใใ
ไธ้จใฎไบๅ้ธๆใใใๅฅๅใใผใฏใณใซใฏใฐใญใผใใซใขใใณใทใงใณใไธใใใใพใใ
ใใใใฎๅฐๆฐใฎใใผใฏใณใซๅฏพใใฆใใขใใณใทใงใณ่กๅใฏใในใฆใฎใใผใฏใณใซใขใฏใปในใงใใใใฎใใญใปในใฏๅฏพ็งฐ็ใงใใ
ไปใฎใในใฆใฎใใผใฏใณใฏใใใใใฎ็นๅฎใฎใใผใฏใณใซใขใฏใปในใงใใพใ๏ผใญใผใซใซใฆใฃใณใใฆๅใฎใใผใฏใณใซๅ ใใฆ๏ผใ
ใใใฏใ่ซๆใฎๅณ2dใซ็คบใใใฆใใใไปฅไธใฏใตใณใใซใฎใขใใณใทใงใณใในใฏใงใ๏ผ
<div class="flex justify-center">
<img scale="50 %" align="center" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png"/>
</div>
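A minimal sketch of such a local (sliding-window) mask, given here only as an illustration of the idea:
```python
import torch


def local_attention_mask(seq_len: int, window: int) -> torch.Tensor:
    # True where token i is allowed to attend to token j, i.e. |i - j| <= window
    idx = torch.arange(seq_len)
    return (idx[None, :] - idx[:, None]).abs() <= window


print(local_attention_mask(seq_len=8, window=2).int())
```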
## Other tricks
### Axial positional encodings
[Reformer](model_doc/reformer)ใฏ่ปธๆนๅใฎไฝ็ฝฎใจใณใณใผใใฃใณใฐใไฝฟ็จใใฆใใพใใไผ็ตฑ็ใชใใฉใณในใใฉใผใใผใขใใซใงใฏใไฝ็ฝฎใจใณใณใผใใฃใณใฐEใฏใตใคใบใ \\(l\\) ร \\(d\\) ใฎ่กๅใงใ\\(l\\) ใฏใทใผใฑใณในใฎ้ทใใ\\(d\\) ใฏ้ ใ็ถๆใฎๆฌกๅใงใใ้ๅธธใซ้ทใใใญในใใๆฑใๅ ดๅใใใฎ่กๅใฏ้ๅธธใซๅคงใใใGPUไธใงๅคง้ใฎในใใผในใๅ ๆใใพใใใใใ็ทฉๅใใใใใซใ่ปธๆนๅใฎไฝ็ฝฎใจใณใณใผใใฃใณใฐใฏใใใฎๅคงใใช่กๅEใ2ใคใฎๅฐใใช่กๅE1ใจE2ใซๅ่งฃใใพใใใใใใใฎ่กๅใฏใตใคใบ \\(l_{1} \times d_{1}\\) ใใใณ \\(l_{2} \times d_{2}\\) ใๆใกใ \\(l_{1} \times l_{2} = l\\) ใใใณ \\(d_{1} + d_{2} = d\\) ใจใใๆกไปถใๆบใใใพใ๏ผ้ทใใฎ็ฉใ่ใใใจใใใใใฏใใใซๅฐใใใชใใพใ๏ผใ่กๅEๅใฎๆๅป \\(j\\) ใฎๅใ่พผใฟใฏใE1ๅใฎๆๅป \\(j \% l1\\) ใฎๅใ่พผใฟใจE2ๅใฎๆๅป \\(j // l1\\) ใฎๅใ่พผใฟใ้ฃ็ตใใใใจใซใใฃใฆๅพใใใพใใ
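The factorization can be sketched as follows (an illustrative example with assumed sizes, not Reformer's actual code):
```python
import torch

l1, l2, d1, d2 = 64, 128, 32, 96  # assumed factorization: l = l1 * l2, d = d1 + d2
E1 = torch.nn.Embedding(l1, d1)
E2 = torch.nn.Embedding(l2, d2)


def axial_position_embedding(j: torch.Tensor) -> torch.Tensor:
    # position j gets the concatenation of E1[j % l1] and E2[j // l1]
    return torch.cat([E1(j % l1), E2(j // l1)], dim=-1)


positions = torch.arange(l1 * l2)                  # full sequence of length l = 8192
embeddings = axial_position_embedding(positions)   # shape (8192, 128) = (l, d)
```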
| transformers/docs/source/ja/attention.md/0 | {
"file_path": "transformers/docs/source/ja/attention.md",
"repo_id": "transformers",
"token_count": 1963
} | 289 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Audio Spectrogram Transformer
## ๆฆ่ฆ
Audio Spectrogram Transformerใขใใซใฏใ[AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778)ใจใใ่ซๆใงYuan GongใYu-An ChungใJames Glassใซใใฃใฆๆๆกใใใพใใใใใใฏใ้ณๅฃฐใ็ปๅ๏ผในใใฏใใญใฐใฉใ ๏ผใซๅคๆใใใใจใงใ้ณๅฃฐใซ[Vision Transformer](vit)ใ้ฉ็จใใพใใใใฎใขใใซใฏ้ณๅฃฐๅ้กใซใใใฆๆๅ
็ซฏใฎ็ตๆใๅพใฆใใพใใ
่ซๆใฎ่ฆๆจใฏไปฅไธใฎ้ใใงใ๏ผ
*้ๅป10ๅนด้ใงใ็ณใฟ่พผใฟใใฅใผใฉใซใใใใฏใผใฏ๏ผCNN๏ผใฏใ้ณๅฃฐในใใฏใใญใฐใฉใ ใใๅฏพๅฟใใใฉใใซใธใฎ็ดๆฅ็ใชใใใใณใฐใๅญฆ็ฟใใใใจใ็ฎๆใใใจใณใใใผใจใณใใฎ้ณๅฃฐๅ้กใขใใซใฎไธป่ฆใชๆงๆ่ฆ็ด ใจใใฆๅบใๆก็จใใใฆใใพใใใ้ท่ท้ขใฎใฐใญใผใใซใชใณใณใใญในใใใใ่ฏใๆใใใใใๆ่ฟใฎๅพๅใจใใฆใCNNใฎไธใซใปใซใใขใใณใทใงใณๆฉๆงใ่ฟฝๅ ใใCNN-ใขใใณใทใงใณใใคใใชใใใขใใซใๅฝขๆใใใใจใใใใพใใใใใใCNNใธใฎไพๅญใๅฟ
่ฆใใฉใใใใใใฆ็ด็ฒใซใขใใณใทใงใณใซๅบใฅใใใฅใผใฉใซใใใใฏใผใฏใ ใใง้ณๅฃฐๅ้กใซใใใฆ่ฏใใใใฉใผใใณในใๅพใใใจใใงใใใใฉใใใฏๆใใใงใฏใใใพใใใๆฌ่ซๆใงใฏใใใใใฎๅใใซ็ญใใใใใ้ณๅฃฐๅ้ก็จใงใฏๆๅใฎ็ณใฟ่พผใฟใชใใง็ด็ฒใซใขใใณใทใงใณใใผในใฎใขใใซใงใใAudio Spectrogram Transformer๏ผAST๏ผใ็ดนไปใใพใใๆใ
ใฏASTใๆงใ
ใชใชใผใใฃใชๅ้กใใณใใใผใฏใง่ฉไพกใใAudioSetใง0.485 mAPใESC-50ใง95.6%ใฎๆญฃ่งฃ็ใSpeech Commands V2ใง98.1%ใฎๆญฃ่งฃ็ใจใใๆฐใใชๆๅ
็ซฏใฎ็ตๆใ้ๆใใพใใใ*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/audio_spectogram_transformer_architecture.png"
alt="drawing" width="600"/>
<small> Audio Spectrogram Transformerใฎใขใผใญใใฏใใฃใ<a href="https://arxiv.org/abs/2104.01778">ๅ
่ซๆ</a>ใใๆ็ฒใ</small>
ใใฎใขใใซใฏ[nielsr](https://huggingface.co/nielsr)ใใๆไพใใใพใใใ
ใชใชใธใใซใฎใณใผใใฏ[ใใกใ](https://github.com/YuanGongND/ast)ใง่ฆใใใจใใงใใพใใ
## ไฝฟ็จไธใฎใใณใ
- ็ฌ่ชใฎใใผใฟใปใใใงAudio Spectrogram Transformer๏ผAST๏ผใใใกใคใณใใฅใผใใณใฐใใๅ ดๅใๅฅๅใฎๆญฃ่ฆๅ๏ผๅฅๅใฎๅนณๅใ0ใๆจๆบๅๅทฎใ0.5ใซใใใใจ๏ผๅฆ็ใใใใจใๆจๅฅจใใใพใใ[`ASTFeatureExtractor`]ใฏใใใๅฆ็ใใพใใใใใฉใซใใงใฏAudioSetใฎๅนณๅใจๆจๆบๅๅทฎใไฝฟ็จใใฆใใใใจใซๆณจๆใใฆใใ ใใใ่่ใไธๆตใฎใใผใฟใปใใใฎ็ตฑ่จใใฉใฎใใใซ่จ็ฎใใฆใใใใฏใ[`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py)ใง็ขบ่ชใใใใจใใงใใพใใ
- ASTใฏไฝใๅญฆ็ฟ็ใๅฟ่ฆใงใใ ่่ใฏ[PSLA่ซๆ](https://arxiv.org/abs/2102.01243)ใงๆๆกใใใCNNใขใใซใซๆฏในใฆ10ๅๅฐใใๅญฆ็ฟ็ใไฝฟ็จใใฆใใพใ๏ผใ็ด ๆฉใๅๆใใใใใใฟในใฏใซ้ฉใใๅญฆ็ฟ็ใจๅญฆ็ฟ็ในใฑใธใฅใผใฉใผใๆขใใใจใใๅงใใใพใใ
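For example, overriding the normalization statistics could look roughly like this (the checkpoint name is a real AST checkpoint, but the mean/std values below are placeholders you would compute on your own data):
```python
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor.from_pretrained(
    "MIT/ast-finetuned-audioset-10-10-0.4593",
    mean=-4.27,  # placeholder: replace with the mean of your dataset's spectrograms
    std=4.57,    # placeholder: replace with the standard deviation of your dataset
)
```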
## ๅ่่ณๆ
Audio Spectrogram Transformerใฎไฝฟ็จใ้ๅงใใใฎใซๅฝน็ซใคๅ
ฌๅผใฎHugging Faceใใใณใณใใฅใใใฃ๏ผ๐ใง็คบใใใฆใใ๏ผใฎๅ่่ณๆใฎไธ่ฆงใงใใ
<PipelineTag pipeline="audio-classification"/>
- ASTใ็จใใ้ณๅฃฐๅ้กใฎๆจ่ซใ่ชฌๆใใใใผใใใใฏใฏ[ใใกใ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/AST)ใง่ฆใใใจใใงใใพใใ
- [`ASTForAudioClassification`]ใฏใใใฎ[ไพ็คบในใฏใชใใ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification)ใจ[ใใผใใใใฏ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)ใซใใฃใฆใตใใผใใใใฆใใพใใ
- ใใกใใๅ็
ง๏ผ[้ณๅฃฐๅ้กใฟในใฏ](../tasks/audio_classification)ใ
ใใใซๅ่่ณๆใๆๅบใใใๅ ดๅใฏใๆฐๅ
ผใญใชใPull Requestใ้ใใฆใใ ใใใ็งใใกใฏใใใใฌใใฅใผใใใใพใ๏ผๅ่่ณๆใฏใๆขๅญใฎใใฎใ่ค่ฃฝใใใฎใงใฏใชใใไฝใๆฐใใใใจใ็คบใใใจใ็ๆณ็ใงใใ
## ASTConfig
[[autodoc]] ASTConfig
## ASTFeatureExtractor
[[autodoc]] ASTFeatureExtractor
- __call__
## ASTModel
[[autodoc]] ASTModel
- forward
## ASTForAudioClassification
[[autodoc]] ASTForAudioClassification
- forward
| transformers/docs/source/ja/model_doc/audio-spectrogram-transformer.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/audio-spectrogram-transformer.md",
"repo_id": "transformers",
"token_count": 2249
} | 290 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Blenderbot Small
[`BlenderbotSmallModel`] ใจ
[`BlenderbotSmallForConditionalGeneration`] ใฏใใงใใฏใใคใณใใจ็ตใฟๅใใใฆใฎใฟไฝฟ็จใใใพใ
[facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M)ใใใๅคง่ฆๆจกใช Blenderbot ใใงใใฏใใคใณใใฏใ
ไปฃใใใซ [`BlenderbotModel`] ใจใจใใซไฝฟ็จใใฆใใ ใใใ
[`BlenderbotForConditionalGeneration`]
## Overview
Blender ใใฃใใใใใ ใขใใซใฏใ[Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) Stephen RollerใEmily DinanใNaman GoyalใDa JuใMary Williamsonใyinghan Liuใใงๆๆกใใใพใใใ
ใธใณใปใทใฅใผใใใคใซใปใชใใใใซใผใใปใทใฃในใฟใผใใจใชใใฏใปMใปในใในใY-ใฉใณใปใใผใญใผใใธใงใคใฝใณใปใฆใงในใใณใ2020ๅนด4ๆ30ๆฅใ
่ซๆใฎ่ฆๆจใฏๆฌกใฎใจใใใงใใ
*ใชใผใใณใใกใคใณใฎใใฃใใใใใใฎๆง็ฏใฏใๆฉๆขฐๅญฆ็ฟ็ ็ฉถใซใจใฃใฆ้ฃใใๅ้ใงใใใใใพใงใฎ็ ็ฉถใงใฏๆฌกใฎใใจใ็คบใใใฆใใพใใใ
ใใฅใผใฉใซ ใขใใซใใใฉใกใผใฟใผใฎๆฐใจใใฌใผใใณใฐๅฏพ่ฑกใฎใใผใฟใฎใตใคใบใงในใฑใผใชใณใฐใใใจใ็ตๆใๅไธใใพใใ
้ซๆง่ฝใฎใใฃใใใใใใซใฏไปใฎ่ฆ็ด ใ้่ฆใงใใใใจใ็คบใใพใใ่ฏใไผ่ฉฑใซใฏๅคใใฎใใจใๅฟ
่ฆใงใ
ไผ่ฉฑใฎๅฐ้ๅฎถใใทใผใ ใฌในใซ่ๅใใในใญใซ: ้ญ
ๅ็ใช่ฉฑใฎใใคใณใใๆไพใใ่ฉฑใ่ใ
ไธ่ฒซใใๆ
ๅบฆใ็ถญๆใใชใใใ็ฅ่ญใๅ
ฑๆใๅๆงใ้ฉๅใซ่กจ็พใใ
ใใซใฝใใ้ฉๅใชใใฌใผใใณใฐ ใใผใฟใจ้ธๆใไธใใใใๅ ดๅใๅคง่ฆๆจกใขใใซใใใใใฎในใญใซใๅญฆ็ฟใงใใใใจใ็คบใใพใใ
ไธไปฃๆฆ็ฅใ 90Mใ2.7Bใ9.4B ใใฉใกใผใฟใผ ใขใใซใไฝฟ็จใใฆใใใใฎใฌใทใใฎใใชใขใณใใๆง็ฏใใใขใใซใไฝๆใใพใใ
ใณใผใใฏๅ
ฌ้ใใใฆใใพใใไบบ้ใซใใ่ฉไพกใงใฏใๅฝ็คพใฎๆ่ฏใฎใขใใซใๆขๅญใฎใขใใญใผใใใใๅชใใฆใใใใจใใใซใใฟใผใณใง็คบใใใฆใใพใ
้ญ
ๅใจไบบ้ๆงใฎๆธฌๅฎใจใใ่ฆณ็นใใใฎๅฏพ่ฉฑใๆฌกใซใๅๆใซใใฃใฆใใฎไฝๆฅญใฎ้็ใซใคใใฆ่ชฌๆใใพใใ
ๅผ็คพๆฉ็จฎใฎๆ
้ไบไพ*
ใใใ๏ผ
- Blenderbot Small ใฏ็ตถๅฏพไฝ็ฝฎๅใ่พผใฟใๅใใใขใใซใชใฎใงใ้ๅธธใฏๅ
ฅๅใๅณๅดใซใใใฃใณใฐใใใใจใใๅงใใใพใใ
ๅทฆใ
ใใฎใขใใซใฏใ[patrickvonplaten](https://huggingface.co/patrickvonplaten) ใซใใฃใฆๆไพใใใพใใใ่่
ใฎใณใผใใฏๆฌกใฎใจใใใงใ
[ใใ](https://github.com/facebookresearch/ParlAI) ใใ่ฆงใใ ใใใ
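A short usage sketch (a hypothetical example; the checkpoint name below is assumed to be the small 90M variant on the Hub):
```python
from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

checkpoint = "facebook/blenderbot_small-90M"  # assumed checkpoint name
tokenizer = BlenderbotSmallTokenizer.from_pretrained(checkpoint)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(checkpoint)

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
reply_ids = model.generate(**inputs, max_new_tokens=60)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))
```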
## Documentation resources
- [ๅ ๆ่จ่ชใขใใชใณใฐ ใฟในใฏ ใฌใคใ](../tasks/language_modeling)
- [็ฟป่จณใฟในใฏใฌใคใ](../tasks/translation)
- [่ฆ็ดใฟในใฏใฌใคใ](../tasks/summarization)
## BlenderbotSmallConfig
[[autodoc]] BlenderbotSmallConfig
## BlenderbotSmallTokenizer
[[autodoc]] BlenderbotSmallTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## BlenderbotSmallTokenizerFast
[[autodoc]] BlenderbotSmallTokenizerFast
## BlenderbotSmallModel
[[autodoc]] BlenderbotSmallModel
- forward
## BlenderbotSmallForConditionalGeneration
[[autodoc]] BlenderbotSmallForConditionalGeneration
- forward
## BlenderbotSmallForCausalLM
[[autodoc]] BlenderbotSmallForCausalLM
- forward
## TFBlenderbotSmallModel
[[autodoc]] TFBlenderbotSmallModel
- call
## TFBlenderbotSmallForConditionalGeneration
[[autodoc]] TFBlenderbotSmallForConditionalGeneration
- call
## FlaxBlenderbotSmallModel
[[autodoc]] FlaxBlenderbotSmallModel
- __call__
- encode
- decode
## FlaxBlenderbotForConditionalGeneration
[[autodoc]] FlaxBlenderbotSmallForConditionalGeneration
- __call__
- encode
- decode
| transformers/docs/source/ja/model_doc/blenderbot-small.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/blenderbot-small.md",
"repo_id": "transformers",
"token_count": 1831
} | 291 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# CodeLlama
## Overview
Code Llama ใขใใซใฏใซใใฃใฆ [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) ใงๆๆกใใใพใใใ Baptiste Roziรจre, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jรฉrรฉmy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Dรฉfossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve.
่ซๆใฎ่ฆ็ดใฏๆฌกใฎใจใใใงใใ
*็งใใกใฏ Code Llama ใใชใชใผในใใพใใใใใฏ Llama 2 ใซๅบใฅใใณใผใใฎๅคง่ฆๆจก่จ่ชใขใใซ ใใกใใชใงใใใใชใผใใณ ใขใใซใฎไธญใงๆๅ
็ซฏใฎใใใฉใผใใณในใๅใ่พผใฟๆฉ่ฝใๅคง่ฆๆจกใชๅ
ฅๅใณใณใใญในใใฎใตใใผใใใใญใฐใฉใใณใฐ ใฟในใฏใฎใผใญใทใงใใๅฝไปค่ฟฝๅพๆฉ่ฝใๆไพใใพใใ ใๅน
ๅบใใขใใชใฑใผใทใงใณใใซใใผใใใใใฎ่คๆฐใฎใใฌใผใใผใๆไพใใฆใใพใใๅบ็คใขใใซ (Code Llama)ใPython ็นๅ (Code Llama - Python)ใใใใณใใใใ 7Bใ13Bใใใใณ 34B ใใฉใกใผใฟใผใๅใใๅฝไปค่ฟฝๅพใขใใซ (Code Llama - Instruct) ใงใใใในใฆใฎใขใใซใฏ 16,000 ใใผใฏใณใฎใทใผใฑใณในใงใใฌใผใใณใฐใใใๆๅคง 100,000 ใใผใฏใณใฎๅ
ฅๅใงๆนๅใ่ฆใใใพใใ 7B ใใใณ 13B ใณใผใ ใฉใใจใณใผใ ใฉใ - ๅฝไปคใใชใขใณใใฏใๅจๅฒใฎใณใณใใณใใซๅบใฅใใๅใ่พผใฟใใตใใผใใใพใใ Code Llama ใฏใใใใคใใฎใณใผใ ใใณใใใผใฏใงใชใผใใณ ใขใใซใฎไธญใงๆๅ
็ซฏใฎใใใฉใผใใณในใซ้ใใHumanEval ใจ MBPP ใงใใใใๆๅคง 53% ใจ 55% ใฎในใณใขใ็ฒๅพใใพใใใ็นใซใCode Llama - Python 7B ใฏ HumanEval ใใใณ MBPP ไธใง Llama 2 70B ใใใๅชใใใใใฉใผใใณในใ็คบใใใในใฆใฎใขใใซใฏ MultiPL-E ไธใงๅ
ฌ้ใใใฆใใไปใฎใในใฆใฎใขใใซใใใๅชใใฆใใพใใ็งใใกใฏใ็ ็ฉถใจๅๆฅญๅฉ็จใฎไธกๆนใ่จฑๅฏใใๅฏๅฎนใชใฉใคใปใณในใซๅบใฅใใฆ Code Llama ใใชใชใผในใใฆใใพใใ*
ใในใฆใฎ Code Llama ใขใใซ ใใงใใฏใใคใณใใ [ใใกใ](https://huggingface.co/models?search=code_llama) ใง็ขบ่ชใใ[meta llama org](https://huggingface.co/meta-llama) ใงๆญฃๅผใซใชใชใผในใใใใใงใใฏใใคใณใใ็ขบ่ชใใฆใใ ใใใ
ใใฎใขใใซใฏ [ArthurZucker](https://huggingface.co/ArthurZ) ใซใใฃใฆๆไพใใใพใใใ่่
ใฎใชใชใธใใซใฎใณใผใใฏ [ใใกใ](https://github.com/facebookresearch/llama) ใซใใใพใใ
## Usage tips and examples
<Tip warning={true}>
Code Llama ใฎใใผในใจใชใ`Llama2`ใใกใใชใผ ใขใใซใฏใ`bfloat16`ใไฝฟ็จใใฆใใฌใผใใณใฐใใใพใใใใๅ
ใฎๆจ่ซใงใฏ`float16`ใไฝฟ็จใใพใใใใพใใพใช็ฒพๅบฆใ่ฆใฆใฟใพใใใใ
* `float32`: ใขใใซใฎๅๆๅใซ้ขใใ PyTorch ใฎ่ฆ็ดใงใฏใใขใใซใฎ้ใฟใใฉใฎ `dtype` ใงๆ ผ็ดใใใใใซ้ขไฟใชใใใขใใซใ `float32` ใซใญใผใใใพใใ ใtransformersใใใPyTorch ใจใฎไธ่ฒซๆงใไฟใคใใใซใใฎ่ฆๅใซๅพใฃใฆใใพใใใใใฏใใใฉใซใใง้ธๆใใใพใใ `AutoModel` API ใงในใใฌใผใธใฎ้ใฟไปใใฟใคใใไฝฟ็จใใฆใใงใใฏใใคใณใใฎใญใผใใใญใฃในใใใๅ ดๅใฏใ`torch_dtype="auto"` ใๆๅฎใใๅฟ
่ฆใใใใพใใ `model = AutoModelForCausalLM.from_pretrained("path", torch_dtype = "auto")`ใ
* `bfloat16`: ใณใผใ Llama ใฏใใฎ็ฒพๅบฆใงใใฌใผใใณใฐใใใฆใใใใใใใใชใใใฌใผใใณใฐใๅพฎ่ชฟๆดใซไฝฟ็จใใใใจใใๅงใใใพใใ
* `float16`: ใใฎ็ฒพๅบฆใไฝฟ็จใใฆๆจ่ซใๅฎ่กใใใใจใใๅงใใใพใใ้ๅธธใฏ `bfloat16` ใใ้ซ้ใงใใใ่ฉไพกใกใใชใฏในใซใฏ `bfloat16` ใจๆฏในใฆๆใใใชไฝไธใ่ฆใใใชใใใใงใใ bfloat16 ใไฝฟ็จใใฆๆจ่ซใๅฎ่กใใใใจใใงใใพใใๅพฎ่ชฟๆดๅพใfloat16 ใจ bfloat16 ใฎไธกๆนใงๆจ่ซ็ตๆใ็ขบ่ชใใใใจใใๅงใใใพใใ
ไธใง่ฟฐในใใใใซใใขใใซใๅๆๅใใใจใใซ `torch_dtype="auto"` ใไฝฟ็จใใชใ้ใใในใใฌใผใธใฎ้ใฟใฎ `dtype` ใฏใปใจใใฉ็ก้ขไฟใงใใใใฎ็็ฑใฏใใขใใซใๆๅใซใใฆใณใญใผใใใ (ใชใณใฉใคใณใฎใใงใใฏใใคใณใใฎ `dtype` ใไฝฟ็จ)ใๆฌกใซ `torch` ใฎใใใฉใซใใฎ `dtype` ใซใญใฃในใใใใใใใงใ (`torch.float32` ใซใชใใพใ)ใๆๅฎใใใ `torch_dtype` ใใใๅ ดๅใฏใไปฃใใใซใใใไฝฟ็จใใใพใใ
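As a short illustration of the options above (a sketch using the 7B checkpoint referenced elsewhere on this page):

```python
import torch
from transformers import AutoModelForCausalLM

checkpoint = "meta-llama/CodeLlama-7b-hf"

# keep whatever dtype the checkpoint weights were stored in
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype="auto")

# or explicitly request half precision for inference
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float16)
```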
</Tip>
ใใใ๏ผ
- ๅ
ๅกซใฟในใฏใฏใใใซใตใใผใใใใพใใๅ
ฅๅใๅใใใๅ ดๆใซใฏ `tokenizer.fill_token` ใไฝฟ็จใใๅฟ
่ฆใใใใพใใ
- ใขใใซๅคๆในใฏใชใใใฏใ`Llama2` ใใกใใชใฎๅ ดๅใจๅใใงใใ
ไฝฟ็จไพใฏๆฌกใฎใจใใใงใใ
```bash
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
--input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
```
ในใฏใชใใใๅฎ่กใใใซใฏใ(ๆๅคงใฎใใผใธใงใณใงใใฃใฆใ) float16 ็ฒพๅบฆใงใขใใซๅ
จไฝใใในใใใใฎใซๅๅใช CPU RAM ใๅฟ
่ฆใงใใใใจใซๆณจๆใใฆใใ ใใใ
ใใใคใใฎใใงใใฏใใคใณใใใใใใใใใใซใขใใซใฎๅ้ใฟใฎไธ้จใๅซใพใใฆใใใใใใในใฆใ RAM ใซใญใผใใใๅฟ
่ฆใใใใพใ)ใ
ๅคๆๅพใใขใใซใจใใผใฏใใคใถใผใฏๆฌกใฎๆนๆณใงใญใผใใงใใพใใ
```python
>>> from transformers import LlamaForCausalLM, CodeLlamaTokenizer
>>> tokenizer = CodeLlamaTokenizer.from_pretrained("meta-llama/CodeLlama-7b-hf")
>>> model = LlamaForCausalLM.from_pretrained("meta-llama/CodeLlama-7b-hf")
>>> PROMPT = '''def remove_non_ascii(s: str) -> str:
""" <FILL_ME>
return result
'''
>>> input_ids = tokenizer(PROMPT, return_tensors="pt")["input_ids"]
>>> generated_ids = model.generate(input_ids, max_new_tokens=128)
>>> filling = tokenizer.batch_decode(generated_ids[:, input_ids.shape[1]:], skip_special_tokens = True)[0]
>>> print(PROMPT.replace("<FILL_ME>", filling))
def remove_non_ascii(s: str) -> str:
""" Remove non-ASCII characters from a string.
Args:
s: The string to remove non-ASCII characters from.
Returns:
The string with non-ASCII characters removed.
"""
result = ""
for c in s:
if ord(c) < 128:
result += c
return result
```
ๅกใใคใถใใใ้จๅใ ใใๅฟ
่ฆใชๅ ดๅ:
```python
>>> from transformers import pipeline
>>> import torch
>>> generator = pipeline("text-generation",model="meta-llama/CodeLlama-7b-hf",torch_dtype=torch.float16, device_map="auto")
>>> generator('def remove_non_ascii(s: str) -> str:\n """ <FILL_ME>\n return result', max_new_tokens = 128)
[{'generated_text': 'def remove_non_ascii(s: str) -> str:\n """ <FILL_ME>\n return resultRemove non-ASCII characters from a string. """\n result = ""\n for c in s:\n if ord(c) < 128:\n result += c'}]
```
ๅ
้จใงใฏใใใผใฏใใคใถใผใ [`<FILL_ME>` ใซใใฃใฆ่ชๅ็ใซๅๅฒ](https://huggingface.co/docs/transformers/main/model_doc/code_llama#transformers.CodeLlamaTokenizer.fill_token) ใใฆใ[ ใซ็ถใๆธๅผ่จญๅฎใใใๅ
ฅๅๆๅญๅใไฝๆใใพใใใชใชใธใใซใฎใใฌใผใใณใฐ ใใฟใผใณ](https://github.com/facebookresearch/codellama/blob/cb51c14ec761370ba2e2bc351374a79265d0465e/llama/generation.py#L402)ใใใใฏใใใฟใผใณใ่ชๅใงๆบๅใใใใใๅ
็ขใงใใใใผใฏใณใฎๆฅ็ใชใฉใใใใใฐใ้ๅธธใซ้ฃใใ่ฝใจใ็ฉดใๅ้ฟใงใใพใใใใฎใขใใซใพใใฏไปใฎใขใใซใซๅฟ
่ฆใช CPU ใใใณ GPU ใกใขใชใฎ้ใ็ขบ่ชใใใซใฏใใใฎๅคใๆฑบๅฎใใใฎใซๅฝน็ซใค [ใใฎ่จ็ฎใใผใซ](https://huggingface.co/spaces/hf-accelerate/model-memory-usage) ใ่ฉฆใใฆใใ ใใใ
LLaMA ใใผใฏใใคใถใผใฏใ[sentencepiece](https://github.com/google/sentencepiece) ใซๅบใฅใ BPE ใขใใซใงใใใปใณใใณในใใผในใฎ็ใฎ 1 ใคใฏใใทใผใฑใณในใใใณใผใใใใจใใซใๆๅใฎใใผใฏใณใๅ่ชใฎๅ
้ ญ (ไพ: ใBananaใ) ใงใใๅ ดๅใใใผใฏใใคใถใผใฏๆๅญๅใฎๅ
้ ญใซใใฌใใฃใใฏใน ในใใผในใ่ฟฝๅ ใใชใใใจใงใใ
<Tip>
ใณใผใ Llama ใฏใ`Llama2` ใขใใซใจๅใใขใผใญใใฏใใฃใๆใฃใฆใใพใใAPI ใชใใกใฌใณในใซใคใใฆใฏใ[Llama2 ใฎใใญใฅใกใณใ ใใผใธ](llama2) ใๅ็
งใใฆใใ ใใใ
ไปฅไธใฎ Code Llama ใใผใฏใใคใถใผใฎใชใใกใฌใณในใ่ฆใคใใฆใใ ใใใ
</Tip>
## CodeLlamaTokenizer
[[autodoc]] CodeLlamaTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## CodeLlamaTokenizerFast
[[autodoc]] CodeLlamaTokenizerFast
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- update_post_processor
- save_vocabulary
| transformers/docs/source/ja/model_doc/code_llama.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/code_llama.md",
"repo_id": "transformers",
"token_count": 4114
} | 292 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DePlot
## Overview
DePlot ใฏใFangyu LiuใJulian Martin AisenschlosใFrancesco PiccinnoใSyrine KricheneใChenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun. ใฎ่ซๆ [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) ใงๆๆกใใใพใใใใใณใป
่ซๆใฎ่ฆ็ดใซใฏๆฌกใฎใใใซ่จ่ผใใใฆใใพใใ
*ใใฃใผใใใใญใใใชใฉใฎ่ฆ่ฆ่จ่ชใฏไบบ้ใฎไธ็ใซ้ๅจใใฆใใพใใใใญใใใใใฃใผใใ็่งฃใใใซใฏใๅผทๅใชๆจ่ซในใญใซใๅฟ
่ฆใงใใๅพๆฅใฎๆๅ
็ซฏ (SOTA) ใขใใซใซใฏๅฐใชใใจใๆฐไธใฎใใฌใผใใณใฐ ใตใณใใซใๅฟ
่ฆใงใใใใใฎๆจ่ซ่ฝๅใฏใ็นใซไบบ้ใไฝๆใใ่ค้ใชใฏใจใชใงใฏไพ็ถใจใใฆๅคงๅน
ใซๅถ้ใใใฆใใพใใใใฎ่ซๆใงใฏใ่ฆ่ฆ่จ่ชๆจ่ซใซๅฏพใใๆๅใฎใฏใณใทใงใใ ใฝใชใฅใผใทใงใณใ็ดนไปใใพใใ็งใใกใฏใ่ฆ่ฆ่จ่ชๆจ่ซใฎ่ชฒ้กใ 2 ใคใฎในใใใใซๅ่งฃใใพใใ(1) ใใญใใใใใใญในใใธใฎ็ฟป่จณใจใ(2) ็ฟป่จณใใใใใญในใใซๅฏพใใๆจ่ซใงใใใใฎๆนๆณใฎ้ตใจใชใใฎใฏใใใญใใใพใใฏใใฃใผใใฎ็ปๅใ็ทๅฝขๅใใใใใผใใซใซๅคๆใใใDePlot ใจใใๅๅใฎใขใใชใใฃๅคๆใขใธใฅใผใซใงใใใใฎๅพใDePlot ใฎๅบๅใ็ดๆฅไฝฟ็จใใฆใไบๅใใฌใผใใณใฐๆธใฟใฎๅคง่ฆๆจก่จ่ชใขใใซ (LLM) ใใใญใณใใใใLLM ใฎๅฐๆฐใทใงใใๆจ่ซๆฉ่ฝใๅฉ็จใงใใพใใ DePlot ใๅๅพใใใซใฏใ็ตฑไธใใใใฟในใฏๅฝขๅผใจใกใใชใฏในใ็ขบ็ซใใใใจใงใใญใใใใใใผใใซใธใฎใฟในใฏใๆจๆบๅใใใใฎใฟในใฏใง DePlot ใใจใณใใใผใจใณใใงใใฌใผใใณใฐใใพใใ DePlot ใฏใใใฉใฐใขใณใใใฌใคๆนๅผใง LLM ใจใจใใซๆข่ฃฝใงไฝฟ็จใงใใพใใ 28,000 ใ่ถ
ใใใใผใฟ ใใคใณใใงๅพฎ่ชฟๆดใใใ SOTA ใขใใซใจๆฏ่ผใใฆใใฏใณใทใงใใ ใใญใณใใใฎใฟใไฝฟ็จใใ DePlot+LLM ใฏใใใฃใผใ QA ใฟในใฏใใใฎไบบใไฝๆใใใฏใจใชใซ้ขใใฆใๅพฎ่ชฟๆดใใใ SOTA ใใ 24.0% ใฎๆนๅใ้ๆใใพใใใ*
DePlot ใฏใ`Pix2Struct` ใขใผใญใใฏใใฃใไฝฟ็จใใฆใใฌใผใใณใฐใใใใขใใซใงใใ `Pix2Struct` ใฎ่ฉณ็ดฐใซใคใใฆใฏใ[Pix2Struct ใใญใฅใกใณใ](https://huggingface.co/docs/transformers/main/en/model_doc/pix2struct) ใๅ็
งใใฆใใ ใใใ
DePlot ใฏใ`Pix2Struct` ใขใผใญใใฏใใฃใฎ Visual Question Answering ใตใใปใใใงใใๅ
ฅๅใใใ่ณชๅใ็ปๅไธใซใฌใณใใชใณใฐใใ็ญใใไบๆธฌใใพใใ
## Usage example
็พๅจใDePlot ใงไฝฟ็จใงใใใใงใใฏใใคใณใใฏ 1 ใคใงใใ
- `google/deplot`: ChartQA ใใผใฟใปใใใงๅพฎ่ชฟๆดใใใ DePlot
```python
from transformers import AutoProcessor, Pix2StructForConditionalGeneration
import requests
from PIL import Image
model = Pix2StructForConditionalGeneration.from_pretrained("google/deplot")
processor = AutoProcessor.from_pretrained("google/deplot")
url = "https://raw.githubusercontent.com/vis-nlp/ChartQA/main/ChartQA%20Dataset/val/png/5090.png"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="Generate underlying data table of the figure below:", return_tensors="pt")
predictions = model.generate(**inputs, max_new_tokens=512)
print(processor.decode(predictions[0], skip_special_tokens=True))
```
## Fine-tuning
DePlot ใๅพฎ่ชฟๆดใใใซใฏใpix2struct [ๅพฎ่ชฟๆดใใผใใใใฏ](https://github.com/huggingface/notebooks/blob/main/examples/image_captioning_pix2struct.ipynb) ใๅ็
งใใฆใใ ใใใ `Pix2Struct` ใขใใซใฎๅ ดๅใAdafactor ใจใณใตใคใณๅญฆ็ฟ็ในใฑใธใฅใผใฉใไฝฟ็จใใฆใขใใซใๅพฎ่ชฟๆดใใใจใๅๆใ้ซ้ๅใใใใใจใใใใใพใใใ
```python
from transformers.optimization import Adafactor, get_cosine_schedule_with_warmup
optimizer = Adafactor(model.parameters(), scale_parameter=False, relative_step=False, lr=0.01, weight_decay=1e-05)
scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=1000, num_training_steps=40000)
```
<Tip>
DePlot ใฏใ`Pix2Struct`ใขใผใญใใฏใใฃใไฝฟ็จใใฆใใฌใผใใณใฐใใใใขใใซใงใใ API ใชใใกใฌใณในใซใคใใฆใฏใ[`Pix2Struct` ใใญใฅใกใณใ](pix2struct) ใๅ็
งใใฆใใ ใใใ
</Tip> | transformers/docs/source/ja/model_doc/deplot.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/deplot.md",
"repo_id": "transformers",
"token_count": 2027
} | 293 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Optimize inference using torch.compile()
ใใฎใฌใคใใฏใ[`torch.compile()`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) ใไฝฟ็จใใๆจ่ซ้ๅบฆใฎๅไธใซ้ขใใใใณใใใผใฏใๆไพใใใใจใ็ฎ็ใจใใฆใใพใใใใใฏใ[๐ค Transformers ใฎใณใณใใฅใผใฟใใธใงใณใขใใซ](https://huggingface.co/models?pipeline_tag=image-classification&library=transformers&sort=trending)ๅใใฎใใฎใงใใ
## Benefits of torch.compile
`torch.compile()`ใฎๅฉ็น
ใขใใซใจGPUใซใใฃใฆใฏใtorch.compile()ใฏๆจ่ซๆใซๆๅคง30%ใฎ้ซ้ๅใๅฎ็พใใพใใ `torch.compile()`ใไฝฟ็จใใใซใฏใใใผใธใงใณ2.0ไปฅไธใฎtorchใใคใณในใใผใซใใใ ใใงใใ
ใขใใซใฎใณใณใใคใซใซใฏๆ้ใใใใใใใๆฏๅๆจ่ซใใใฎใงใฏใชใใใขใใซใ1ๅบฆใ ใใณใณใใคใซใใๅ ดๅใซๅฝน็ซใกใพใใ
ไปปๆใฎใณใณใใฅใผใฟใใธใงใณใขใใซใใณใณใใคใซใใใซใฏใไปฅไธใฎใใใซใขใใซใซ`torch.compile()`ใๅผใณๅบใใพใ๏ผ
```diff
from transformers import AutoModelForImageClassification
model = AutoModelForImageClassification.from_pretrained(MODEL_ID).to("cuda")
+ model = torch.compile(model)
```
`compile()` ใฏใใณใณใใคใซใซ้ขใใ็ฐใชใใขใผใใๅใใฆใใใๅบๆฌ็ใซใฏใณใณใใคใซๆ้ใจๆจ่ซใฎใชใผใใผใใใใ็ฐใชใใพใใ`max-autotune` ใฏ `reduce-overhead` ใใใๆ้ใใใใใพใใใๆจ่ซ้ๅบฆใ้ใใชใใพใใใใใฉใซใใขใผใใฏใณใณใใคใซใซใใใฆใฏๆ้ใงใใใๆจ่ซๆ้ใซใใใฆใฏ `reduce-overhead` ใซๆฏในใฆๅน็ใ่ฏใใใใพใใใใใฎใฌใคใใงใฏใใใใฉใซใใขใผใใไฝฟ็จใใพใใใ่ฉณ็ดฐใซใคใใฆใฏใ[ใใกใ](https://pytorch.org/get-started/pytorch-2.0/#user-experience) ใๅ็
งใใฆใใ ใใใ
`torch` ใใผใธใงใณ 2.0.1 ใง็ฐใชใใณใณใใฅใผใฟใใธใงใณใขใใซใใฟในใฏใใใผใใฆใงใขใฎ็จฎ้กใใใใณใใใใตใคใบใไฝฟ็จใใฆ `torch.compile` ใใใณใใใผใฏใใพใใใ
## Benchmarking code
ไปฅไธใซใๅใฟในใฏใฎใใณใใใผใฏใณใผใใ็คบใใพใใๆจ่ซๅใซGPUใใฆใฉใผใ ใขใใใใๆฏๅๅใ็ปๅใไฝฟ็จใใฆ300ๅใฎๆจ่ซใฎๅนณๅๆ้ใๅๅพใใพใใ
### Image Classification with ViT
```python
from PIL import Image
import requests
import numpy as np
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224").to("cuda")
model = torch.compile(model)
processed_input = processor(image, return_tensors='pt').to(device="cuda")
with torch.no_grad():
_ = model(**processed_input)
```
### Object Detection with DETR
```python
from transformers import AutoImageProcessor, AutoModelForObjectDetection
processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to("cuda")
model = torch.compile(model)
inputs = processor(images=image, return_tensors="pt").to("cuda")
with torch.no_grad():
_ = model(**inputs)
```
### Image Segmentation with Segformer
```python
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to("cuda")
model = torch.compile(model)
seg_inputs = processor(images=image, return_tensors="pt").to("cuda")
with torch.no_grad():
_ = model(**seg_inputs)
```
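The exact timing code is not included in this guide; a minimal sketch of the procedure described above (warm-up, then averaging the latency of 300 runs on the same inputs) might look like this:
```python
import time

import torch


def benchmark(model, inputs, warmup: int = 10, runs: int = 300) -> float:
    # returns the average latency in milliseconds over `runs` forward passes
    with torch.no_grad():
        for _ in range(warmup):
            _ = model(**inputs)
        torch.cuda.synchronize()
        start = time.perf_counter()
        for _ in range(runs):
            _ = model(**inputs)
        torch.cuda.synchronize()
    return (time.perf_counter() - start) * 1000 / runs
```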
ไปฅไธใฏใ็งใใกใใใณใใใผใฏใ่กใฃใใขใใซใฎใชในใใงใใ
**Image Classification**
- [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224)
- [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k)
- [facebook/convnext-large-224](https://huggingface.co/facebook/convnext-large-224)
- [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50)
**Image Segmentation**
- [nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
- [facebook/mask2former-swin-tiny-coco-panoptic](https://huggingface.co/facebook/mask2former-swin-tiny-coco-panoptic)
- [facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade)
- [google/deeplabv3_mobilenet_v2_1.0_513](https://huggingface.co/google/deeplabv3_mobilenet_v2_1.0_513)
**Object Detection**
- [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32)
- [facebook/detr-resnet-101](https://huggingface.co/facebook/detr-resnet-101)
- [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50)
ไปฅไธใฏใ`torch.compile()`ใไฝฟ็จใใๅ ดๅใจไฝฟ็จใใชใๅ ดๅใฎๆจ่ซๆ้ใฎๅฏ่ฆๅใจใ็ฐใชใใใผใใฆใงใขใจใใใใตใคใบใฎๅใขใใซใซๅฏพใใใใใฉใผใใณในๅไธใฎๅฒๅใงใใ
<div class="flex">
<div>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/a100_batch_comp.png" />
</div>
<div>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/v100_batch_comp.png" />
</div>
<div>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/t4_batch_comp.png" />
</div>
</div>
<div class="flex">
<div>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/A100_1_duration.png" />
</div>
<div>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/A100_1_percentage.png" />
</div>
</div>


ไธ่จใฏใๅใขใใซใซใคใใฆ`compile()`ใไฝฟ็จใใๅ ดๅใจไฝฟ็จใใชใใฃใๅ ดๅใฎๆจ่ซๆ้๏ผใใช็งๅไฝ๏ผใงใใใชใใOwlViTใฏๅคงใใชใใใใตใคใบใงใฎไฝฟ็จๆใซใกใขใชไธ่ถณ๏ผOOM๏ผใ็บ็ใใใใจใซๆณจๆใใฆใใ ใใใ
### A100 (batch size: 1)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 9.325 | 7.584 |
| Image Segmentation/Segformer | 11.759 | 10.500 |
| Object Detection/OwlViT | 24.978 | 18.420 |
| Image Classification/BeiT | 11.282 | 8.448 |
| Object Detection/DETR | 34.619 | 19.040 |
| Image Classification/ConvNeXT | 10.410 | 10.208 |
| Image Classification/ResNet | 6.531 | 4.124 |
| Image Segmentation/Mask2former | 60.188 | 49.117 |
| Image Segmentation/Maskformer | 75.764 | 59.487 |
| Image Segmentation/MobileNet | 8.583 | 3.974 |
| Object Detection/Resnet-101 | 36.276 | 18.197 |
| Object Detection/Conditional-DETR | 31.219 | 17.993 |
### A100 (batch size: 4)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 14.832 | 14.499 |
| Image Segmentation/Segformer | 18.838 | 16.476 |
| Image Classification/BeiT | 13.205 | 13.048 |
| Object Detection/DETR | 48.657 | 32.418|
| Image Classification/ConvNeXT | 22.940 | 21.631 |
| Image Classification/ResNet | 6.657 | 4.268 |
| Image Segmentation/Mask2former | 74.277 | 61.781 |
| Image Segmentation/Maskformer | 180.700 | 159.116 |
| Image Segmentation/MobileNet | 14.174 | 8.515 |
| Object Detection/Resnet-101 | 68.101 | 44.998 |
| Object Detection/Conditional-DETR | 56.470 | 35.552 |
### A100 (batch size: 16)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 40.944 | 40.010 |
| Image Segmentation/Segformer | 37.005 | 31.144 |
| Image Classification/BeiT | 41.854 | 41.048 |
| Object Detection/DETR | 164.382 | 161.902 |
| Image Classification/ConvNeXT | 82.258 | 75.561 |
| Image Classification/ResNet | 7.018 | 5.024 |
| Image Segmentation/Mask2former | 178.945 | 154.814 |
| Image Segmentation/Maskformer | 638.570 | 579.826 |
| Image Segmentation/MobileNet | 51.693 | 30.310 |
| Object Detection/Resnet-101 | 232.887 | 155.021 |
| Object Detection/Conditional-DETR | 180.491 | 124.032 |
### V100 (batch size: 1)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 10.495 | 6.00 |
| Image Segmentation/Segformer | 13.321 | 5.862 |
| Object Detection/OwlViT | 25.769 | 22.395 |
| Image Classification/BeiT | 11.347 | 7.234 |
| Object Detection/DETR | 33.951 | 19.388 |
| Image Classification/ConvNeXT | 11.623 | 10.412 |
| Image Classification/ResNet | 6.484 | 3.820 |
| Image Segmentation/Mask2former | 64.640 | 49.873 |
| Image Segmentation/Maskformer | 95.532 | 72.207 |
| Image Segmentation/MobileNet | 9.217 | 4.753 |
| Object Detection/Resnet-101 | 52.818 | 28.367 |
| Object Detection/Conditional-DETR | 39.512 | 20.816 |
### V100 (batch size: 4)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 15.181 | 14.501 |
| Image Segmentation/Segformer | 16.787 | 16.188 |
| Image Classification/BeiT | 15.171 | 14.753 |
| Object Detection/DETR | 88.529 | 64.195 |
| Image Classification/ConvNeXT | 29.574 | 27.085 |
| Image Classification/ResNet | 6.109 | 4.731 |
| Image Segmentation/Mask2former | 90.402 | 76.926 |
| Image Segmentation/Maskformer | 234.261 | 205.456 |
| Image Segmentation/MobileNet | 24.623 | 14.816 |
| Object Detection/Resnet-101 | 134.672 | 101.304 |
| Object Detection/Conditional-DETR | 97.464 | 69.739 |
### V100 (batch size: 16)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 52.209 | 51.633 |
| Image Segmentation/Segformer | 61.013 | 55.499 |
| Image Classification/BeiT | 53.938 | 53.581 |
| Object Detection/DETR | OOM | OOM |
| Image Classification/ConvNeXT | 109.682 | 100.771 |
| Image Classification/ResNet | 14.857 | 12.089 |
| Image Segmentation/Mask2former | 249.605 | 222.801 |
| Image Segmentation/Maskformer | 831.142 | 743.645 |
| Image Segmentation/MobileNet | 93.129 | 55.365 |
| Object Detection/Resnet-101 | 482.425 | 361.843 |
| Object Detection/Conditional-DETR | 344.661 | 255.298 |
### T4 (batch size: 1)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 16.520 | 15.786 |
| Image Segmentation/Segformer | 16.116 | 14.205 |
| Object Detection/OwlViT | 53.634 | 51.105 |
| Image Classification/BeiT | 16.464 | 15.710 |
| Object Detection/DETR | 73.100 | 53.99 |
| Image Classification/ConvNeXT | 32.932 | 30.845 |
| Image Classification/ResNet | 6.031 | 4.321 |
| Image Segmentation/Mask2former | 79.192 | 66.815 |
| Image Segmentation/Maskformer | 200.026 | 188.268 |
| Image Segmentation/MobileNet | 18.908 | 11.997 |
| Object Detection/Resnet-101 | 106.622 | 82.566 |
| Object Detection/Conditional-DETR | 77.594 | 56.984 |
### T4 (batch size: 4)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 43.653 | 43.626 |
| Image Segmentation/Segformer | 45.327 | 42.445 |
| Image Classification/BeiT | 52.007 | 51.354 |
| Object Detection/DETR | 277.850 | 268.003 |
| Image Classification/ConvNeXT | 119.259 | 105.580 |
| Image Classification/ResNet | 13.039 | 11.388 |
| Image Segmentation/Mask2former | 201.540 | 184.670 |
| Image Segmentation/Maskformer | 764.052 | 711.280 |
| Image Segmentation/MobileNet | 74.289 | 48.677 |
| Object Detection/Resnet-101 | 421.859 | 357.614 |
| Object Detection/Conditional-DETR | 289.002 | 226.945 |
### T4 (batch size: 16)
| **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|
| Image Classification/ViT | 163.914 | 160.907 |
| Image Segmentation/Segformer | 192.412 | 163.620 |
| Image Classification/BeiT | 188.978 | 187.976 |
| Object Detection/DETR | OOM | OOM |
| Image Classification/ConvNeXT | 422.886 | 388.078 |
| Image Classification/ResNet | 44.114 | 37.604 |
| Image Segmentation/Mask2former | 756.337 | 695.291 |
| Image Segmentation/Maskformer | 2842.940 | 2656.88 |
| Image Segmentation/MobileNet | 299.003 | 201.942 |
| Object Detection/Resnet-101 | 1619.505 | 1262.758 |
| Object Detection/Conditional-DETR | 1137.513 | 897.390|
## PyTorch Nightly
ใพใใPyTorchใฎใใคใใชใผใใผใธใงใณ๏ผ2.1.0dev๏ผใงใฎใใณใใใผใฏใ่กใใใณใณใใคใซใใใฆใใชใใขใใซใจใณใณใใคใซๆธใฟใขใใซใฎไธกๆนใงใฌใคใใณใทใผใฎๅไธใ่ฆณๅฏใใพใใใใใคใผใซใฏ[ใใกใ](https://download.pytorch.org/whl/nightly/cu118)ใใๅฅๆใงใใพใใ
### A100
| **Task/Model** | **Batch Size** | **torch 2.0 - no compile** | **torch 2.0 -<br> compile** |
|:---:|:---:|:---:|:---:|
| Image Classification/BeiT | Unbatched | 12.462 | 6.954 |
| Image Classification/BeiT | 4 | 14.109 | 12.851 |
| Image Classification/BeiT | 16 | 42.179 | 42.147 |
| Object Detection/DETR | Unbatched | 30.484 | 15.221 |
| Object Detection/DETR | 4 | 46.816 | 30.942 |
| Object Detection/DETR | 16 | 163.749 | 163.706 |
### T4
| **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|:---:|
| Image Classification/BeiT | Unbatched | 14.408 | 14.052 |
| Image Classification/BeiT | 4 | 47.381 | 46.604 |
| Image Classification/BeiT | 16 | 42.179 | 42.147 |
| Object Detection/DETR | Unbatched | 68.382 | 53.481 |
| Object Detection/DETR | 4 | 269.615 | 204.785 |
| Object Detection/DETR | 16 | OOM | OOM |
### V100
| **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|:---:|
| Image Classification/BeiT | Unbatched | 13.477 | 7.926 |
| Image Classification/BeiT | 4 | 15.103 | 14.378 |
| Image Classification/BeiT | 16 | 52.517 | 51.691 |
| Object Detection/DETR | Unbatched | 28.706 | 19.077 |
| Object Detection/DETR | 4 | 88.402 | 62.949|
| Object Detection/DETR | 16 | OOM | OOM |
## Reduce Overhead
NightlyใใซใใงA100ใใใณT4ๅใใฎ `reduce-overhead` ใณใณใใคใซใขใผใใใใณใใใผใฏใใพใใใ
### A100
| **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|:---:|
| Image Classification/ConvNeXT | Unbatched | 11.758 | 7.335 |
| Image Classification/ConvNeXT | 4 | 23.171 | 21.490 |
| Image Classification/ResNet | Unbatched | 7.435 | 3.801 |
| Image Classification/ResNet | 4 | 7.261 | 2.187 |
| Object Detection/Conditional-DETR | Unbatched | 32.823 | 11.627 |
| Object Detection/Conditional-DETR | 4 | 50.622 | 33.831 |
| Image Segmentation/MobileNet | Unbatched | 9.869 | 4.244 |
| Image Segmentation/MobileNet | 4 | 14.385 | 7.946 |
### T4
| **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** |
|:---:|:---:|:---:|:---:|
| Image Classification/ConvNeXT | Unbatched | 32.137 | 31.84 |
| Image Classification/ConvNeXT | 4 | 120.944 | 110.209 |
| Image Classification/ResNet | Unbatched | 9.761 | 7.698 |
| Image Classification/ResNet | 4 | 15.215 | 13.871 |
| Object Detection/Conditional-DETR | Unbatched | 72.150 | 57.660 |
| Object Detection/Conditional-DETR | 4 | 301.494 | 247.543 |
| Image Segmentation/MobileNet | Unbatched | 22.266 | 19.339 |
| Image Segmentation/MobileNet | 4 | 78.311 | 50.983 |
| transformers/docs/source/ja/perf_torch_compile.md/0 | {
"file_path": "transformers/docs/source/ja/perf_torch_compile.md",
"repo_id": "transformers",
"token_count": 6636
} | 294 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Train with a script
๐ค Transformersใฎ[notebooks](./notebooks/README)ใจไธ็ทใซใ[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch)ใ[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow)ใใพใใฏ[JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax)ใไฝฟ็จใใฆใขใใซใใใฌใผใใณใฐใใๆนๆณใ็คบใใตใณใใซในใฏใชใใใใใใพใใ
ใพใใ็งใใกใฎ[็ ็ฉถใใญใธใงใฏใ](https://github.com/huggingface/transformers/tree/main/examples/research_projects)ใ[ใฌใฌใทใผใฎไพ](https://github.com/huggingface/transformers/tree/main/examples/legacy)ใงไฝฟ็จใใในใฏใชใใใ่ฆใคใใใพใใใใใใฎในใฏใชใใใฏ็พๅจใกใณใใใณในใใใฆใใใใใใใใๆๆฐใใผใธใงใณใฎใฉใคใใฉใชใจไบๆๆงใใชใ็นๅฎใฎ๐ค Transformersใฎใใผใธใงใณใๅฟ
่ฆใงใใ
ใตใณใใซในใฏใชใใใฏใในใฆใฎๅ้กใงใใฎใพใพๅไฝใใใใจใฏๆๅพ
ใใใฆใใใใ่งฃๆฑบใใใใจใใฆใใๅ้กใซในใฏใชใใใ้ฉๅฟใใใๅฟ
่ฆใใใใใใใใพใใใใใฎ็นใใตใใผใใใใใใซใใปใจใใฉใฎในใฏใชใใใฏใใผใฟใใฉใฎใใใซๅๅฆ็ใใใฆใใใใๅฎๅ
จใซๅ
ฌ้ใใๅฟ
่ฆใซๅฟใใฆ็ทจ้ใงใใใใใซใใฆใใพใใ
ใตใณใใซในใฏใชใใใงๅฎ่ฃ
ใใใๆฉ่ฝใใใๅ ดๅใฏใ[ใใฉใผใฉใ ](https://discuss.huggingface.co/)ใ[ใคใทใฅใผใใฉใใซใผ](https://github.com/huggingface/transformers/issues)ใง่ญฐ่ซใใฆใใใใซใชใฏใจในใใๆๅบใใฆใใ ใใใใใฐไฟฎๆญฃใฏๆญ่ฟใใพใใใ่ชญใฟใใใใฎใณในใใงๆฉ่ฝใ่ฟฝๅ ใใใใซใชใฏใจในใใฏใปใจใใฉใใผใธใใใชใๅฏ่ฝๆงใ้ซใใงใใ
ใใฎใฌใคใใงใฏใ[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization)ใจ[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization)ใงๅฎ่กใใใตใใชใผใผใทใงใณใใฌใผใใณใฐในใฏใชใใใฎๅฎ่กๆนๆณใ็คบใใพใใใในใฆใฎไพใฏใๆ็คบ็ใซๆๅฎใใใฆใใชใ้ใใไธกๆนใฎใใฌใผใ ใฏใผใฏใจใใซๅไฝใใใใจใๆๅพ
ใใใฆใใพใใ
## Setup
ๆๆฐใใผใธใงใณใฎใตใณใใซในใฏใชใใใๆญฃๅธธใซๅฎ่กใใใซใฏใๆฐใใไปฎๆณ็ฐๅขใซ๐ค Transformersใใฝใผในใใใคใณในใใผใซใใๅฟ
่ฆใใใใพใ:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```
ไปฅๅใฎในใฏใชใใใฎใใผใธใงใณใซใคใใฆใฏใไปฅไธใฎใใฐใซใใฏใชใใฏใใฆใใ ใใ๏ผ
<details>
<summary>ไปฅๅใฎ๐ค Transformersใฎใใผใธใงใณใซ้ขใใไพ</summary>
<ul>
<li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li>
</ul>
</details>
ๆฌกใซใ็พๅจใฎ๐ค Transformersใฎใฏใญใผใณใ็นๅฎใฎใใผใธใงใณใซๅใๆฟใใฆใใ ใใใใใจใใฐใv3.5.1ใชใฉใงใใ
```bash
git checkout tags/v3.5.1
```
้ฉๅใชใฉใคใใฉใชใใผใธใงใณใ่จญๅฎใใใใไปปๆใฎไพใฎใใฉใซใใซ็งปๅใใไพๅบๆใฎ่ฆไปถใใคใณในใใผใซใใพใ๏ผ
```bash
pip install -r requirements.txt
```
## Run a script
<frameworkcontent>
<pt>
ใใฎไพใฎในใฏใชใใใฏใ๐ค [Datasets](https://huggingface.co/docs/datasets/) ใฉใคใใฉใชใใใใผใฟใปใใใใใฆใณใญใผใใใๅๅฆ็ใ่กใใพใใๆฌกใซใ[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) ใไฝฟ็จใใฆ่ฆ็ดใใตใใผใใใใขใผใญใใฏใใฃไธใงใใผใฟใปใใใใใกใคใณใใฅใผใใณใฐใใพใใไปฅไธใฎไพใงใฏใ[CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) ใใผใฟใปใใไธใง [T5-small](https://huggingface.co/google-t5/t5-small) ใใใกใคใณใใฅใผใใณใฐใใๆนๆณใ็คบใใใฆใใพใใT5 ใขใใซใฏใใใฎใใฌใผใใณใฐๆนๆณใซ่ตทๅ ใใฆ่ฟฝๅ ใฎ `source_prefix` ๅผๆฐใๅฟ
่ฆใงใใใใฎใใญใณใใใซใใใT5 ใฏใใใ่ฆ็ดใฟในใฏใงใใใใจใ็ฅใใใจใใงใใพใใ
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
</pt>
<tf>
ใใฎไพใฎในใฏใชใใใฏใ๐ค [Datasets](https://huggingface.co/docs/datasets/) ใฉใคใใฉใชใใใใผใฟใปใใใใใฆใณใญใผใใใฆๅๅฆ็ใใพใใใใฎๅพใในใฏใชใใใฏ่ฆ็ดใใตใใผใใใใขใผใญใใฏใใฃไธใง Keras ใไฝฟ็จใใฆใใผใฟใปใใใใใกใคใณใใฅใผใใณใฐใใพใใไปฅไธใฎไพใงใฏใ[T5-small](https://huggingface.co/google-t5/t5-small) ใ [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) ใใผใฟใปใใใงใใกใคใณใใฅใผใใณใฐใใๆนๆณใ็คบใใฆใใพใใT5 ใขใใซใฏใใใฎใใฌใผใใณใฐๆนๆณใซ่ตทๅ ใใฆ่ฟฝๅ ใฎ `source_prefix` ๅผๆฐใๅฟ
่ฆใงใใใใฎใใญใณใใใฏใT5 ใซใใใ่ฆ็ดใฟในใฏใงใใใใจใ็ฅใใใพใใ
```bash
python examples/tensorflow/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 16 \
--num_train_epochs 3 \
--do_train \
--do_eval
```
</tf>
</frameworkcontent>
## Distributed training and mixed precision
[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer)ใฏใๅๆฃใใฌใผใใณใฐใจๆททๅ็ฒพๅบฆใใตใใผใใใฆใใพใใใคใพใใใใฎๆฉ่ฝใในใฏใชใใใงไฝฟ็จใใใใจใใงใใพใใใใใใฎๆฉ่ฝใๆๅนใซใใใซใฏใๆฌกใฎๆ้ ใๅฎ่กใใพใใ
- `fp16`ๅผๆฐใ่ฟฝๅ ใใฆๆททๅ็ฒพๅบฆใๆๅนใซใใพใใ
- `nproc_per_node`ๅผๆฐใงไฝฟ็จใใGPUใฎๆฐใ่จญๅฎใใพใใ
```bash
torchrun \
--nproc_per_node 8 pytorch/summarization/run_summarization.py \
--fp16 \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
TensorFlowในใฏใชใใใฏใๅๆฃใใฌใผใใณใฐใซ[`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy)ใไฝฟ็จใใใใฌใผใใณใฐในใฏใชใใใซ่ฟฝๅ ใฎๅผๆฐใ่ฟฝๅ ใใๅฟ
่ฆใฏใใใพใใใTensorFlowในใฏใชใใใฏใใใใฉใซใใง่คๆฐใฎGPUใๅฉ็จๅฏ่ฝใชๅ ดๅใซใใใใไฝฟ็จใใพใใ
## Run a script on a TPU
<frameworkcontent>
<pt>
Tensor Processing Units (TPUs)ใฏใใใใฉใผใใณในใๅ ้ใใใใใใซ็นๅฅใซ่จญ่จใใใฆใใพใใPyTorchใฏใ[XLA](https://www.tensorflow.org/xla)ใใฃใผใใฉใผใใณใฐใณใณใใคใฉใไฝฟ็จใใฆTPUsใใตใใผใใใฆใใใ่ฉณ็ดฐใซใคใใฆใฏ[ใใกใ](https://github.com/pytorch/xla/blob/master/README.md)ใใ่ฆงใใ ใใใTPUใไฝฟ็จใใใซใฏใ`xla_spawn.py`ในใฏใชใใใ่ตทๅใใ`num_cores`ๅผๆฐใไฝฟ็จใใฆไฝฟ็จใใTPUใณใขใฎๆฐใ่จญๅฎใใพใใ
```bash
python xla_spawn.py --num_cores 8 \
summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
</pt>
<tf>
ใใกใใใTensor Processing Units๏ผTPUs๏ผใฏๆง่ฝใ้ซ้ๅใใใใใซ็นๅฅใซ่จญ่จใใใฆใใพใใTensorFlowในใฏใชใใใฏใTPUsใงใใฌใผใใณใฐใใใใใซ[`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy)ใๅฉ็จใใพใใTPUใไฝฟ็จใใใซใฏใTPUใชใฝใผในใฎๅๅใ`tpu`ๅผๆฐใซๆธกใใพใใ
```bash
python run_summarization.py \
--tpu name_of_tpu_resource \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 16 \
--num_train_epochs 3 \
--do_train \
--do_eval
```
</tf>
</frameworkcontent>
## Run a script with ๐ค Accelerate
๐ค [Accelerate](https://huggingface.co/docs/accelerate)ใฏใPyTorchๅฐ็จใฎใฉใคใใฉใชใงใCPUใฎใฟใ่คๆฐใฎGPUใTPUใชใฉใใใพใใพใชใปใใใขใใใงใขใใซใใใฌใผใใณใฐใใใใใฎ็ตฑไธใใใๆนๆณใๆไพใใพใใPyTorchใฎใใฌใผใใณใฐใซใผใใๅฎๅ
จใซๅฏ่ฆๅใใชใใๅฎ่กใงใใพใใใพใ ใคใณในใใผใซใใฆใใชใๅ ดๅใฏใ๐ค Accelerateใใคใณในใใผใซใใฆใใ ใใ๏ผ
> ๆณจๆ๏ผAccelerateใฏๆฅ้ใซ้็บใ้ฒ่กใใฆใใใใใในใฏใชใใใๅฎ่กใใใซใฏaccelerateใฎgitใใผใธใงใณใใคใณในใใผใซใใๅฟ
่ฆใใใใพใ
```bash
pip install git+https://github.com/huggingface/accelerate
```
ไปฃใใใซใ`run_summarization_no_trainer.py` ในใฏใชใใใไฝฟ็จใใๅฟ
่ฆใใใใพใใ ๐ค Accelerate ใใตใใผใใใในใฏใชใใใซใฏใใใฉใซใๅ
ใซ `task_no_trainer.py` ใใกใคใซใๅซใพใใฆใใพใใใพใใๆฌกใฎใณใใณใใๅฎ่กใใฆ่จญๅฎใใกใคใซใไฝๆใใไฟๅญใใพใ๏ผ
```bash
accelerate config
```
ใในใใ่กใใ่จญๅฎใๆญฃใใๆงๆใใใฆใใใ็ขบ่ชใใฆใใ ใใ๏ผ
```bash
accelerate test
```
Now you are ready to launch the training:
```bash
accelerate launch run_summarization_no_trainer.py \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir ~/tmp/tst-summarization
```
## Use a custom dataset
่ฆ็ดในใฏใชใใใฏใCSVใพใใฏJSON Lineใใกใคใซใงใใใฐใใซในใฟใ ใใผใฟใปใใใใตใใผใใใฆใใพใใ็ฌ่ชใฎใใผใฟใปใใใไฝฟ็จใใๅ ดๅใใใใคใใฎ่ฟฝๅ ใฎๅผๆฐใๆๅฎใใๅฟ
่ฆใใใใพใใ
- `train_file`ใใใณ`validation_file`ใฏใใใฌใผใใณใฐใจใใชใใผใทใงใณใฎใใกใคใซใธใฎใในใๆๅฎใใพใใ
- `text_column`ใฏ่ฆ็ดใใใใใฎๅ
ฅๅใใญในใใงใใ
- `summary_column`ใฏๅบๅใใๅฏพ่ฑกใใญในใใงใใ
ใซในใฟใ ใใผใฟใปใใใไฝฟ็จใใ่ฆ็ดในใฏใชใใใฏใไปฅไธใฎใใใซใชใใพใ๏ผ
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--train_file path_to_csv_or_jsonlines_file \
--validation_file path_to_csv_or_jsonlines_file \
--text_column text_column_name \
--summary_column summary_column_name \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--overwrite_output_dir \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
## Test a script
ใในใฆใไบๆณ้ใใซๅไฝใใใใจใ็ขบ่ชใใใใใซใใใผใฟใปใใๅ
จไฝใๅฆ็ใใๅใซใใใผใฟใปใใใฎไธ้จใฎไพใงในใฏใชใใใๅฎ่กใใใใจใฏ่ฏใใขใคใใขใงใใไปฅไธใฎๅผๆฐใไฝฟ็จใใฆใใใผใฟใปใใใๆๅคงใตใณใใซๆฐใซๅใ่ฉฐใใพใ๏ผ
- `max_train_samples`
- `max_eval_samples`
- `max_predict_samples`
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--max_train_samples 50 \
--max_eval_samples 50 \
--max_predict_samples 50 \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
ไธ้จใฎไพใฎในใฏใชใใใฏใ`max_predict_samples`ๅผๆฐใใตใใผใใใฆใใชใใใจใใใใพใใใใฎๅผๆฐใใตใใผใใใใฆใใใใฉใใใใใใใชใๅ ดๅใฏใ`-h`ๅผๆฐใ่ฟฝๅ ใใฆ็ขบ่ชใใฆใใ ใใใ
```bash
examples/pytorch/summarization/run_summarization.py -h
```
## Resume training from checkpoint
ไปฅๅใฎใใงใใฏใใคใณใใใใใฌใผใใณใฐใๅ้ใใใใใฎๅฝน็ซใคใชใใทใงใณใใใใพใใใใใซใใใใใฌใผใใณใฐใไธญๆญใใใๅ ดๅใงใใๆๅใใใใ็ดใใใจใชใใไธญๆญใใใจใใใใๅ้ใงใใพใใใใงใใฏใใคใณใใใใใฌใผใใณใฐใๅ้ใใใใใฎ2ใคใฎๆนๆณใใใใพใใ
ๆๅใฎๆนๆณใฏใ`output_dir previous_output_dir` ๅผๆฐใไฝฟ็จใใฆใ`output_dir` ใซไฟๅญใใใๆๆฐใฎใใงใใฏใใคใณใใใใใฌใผใใณใฐใๅ้ใใๆนๆณใงใใใใฎๅ ดๅใ`overwrite_output_dir` ใๅ้คใใๅฟ
่ฆใใใใพใ๏ผ
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--output_dir previous_output_dir \
--predict_with_generate
```
2็ช็ฎใฎๆนๆณใงใฏใ`resume_from_checkpoint path_to_specific_checkpoint` ๅผๆฐใไฝฟ็จใใฆใ็นๅฎใฎใใงใใฏใใคใณใใใฉใซใใใใใฌใผใใณใฐใๅ้ใใพใใ
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--resume_from_checkpoint path_to_specific_checkpoint \
--predict_with_generate
```
## Share your model
ใในใฆใฎในใฏใชใใใฏใๆ็ต็ใชใขใใซใ [Model Hub](https://huggingface.co/models) ใซใขใใใญใผใใงใใพใใ้ๅงใใๅใซ Hugging Face ใซใญใฐใคใณใใฆใใใใจใ็ขบ่ชใใฆใใ ใใใ
```bash
huggingface-cli login
```
ๆฌกใซใในใฏใชใใใซ `push_to_hub` ๅผๆฐใ่ฟฝๅ ใใพใใใใฎๅผๆฐใฏใHugging Face ใฎใฆใผใถใผๅใจ `output_dir` ใงๆๅฎใใใใฉใซใๅใงใชใใธใใชใไฝๆใใพใใ
็นๅฎใฎๅๅใใชใใธใใชใซไปใใใซใฏใ`push_to_hub_model_id` ๅผๆฐใไฝฟ็จใใฆ่ฟฝๅ ใใพใใใใฎใชใใธใใชใฏ่ชๅ็ใซใใชใใฎๅๅ็ฉบ้ใฎไธใซใชในใใใใพใใ
ไปฅไธใฎไพใฏใ็นๅฎใฎใชใใธใใชๅใงใขใใซใใขใใใญใผใใใๆนๆณใ็คบใใฆใใพใ:
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--push_to_hub \
--push_to_hub_model_id finetuned-t5-cnn_dailymail \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
| transformers/docs/source/ja/run_scripts.md/0 | {
"file_path": "transformers/docs/source/ja/run_scripts.md",
"repo_id": "transformers",
"token_count": 8250
} | 295 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LLM prompting guide
[[open-in-colab]]
FalconใLLaMA ใชใฉใฎๅคง่ฆๆจก่จ่ชใขใใซใฏใไบๅใซใใฌใผใใณใฐใใใใใฉใณในใใฉใผใใผ ใขใใซใงใใใๆๅใฏไบๆธฌใใใใใซใใฌใผใใณใฐใใใฆใใพใใ
ๅฅๅใใญในใใไธใใใใๅ ดๅใฎๆฌกใฎใใผใฏใณใ้ๅธธใๆฐๅๅใฎใใฉใกใผใฟใใใใไฝๅใใฎใใผใฏใณใงใใฌใผใใณใฐใใใฆใใพใใ
้ทๆ้ใฎใใผใฏใณใใใฎ็ตๆใใใใใฎใขใใซใฏ้ๅธธใซๅผทๅใงๅค็จ้ใซใชใใๆฌกใฎใใใชใใจใๅฏ่ฝใซใชใใพใใ
่ช็ถ่จ่ชใใญใณใใใงใขใใซใซๆ็คบใใใใจใงใใใใซ่คๆฐใฎ NLP ใฟในใฏใ่งฃๆฑบใงใใพใใ
ๆ้ฉใชๅบๅใไฟ่จผใใใใใซใใฎใใใชใใญใณใใใ่จญ่จใใใใจใฏใๅคใใฎๅ ดๅใใใญใณใใ ใจใณใธใใขใชใณใฐใใจๅผใฐใใพใใใใญใณใใใจใณใธใใขใชใณใฐใจใฏใ
ใใชใใฎ้ใฎๅฎ้จใๅฟ
่ฆใจใใๅๅพฉใใญใปในใ่ช็ถ่จ่ชใฏใฏใใใซๆ่ปใง่กจ็พๅ่ฑใใงใ
ใใ ใใใใญใฐใฉใใณใฐ่จ่ชใใใใใใพใใใ็ใใๅฏ่ฝๆงใใใใพใใๅๆใซใ่ช็ถ่จ่ชใซใใใใญใณใใ
ๅคๅใซใฏใใชใๆๆใงใใใใญใณใใใซใใใใชๅคๆดใๅ ใใใ ใใงใใๅบๅใๅคงๅน
ใซ็ฐใชใๅ ดๅใใใใพใใ
ใในใฆใฎใฑใผในใซ้ฉๅใใใใญใณใใใไฝๆใใใใใฎๆญฃ็ขบใชใฌใทใใฏใใใพใใใใ็ ็ฉถ่
ใฏใใใคใใฎๆ่ฏใฎใฌใทใใ่ๆกใใพใใใ
ๆ้ฉใช็ตๆใใใไธ่ฒซใใฆ้ๆใใใฎใซๅฝน็ซใคๅฎ่ทตใ
ใใฎใฌใคใใงใฏใใใๅชใใ LLM ใใญใณใใใไฝๆใใใใพใใพใช NLP ใฟในใฏใ่งฃๆฑบใใใฎใซๅฝน็ซใคใใญใณใใ ใจใณใธใใขใชใณใฐใฎใในใ ใใฉใฏใใฃในใซใคใใฆ่ชฌๆใใพใใ
ๆฌกใฎใใจใๅญฆใณใพใ:
- [ใใญใณใใใฎๅบๆฌ](#basics-of-prompting)
- [LLM ใใญใณใใใฎใในใ ใใฉใฏใใฃใน](#best-practices-of-llm-prompting)
- [้ซๅบฆใชใใญใณใใ ใใฏใใใฏ: ๆฐๅใฎใใญใณใใใจๆ่ใฎ้ฃ้](#advanced-prompting-techniques)
- [ใใญใณใใใ่กจ็คบใใไปฃใใใซๅพฎ่ชฟๆดใใๅ ดๅ](#prompting-vs-fine-tuning)
<Tip>
่ฟ
้ใชใจใณใธใใขใชใณใฐใฏใLLM ๅบๅๆ้ฉๅใใญใปในใฎไธ้จใซใใใพใใใใใ 1 ใคใฎ้่ฆใช่ฆ็ด ใฏใ
ๆ้ฉใชใใญในใ็ๆๆฆ็ฅใ LLM ใ็ๆๆใซๅพ็ถใฎๅใใผใฏใณใ้ธๆใใๆนๆณใใซในใฟใใคใบใงใใพใใ
ใใฌใผใใณใฐๅฏ่ฝใชใใฉใกใผใฟใไธๅๅคๆดใใใซใใญในใใไฝๆใใพใใใใญในใ็ๆใใฉใกใผใฟใๅพฎ่ชฟๆดใใใใจใงใ
็ๆใใใใใญในใใซ็นฐใ่ฟใใๅซใพใใฆใใใใใใใไธ่ฒซๆงใใใไบบ้ใใใ้ฟใใซใชใใพใใ
ใใญในใ็ๆๆฆ็ฅใจใใฉใกใผใฟใผใฏใใฎใฌใคใใฎ็ฏๅฒๅคใงใใใใใใใฎใใใใฏใซใคใใฆ่ฉณใใใฏใๆฌกใฎใใใใฏใๅ็
งใใฆใใ ใใใ
ๆฌกใฎใฌใคใ:
* [LLM ใซใใ็ๆ](../llm_tutorial)
* [ใใญในใ็ๆๆฆ็ฅ](../generation_strategies)
</Tip>
## Basics of prompting
### Types of models
ๆๆฐใฎ LLM ใฎๅคง้จๅใฏใใใณใผใๅฐ็จใฎใใฉใณในใใฉใผใใผใงใใไพใจใใฆใฏใ[LLaMA](../model_doc/llama)ใ
[Llama2](../model_doc/llama2)ใ[Falcon](../model_doc/falcon)ใ[GPT2](../model_doc/gpt2)ใใใ ใใ้ญ้ใใๅฏ่ฝๆงใใใใพใ
ใจใณใณใผใ ใใณใผใ ใใฉใณในใใฉใผใ LLM ใๅๆงใงใใใใจใใฐใ[Flan-T5](../model_doc/flan-t5) ใ [BART](../model_doc/bart) ใงใใ
ใจใณใณใผใ ใใณใผใ ในใฟใคใซใฎใขใใซใฏ้ๅธธใๅบๅใๅ
ฅๅใซ**ๅคงใใ**ไพๅญใใ็ๆใฟในใฏใงไฝฟ็จใใใพใใ
ใใจใใฐใ็ฟป่จณใจ่ฆ็ดใงใใใใณใผใๅฐ็จใขใใซใฏใไปใฎใในใฆใฎใฟใคใใฎ็ๆใฟในใฏใซไฝฟ็จใใใพใใ
ใใคใใฉใคใณใไฝฟ็จใใฆ LLM ใงใใญในใใ็ๆใใๅ ดๅใไฝฟ็จใใฆใใ LLM ใฎใฟใคใใ็ฅใใใจใ้่ฆใงใใ
็ฐใชใใใคใใฉใคใณใไฝฟ็จใใพใใ
`text-generation`ใใคใใฉใคใณใไฝฟ็จใใฆใใณใผใใฎใฟใฎใขใใซใงๆจ่ซใๅฎ่กใใพใใ
```python
>>> from transformers import pipeline
>>> import torch
>>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
>>> generator = pipeline('text-generation', model = 'openai-community/gpt2')
>>> prompt = "Hello, I'm a language model"
>>> generator(prompt, max_length = 30)
[{'generated_text': "Hello, I'm a language model expert, so I'm a big believer in the concept that I know very well and then I try to look into"}]
```
ใจใณใณใผใใผ/ใใณใผใใผใไฝฟ็จใใฆๆจ่ซใๅฎ่กใใใซใฏใ`text2text-generation` ใใคใใฉใคใณใไฝฟ็จใใพใใ
```python
>>> text2text_generator = pipeline("text2text-generation", model = 'google/flan-t5-base')
>>> prompt = "Translate from English to French: I'm very happy to see you"
>>> text2text_generator(prompt)
[{'generated_text': 'Je suis trรจs heureuse de vous rencontrer.'}]
```
### Base vs instruct/chat models
๐ค Hub ใงๅฉ็จใงใใๆ่ฟใฎ LLM ใใงใใฏใใคใณใใฎใปใจใใฉใซใฏใbase ใจ instruct (ใพใใฏ chat) ใฎ 2 ใคใฎใใผใธใงใณใใใใพใใไพใใฐใ
[`tiiuae/falcon-7b`](https://huggingface.co/tiiuae/falcon-7b) ใใใณ [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct) ใใใใพใใ
ๅบๆฌใขใใซใฏใๆๅใฎใใญใณใใใไธใใใใใจใใซใใญในใใๅฎๆใใใใฎใซใฏๅชใใฆใใพใใใNLP ใฟในใฏใซใฏ็ๆณ็ใงใฏใใใพใใใ
ๆ็คบใซๅพใๅฟ
่ฆใใใๅ ดๅใใพใใฏไผ่ฉฑใงไฝฟ็จใใๅ ดๅใซไฝฟ็จใใพใใใใใงใๆ็คบ (ใใฃใใ) ใใผใธใงใณใ็ปๅ ดใใพใใ
ใใใใฎใใงใใฏใใคใณใใฏใๅฝไปคใจไผ่ฉฑใใผใฟใซๅบใฅใใฆไบๅใใฌใผใใณใฐใใใใใผใน ใใผใธใงใณใใใใซๅพฎ่ชฟๆดใใ็ตๆใงใใ
ใใฎ่ฟฝๅ ใฎๅพฎ่ชฟๆดใซใใใๅคใใฎ NLP ใฟในใฏใซใจใฃใฆใใ้ฉๅใช้ธๆ่ขใซใชใใพใใ
[`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct) ใงไฝฟ็จใงใใใใใคใใฎ็ฐกๅใชใใญใณใใใ็คบใใฆใฟใพใใใใ
ใใใคใใฎไธ่ฌ็ใช NLP ใฟในใฏใ่งฃๆฑบใใพใใ
### NLP tasks
ใพใใ็ฐๅขใใปใใใขใใใใพใใใใ
```bash
pip install -q transformers accelerate
```
ๆฌกใซใ้ฉๅใชใใคใใฉใคใณ (`text-generation`) ใไฝฟ็จใใฆใขใใซใใญใผใใใพใใใใ
```python
>>> from transformers import pipeline, AutoTokenizer
>>> import torch
>>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
>>> model = "tiiuae/falcon-7b-instruct"
>>> tokenizer = AutoTokenizer.from_pretrained(model)
>>> pipe = pipeline(
... "text-generation",
... model=model,
... tokenizer=tokenizer,
... torch_dtype=torch.bfloat16,
... device_map="auto",
... )
```
<Tip>
Falcon ใขใใซใฏ `bfloat16` ใใผใฟๅใไฝฟ็จใใฆใใฌใผใใณใฐใใใใใใๅใใใฎใไฝฟ็จใใใใจใใๅงใใใพใใใใใซใฏใๆ่ฟใฎ
CUDA ใฎใใผใธใงใณใซๆบๆ ใใฆใใใๆๆฐใฎใซใผใใงๆ้ฉใซๅไฝใใพใใ
</Tip>
ใใคใใฉใคใณ็ต็ฑใงใขใใซใใญใผใใใใฎใงใใใญใณใใใไฝฟ็จใใฆ NLP ใฟในใฏใ่งฃๆฑบใใๆนๆณใ่ฆใฆใฟใพใใใใ
#### Text classification
ใใญในใๅ้กใฎๆใไธ่ฌ็ใชๅฝขๅผใฎ 1 ใคใฏใปใณใใกใณใๅๆใงใใใใใใธใใฃใใใใใใฌใใฃใใใใใใฌใใฃใใใชใฉใฎใฉใใซใๅฒใๅฝใฆใพใใ
ใพใใฏใไธ้ฃใฎใใญในใใซๅฏพใใฆใไธญ็ซใใงใใไธใใใใใใญในใ (ๆ ็ปใฌใใฅใผ) ใๅ้กใใใใใซใขใใซใซๆ็คบใใใใญใณใใใไฝๆใใฆใฟใพใใใใ
ใพใๆ็คบใไธใใๆฌกใซๅ้กใใใใญในใใๆๅฎใใพใใใใฎใพใพใซใใฆใใใฎใงใฏใชใใ
ๅฟ็ญใฎๅ้ ญใซใ่ฟฝๅ ใใพใ - `"Sentiment: "`:
```python
>>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
>>> prompt = """Classify the text into neutral, negative or positive.
... Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen.
... Sentiment:
... """
>>> sequences = pipe(
... prompt,
... max_new_tokens=10,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result: Classify the text into neutral, negative or positive.
Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen.
Sentiment:
Positive
```
ใใฎ็ตๆใๅบๅใซใฏใๆ้ ใงๆไพใใใชในใใฎๅ้กใฉใใซใๅซใพใใฆใใใใใใฏๆญฃใใใฉใใซใงใใ
<Tip>
ใใญใณใใใซๅ ใใฆใ`max_new_tokens`ใใฉใกใผใฟใๆธกใใฆใใใใจใซๆฐใฅใใใใใใพใใใใใผใฏใณใฎๆฐใๅถๅพกใใพใใ
ใขใใซใ็ๆใใพใใใใใฏใๅญฆ็ฟใงใใๅคใใฎใใญในใ็ๆใใฉใกใผใฟใผใฎ 1 ใคใงใใ
[ใใญในใ็ๆๆฆ็ฅ](../generation_strategies) ใฌใคใใๅ็งใใฆใใ ใใใ
</Tip>
#### Named Entity Recognition
ๅบๆ่กจ็พ่ช่ญ (NER) ใฏใใใญในใๅ
ใฎไบบ็ฉใๅ ดๆใ็ต็นใชใฉใฎๅบๆ่กจ็พใๆค็ดขใใใฟในใฏใงใใ
ใใญใณใใใฎๆ็คบใๅคๆดใใฆใLLM ใซใใฎใฟในใฏใๅฎ่กใใใพใใใใใใใงใฏ`return_full_text = False`ใ่จญๅฎใใพใใใ
ๅบๅใซใใญใณใโโใใๅซโโใพใใชใใใใซใใพใใ
```python
>>> torch.manual_seed(1) # doctest: +IGNORE_RESULT
>>> prompt = """Return a list of named entities in the text.
... Text: The Golden State Warriors are an American professional basketball team based in San Francisco.
... Named entities:
... """
>>> sequences = pipe(
... prompt,
... max_new_tokens=15,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"{seq['generated_text']}")
- Golden State Warriors
- San Francisco
```
ใ่ฆงใฎใจใใใใขใใซใฏๆๅฎใใใใใญในใใใ 2 ใคใฎๅๅไปใใจใณใใฃใใฃใๆญฃใใ่ญๅฅใใพใใใ
#### Translation
LLM ใๅฎ่กใงใใใใ 1 ใคใฎใฟในใฏใฏ็ฟป่จณใงใใใใฎใฟในใฏใซใฏใจใณใณใผใใผ/ใใณใผใใผ ใขใใซใไฝฟ็จใใใใจใ้ธๆใงใใพใใใใใใงใฏ
ไพใ็ฐกๅใซใใใใใซใใใกใใจใใไปไบใใใ Falcon-7b-instruct ใไฝฟใ็ถใใพใใใใไธๅบฆใๆนๆณใฏๆฌกใฎใจใใใงใ
ใใญในใใฎไธ้จใ่ฑ่ชใใใคใฟใชใข่ชใซ็ฟป่จณใใใใใซใขใใซใซๆ็คบใใๅบๆฌ็ใชใใญใณใใใไฝๆใงใใพใใ
```python
>>> torch.manual_seed(2) # doctest: +IGNORE_RESULT
>>> prompt = """Translate the English text to Italian.
... Text: Sometimes, I've believed as many as six impossible things before breakfast.
... Translation:
... """
>>> sequences = pipe(
... prompt,
... max_new_tokens=20,
... do_sample=True,
... top_k=10,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"{seq['generated_text']}")
A volte, ho creduto a sei impossibili cose prima di colazione.
```
ใใใงใฏใๅบๅ็ๆๆใซใขใใซใใใๅฐใๆ่ปใซใชใใใใซ `do_sample=True` ใจ `top_k=10` ใ่ฟฝๅ ใใพใใใ
#### Text summarization
็ฟป่จณใจๅๆงใซใใใญในใใฎ่ฆ็ดใใๅบๅใๅ
ฅๅใซ**ๅคงใใ**ไพๅญใใ็ๆใฟในใฏใงใใ
ใจใณใณใผใ/ใใณใผใ ใขใใซใฎๆนใ่ฏใ้ธๆใซใชใๅฏ่ฝๆงใใใใพใใใใ ใใใใณใผใ ในใฟใคใซใฎใขใใซใใใฎใฟในใฏใซไฝฟ็จใงใใพใใ
ไปฅๅใฏใใใญใณใใใฎๅ
้ ญใซๆ็คบใ้
็ฝฎใใฆใใพใใใใใ ใใใใญใณใใใฎๆๅพใงใ
ๆ็คบใไธใใใฎใซ้ฉใใๅ ดๆใงใใใใพใใ้ๅธธใๅฝไปคใฏใฉใกใใใฎ็ซฏใซ้
็ฝฎใใใใจใใๅงใใใพใใ
```python
>>> torch.manual_seed(3) # doctest: +IGNORE_RESULT
>>> prompt = """Permaculture is a design process mimicking the diversity, functionality and resilience of natural ecosystems. The principles and practices are drawn from traditional ecological knowledge of indigenous cultures combined with modern scientific understanding and technological innovations. Permaculture design provides a framework helping individuals and communities develop innovative, creative and effective strategies for meeting basic needs while preparing for and mitigating the projected impacts of climate change.
... Write a summary of the above text.
... Summary:
... """
>>> sequences = pipe(
... prompt,
... max_new_tokens=30,
... do_sample=True,
... top_k=10,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"{seq['generated_text']}")
Permaculture is an ecological design mimicking natural ecosystems to meet basic needs and prepare for climate change. It is based on traditional knowledge and scientific understanding.
```
#### Question answering
่ณชๅๅฟ็ญใฟในใฏใฎๅ ดๅใใใญใณใใใๆฌกใฎ่ซ็ใณใณใใผใใณใใซๆง้ ๅใงใใพใ: ๆ็คบใใณใณใใญในใใ่ณชๅใ
ๅ
้ ญใฎๅ่ชใพใใฏใใฌใผใบ (`"Answer:"`) ใไฝฟ็จใใฆใใขใใซใๆไฝใใฆ็ญใใฎ็ๆใ้ๅงใใพใใ
```python
>>> torch.manual_seed(4) # doctest: +IGNORE_RESULT
>>> prompt = """Answer the question using the context below.
... Context: Gazpacho is a cold soup and drink made of raw, blended vegetables. Most gazpacho includes stale bread, tomato, cucumbers, onion, bell peppers, garlic, olive oil, wine vinegar, water, and salt. Northern recipes often include cumin and/or pimentรณn (smoked sweet paprika). Traditionally, gazpacho was made by pounding the vegetables in a mortar with a pestle; this more laborious method is still sometimes used as it helps keep the gazpacho cool and avoids the foam and silky consistency of smoothie versions made in blenders or food processors.
... Question: What modern tool is used to make gazpacho?
... Answer:
... """
>>> sequences = pipe(
... prompt,
... max_new_tokens=10,
... do_sample=True,
... top_k=10,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result: Modern tools are used, such as immersion blenders
```
#### Reasoning
LLM ใซใจใฃใฆๆจ่ซใฏๆใๅฐ้ฃใชใฟในใฏใฎ 1 ใคใงใใใ่ฏใ็ตๆใ้ๆใใใซใฏใๅคใใฎๅ ดๅใๆฌกใฎใใใช้ซๅบฆใชใใญใณใใ ใใฏใใใฏใ้ฉ็จใใๅฟ
่ฆใใใใพใใ
[Chain-of-thought](#chain-of-thought)ใ
ๅบๆฌ็ใชใใญใณใใใไฝฟ็จใใฆใๅ็ดใช็ฎ่กใฟในใฏใซ้ขใใใขใใซๆจ่ซใไฝๆใงใใใใฉใใ่ฉฆใใฆใฟใพใใใใ
```python
>>> torch.manual_seed(5) # doctest: +IGNORE_RESULT
>>> prompt = """There are 5 groups of students in the class. Each group has 4 students. How many students are there in the class?"""
>>> sequences = pipe(
... prompt,
... max_new_tokens=30,
... do_sample=True,
... top_k=10,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result:
There are a total of 5 groups, so there are 5 x 4=20 students in the class.
```
ๆญฃใใ๏ผใใๅฐใ่ค้ใใๅขใใใฆใๅบๆฌ็ใชใใญใณใใใงๅ้กใ่งฃๆฑบใงใใใใฉใใใ็ขบ่ชใใฆใฟใพใใใใ
```python
>>> torch.manual_seed(6) # doctest: +IGNORE_RESULT
>>> prompt = """I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor. My partner then bought 6 more muffins and ate 2. How many muffins do we now have?"""
>>> sequences = pipe(
... prompt,
... max_new_tokens=10,
... do_sample=True,
... top_k=10,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result:
The total number of muffins now is 21
```
ใใใฏ้้ใฃใ็ญใใงใใ12 ใงใใๅฟ
่ฆใใใใพใใใใฎๅ ดๅใใใญใณใใใๅบๆฌ็ใใใใใ้ธๆๅ
ๅฎนใๅๅ ใงใใๅฏ่ฝๆงใใใใพใใ
็ตๅฑใฎใจใใใFalcon ใฎๆๅฐใใผใธใงใณใ้ธๆใใพใใใใใใใใตใคใบใฎใขใใซใงใฏๆจ่ซใๅฐ้ฃใงใใใใใๅคงใใชใขใใซใงใฏ
ใขใใซใฎใใใฉใผใใณในใๅไธใใๅฏ่ฝๆงใใใใพใใ
## Best practices of LLM prompting
ใฌใคใใฎใใฎใปใฏใทใงใณใงใฏใใใญใณใใใฎ็ตๆใๆนๅใใๅพๅใซใใใในใ ใใฉใฏใใฃในใฎใชในใใใพใจใใพใใใ
* ไฝฟ็จใใใขใใซใ้ธๆใใๅ ดๅใฏใๆๆฐใใคๆใๆฉ่ฝ็ใชใขใใซใฎๆนใใใใฉใผใใณในใๅไธใใๅฏ่ฝๆงใใใใพใใ
* ใทใณใใซใง็ญใใใญใณใใใใๅงใใฆใใใใใ็นฐใ่ฟใใพใใ
* ๆ็คบใฏใใญใณใใใฎๆๅใพใใฏๆๅพใซๅ
ฅๅใใฆใใ ใใใๅคง่ฆๆจกใชใณใณใใญในใใๆฑใๅ ดๅใใขใใซใฏใใพใใพใชๆ้ฉๅใ้ฉ็จใใฆใใขใใณใทใงใณใฎ่ค้ใใไบๆฌก็ใซๆกๅคงใใใฎใ้ฒใใพใใใใใซใใใใขใใซใฏใใญใณใใใฎ้ไธญใใใๆๅใพใใฏๆๅพใซๆณจๆใๆใใใใซใชใใพใใ
* ๆ็คบใจใใใใ้ฉ็จใใใใใญในใใๆ็ขบใซๅบๅฅใใฆใใ ใใใใใใซใคใใฆใฏใๆฌกใฎใปใฏใทใงใณใง่ฉณใใ่ชฌๆใใพใใ
* ใฟในใฏใจๆใพใใ็ตๆ (ใใฎๅฝขๅผใ้ทใใในใฟใคใซใ่จ่ชใชใฉ) ใซใคใใฆๅทไฝ็ใใค่ชฌๆ็ใซใใพใใ
* ๆๆงใช่ชฌๆใๆ็คบใฏ้ฟใใฆใใ ใใใ
* ใไฝใใใฆใฏใใใชใใใใจใใๆ็คบใงใฏใชใใใไฝใใในใใใใจใใๆ็คบใๅชๅใใพใใ
* ๆๅใฎๅ่ชใๆธใใฆ (ใพใใฏใขใใซใฎๆๅใฎๆใๅงใใฆ)ใๅบๅใๆญฃใใๆนๅใซใๅฐใใใพใใ
* [Few-shot prompting](#few-shot-prompting) ใ [Chain-of-thought](#chain-of-thought) ใชใฉใฎ้ซๅบฆใชใใฏใใใฏใไฝฟ็จใใพใใ
* ใใพใใพใชใขใใซใงใใญใณใใใใในใใใฆใใใฎๅ็ขๆงใ่ฉไพกใใพใใ
* ใใญใณใใใฎใใผใธใงใณใ็ขบ่ชใใใใใฉใผใใณในใ่ฟฝ่ทกใใพใใ
## Advanced prompting techniques
### Few-shot prompting
ไธ่จใฎใปใฏใทใงใณใฎๅบๆฌ็ใชใใญใณใใใฏใใใผใญใทใงใใใใใญใณใใใฎไพใงใใใคใพใใใขใใซใซใฏใใงใซไธใใใใฆใใพใใ
ๆ็คบใจใณใณใใญในใใฏใใใพใใใ่งฃๆฑบ็ญใๅซใไพใฏใใใพใใใ้ๅธธใๅฝไปคใใผใฟใปใใใซๅบใฅใใฆๅพฎ่ชฟๆดใใใ LLM
ใใฎใใใชใใผใญใทใงใใใใฟในใฏใงใๅชใใใใใฉใผใใณในใ็บๆฎใใพใใใใ ใใใฟในใฏใใใ่ค้ใงใใฃใใๅพฎๅฆใช็นใใใฃใใใใๅ ดๅใใใใพใใ
ๅบๅใซใฏใๅฝไปคใ ใใงใฏใขใใซใ็่งฃใงใใชใใใใคใใฎ่ฆไปถใใใใพใใใใฎๅ ดๅใๆฌกใฎใใจใใงใใพใใ
ๅฐๆฐใทใงใใ ใใญใณใใใจๅผใฐใใใใฏใใใฏใ่ฉฆใใฆใใ ใใใ
ๅฐๆฐใทใงใใ ใใญใณใใใงใฏใใขใใซใซใใใฉใผใใณในใๅไธใใใใใใฎใใๅคใใฎใณใณใใญในใใๆไพใใใใญใณใใๅ
ใฎไพใๆไพใใใพใใ
ไพใงใฏใไพใฎใใฟใผใณใซๅพใฃใฆๅบๅใ็ๆใใใใใซใขใใซใๆกไปถไปใใใพใใ
ไปฅไธใซไพใ็คบใใพใใ
```python
>>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
>>> prompt = """Text: The first human went into space and orbited the Earth on April 12, 1961.
... Date: 04/12/1961
... Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon.
... Date:"""
>>> sequences = pipe(
... prompt,
... max_new_tokens=8,
... do_sample=True,
... top_k=10,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result: Text: The first human went into space and orbited the Earth on April 12, 1961.
Date: 04/12/1961
Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon.
Date: 09/28/1960
```
ไธ่จใฎใณใผใ ในใใใใใงใฏใใขใใซใธใฎ็ฎ็ใฎๅบๅใ็คบใใใใซ 1 ใคใฎไพใไฝฟ็จใใพใใใใใใใฃใฆใใใใฏใ
ใใฏใณใทใงใใใใใญใณใใใใใ ใใใฟในใฏใฎ่ค้ใใซๅฟใใฆใ่คๆฐใฎไพใไฝฟ็จใใๅฟ
่ฆใใใๅ ดๅใใใใพใใ
ๆฐๅใฎใใญใณใใๆๆณใฎๅถ้:
- LLM ใฏไพใฎใใฟใผใณใ็่งฃใงใใพใใใใใใใฎๆๆณใฏ่ค้ใชๆจ่ซใฟในใฏใงใฏใใพใๆฉ่ฝใใพใใใ
- ๅฐๆฐใทใงใใใฎใใญใณใใใงใฏใ้ทใใใญใณใใใไฝๆใใๅฟ
่ฆใใใใพใใๅคง้ใฎใใผใฏใณใๅซใใใญใณใใใงใฏใ่จ็ฎ้ใจๅพ
ใกๆ้ใๅขๅ ใใๅฏ่ฝๆงใใใใพใใใใญใณใใใฎ้ทใใซใๅถ้ใใใใพใใ
- ๅคใใฎไพใไธใใใจใใขใใซใๅญฆ็ฟใใใคใใใฎใชใใฃใใใฟใผใณใๅญฆ็ฟใใใใจใใใใพใใ 3็ช็ฎใฎๆ ็ปใฌใใฅใผใฏใใคใๅฆๅฎ็ใ ใจใใใใจใ
### Chain-of-thought
ๆ่้ฃ้ (CoT) ใใญใณใใใฏใใขใใซใๅพฎ่ชฟๆดใใฆไธญ้ๆจ่ซในใใใใ็ๆใใๆนๅใใๆๆณใงใใ
่ค้ใชๆจ่ซใฟในใฏใฎ็ตๆใ
ใขใใซใๆไฝใใฆๆจ่ซในใใใใ็ๆใใใซใฏใ2 ใคใฎๆนๆณใใใใพใใ
- ่ณชๅใซๅฏพใใ่ฉณ็ดฐใชๅ็ญใๅซใไพใ็คบใใๅ้กใซๅฏพๅฆใใๆนๆณใใขใใซใซ็คบใใใจใงใๆฐๅใฎใใญใณใใใ่กจ็คบใใพใใ
- ใในใใใใใจใซ่ใใฆใฟใพใใใใใพใใฏใๆทฑๅผๅธใใฆใๅ้กใในใใใใใจใซ่งฃๆฑบใใฆใใ ใใใใชใฉใฎใใฌใผใบใ่ฟฝๅ ใใฆใขใใซใซๆจ่ซใๆ็คบใใพใใ
[ๆจ่ซใปใฏใทใงใณ](#reasoning) ใฎใใใฃใณใฎไพใซ CoT ใใฏใใใฏใ้ฉ็จใใใใๅคงใใชใขใใซใไฝฟ็จใใใจใ
[HuggingChat](https://huggingface.co/chat/)ใง้ในใ(`tiiuae/falcon-180B-chat`)ใชใฉใ
ๆจ่ซ็ตๆใฏๅคงๅน
ใซๆนๅใใใพใใ
```text
Let's go through this step-by-step:
1. You start with 15 muffins.
2. You eat 2 muffins, leaving you with 13 muffins.
3. You give 5 muffins to your neighbor, leaving you with 8 muffins.
4. Your partner buys 6 more muffins, bringing the total number of muffins to 14.
5. Your partner eats 2 muffins, leaving you with 12 muffins.
Therefore, you now have 12 muffins.
```
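As a minimal sketch of the second approach described above (adding a reasoning cue to the prompt), the snippet below reuses the `pipe` object created earlier in this guide with the Falcon-7b-instruct checkpoint. The exact text produced will vary with the sampling settings and the model size; this is only an illustration, not a guaranteed result.

```python
# Reuses `pipe` (the text-generation pipeline defined earlier in this guide).
prompt = """I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor. My partner then bought 6 more muffins and ate 2. How many muffins do we now have?
Let's go through this step-by-step:
"""

sequences = pipe(
    prompt,
    max_new_tokens=80,
    do_sample=True,
    top_k=10,
    return_full_text=False,
)

for seq in sequences:
    # The cue "Let's go through this step-by-step:" nudges the model to
    # produce intermediate reasoning steps before the final answer.
    print(seq["generated_text"])
```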
## Prompting vs fine-tuning
ใใญใณใใใๆ้ฉๅใใใใจใงๅชใใ็ตๆใ้ๆใงใใพใใใใขใใซใๅพฎ่ชฟๆดใใใใฉใใใซใคใใฆใฏใพใ ๆๆกใใใใใใใพใใใ
ใใชใใฎๅ ดๅใซใฏใใฃใจใใพใใใใงใใใใใใๅฐ่ฆๆจกใชใขใใซใๅพฎ่ชฟๆดใใใใจใๅฅฝใพใใใชใใทใงใณใงใใๅ ดๅใฎใใใคใใฎใทใใชใชใๆฌกใซ็คบใใพใใ
- ใใกใคใณใ LLM ใไบๅใซใใฌใผใใณใฐใใใใใฎใจๅคงใใ็ฐใชใฃใฆใใใๅบ็ฏใชใใญใณใใๆ้ฉๅใงใฏๅๅใช็ตๆใๅพใใใพใใใงใใใ
- ใขใใซใไฝใชใฝใผใน่จ่ชใง้ฉๅใซๅไฝใใๅฟ
่ฆใใใใพใใ
- ๅณๆ ผใช่ฆๅถใฎไธใซใใๆฉๅฏใใผใฟใงใขใใซใใใฌใผใใณใฐใใๅฟ
่ฆใใใใพใใ
- ใณในใใใใฉใคใใทใผใใคใณใใฉในใใฉใฏใใฃใใพใใฏใใฎไปใฎๅถ้ใซใใใๅฐ่ฆๆจกใชใขใใซใไฝฟ็จใใๅฟ
่ฆใใใใพใใ
ไธ่จใฎใในใฆใฎไพใงใๅๅใชๅคงใใใฎใใกใคใซใใใงใซๆใฃใฆใใใใ็ฐกๅใซๅ
ฅๆใงใใใใ็ขบ่ชใใๅฟ
่ฆใใใใพใใ
ใใกใคใณๅบๆใฎใใผใฟใปใใใๅ็็ใชใณในใใงใขใใซใๅพฎ่ชฟๆดใงใใพใใๅๅใชๆ้ใจใชใฝใผในใๅฟ
่ฆใซใชใใพใ
ใขใใซใๅพฎ่ชฟๆดใใพใใ
ไธ่จใฎไพใๅฝใฆใฏใพใใชใๅ ดๅใฏใใใญใณใใใๆ้ฉๅใใๆนใๆ็ใงใใใใจใใใใใพใใ
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Summary of the tokenizers
[[open-in-colab]]
ใใฎใใผใธใงใฏใใใผใฏใใคใผใผใทใงใณใซใคใใฆ่ฉณใใ่ฆใฆใใใพใใ
<Youtube id="VFp38yj8h3A"/>
[ๅๅฆ็ใฎใใฅใผใใชใขใซ](preprocessing)ใง่ฆใใใใซใใใญในใใใใผใฏใณๅใใใใจใฏใใใใๅ่ชใพใใฏใตใใฏใผใใซๅๅฒใใใใใใใซใใฏใขใใใใผใใซใไปใใฆIDใซๅคๆใใใใจใงใใๅ่ชใพใใฏใตใใฏใผใใIDใซๅคๆใใใใจใฏ็ฐกๅใงใใฎใงใใใฎ่ฆ็ดใงใฏใใญในใใๅ่ชใพใใฏใตใใฏใผใใซๅๅฒใใ๏ผใคใพใใใใญในใใใใผใฏใใคใบใใ๏ผใใจใซ็ฆ็นใๅฝใฆใพใใๅ
ทไฝ็ใซใฏใ๐ค Transformersใงไฝฟ็จใใใ3ใคใฎไธป่ฆใชใใผใฏใใคใถใ[Byte-Pair Encoding๏ผBPE๏ผ](#byte-pair-encoding)ใ[WordPiece](#wordpiece)ใใใใณ[SentencePiece](#sentencepiece)ใ่ฆใฆใใฉใฎใขใใซใใฉใฎใใผใฏใใคใถใฟใคใใไฝฟ็จใใฆใใใใฎไพใ็คบใใพใใ
ๅใขใใซใใผใธใงใฏใไบๅใใฌใผใใณใฐๆธใฟใขใใซใใฉใฎใใผใฏใใคใถใฟใคใใไฝฟ็จใใฆใใใใ็ฅใใใใซใ้ข้ฃใใใใผใฏใใคใถใฎใใญใฅใกใณใใ็ขบ่ชใงใใพใใไพใใฐใ[`BertTokenizer`]ใ่ฆใใจใใขใใซใ[WordPiece](#wordpiece)ใไฝฟ็จใใฆใใใใจใใใใใพใใ
## Introduction
ใใญในใใใใๅฐใใชใใฃใณใฏใซๅๅฒใใใใจใฏใ่ฆใใไปฅไธใซ้ฃใใใฟในใฏใงใใใ่คๆฐใฎๆนๆณใใใใพใใไพใใฐใๆฌกใฎๆใ่ใใฆใฟใพใใใใใ"Don't you love ๐ค Transformers? We sure do."ใ
<Youtube id="nhJxYji1aho"/>
ใใฎใใญในใใใใผใฏใณๅใใ็ฐกๅใชๆนๆณใฏใในใใผในใงๅๅฒใใใใจใงใใใใใซใใใไปฅไธใฎใใใซใชใใพใ๏ผ
```
["Don't", "you", "love", "๐ค", "Transformers?", "We", "sure", "do."]
```
ใใใฏๅ็็ใช็ฌฌไธๆญฉใงใใใใใผใฏใณ "Transformers?" ใจ "do." ใ่ฆใใจใๅฅ่ชญ็นใๅ่ช "Transformer" ใจ "do" ใซ็ตๅใใใฆใใใใจใใใใใใใใฏๆ้ฉใงใฏใใใพใใใๅฅ่ชญ็นใ่ๆ
ฎใซๅ
ฅใใในใใงใใขใใซใๅ่ชใจใใใซ็ถใๅฏ่ฝๆงใฎใใใในใฆใฎๅฅ่ชญ็น่จๅทใฎ็ฐใชใ่กจ็พใๅญฆใฐใชใใใฐใชใใชใใใจใ้ฟใใในใใงใใใใใซใใใใขใใซใๅญฆใฐใชใใใฐใชใใชใ่กจ็พใฎๆฐใ็็บ็ใซๅขๅ ใใพใใๅฅ่ชญ็นใ่ๆ
ฎใซๅ
ฅใใๅ ดๅใไพๆใฎใใผใฏใณๅใฏๆฌกใฎใใใซใชใใพใ๏ผ
```
["Don", "'", "t", "you", "love", "๐ค", "Transformers", "?", "We", "sure", "do", "."]
```
ใใ ใใๅ่ชใ"Don't"ใใใใผใฏใณๅใใๆนๆณใซ้ขใใฆใฏใไธๅฉใชๅด้ขใใใใพใใ ใ"Don't"ใใฏใ"do not"ใใ่กจใใฆใใใใใใ["Do", "n't"]ใใจใใฆใใผใฏใณๅใใๆนใ้ฉใใฆใใพใใใใใใไบๆใ่ค้ใซใชใใๅใขใใซใ็ฌ่ชใฎใใผใฏใใคใถใผใฟใคใใๆใค็็ฑใฎไธ้จใงใใใใพใใใใญในใใใใผใฏใณๅใใใใใซ้ฉ็จใใใซใผใซใซๅฟใใฆใๅใใใญในใใซๅฏพใใฆ็ฐใชใใใผใฏใใคใบใใใๅบๅใ็ๆใใใพใใไบๅใใฌใผใใณใฐๆธใฟใขใใซใฏใใใฌใผใใณใฐใใผใฟใใใผใฏใใคใบใใใฎใซไฝฟ็จใใใใซใผใซใจๅใใซใผใซใงใใผใฏใใคใบใใใๅ
ฅๅใๆไพใใๅ ดๅใซใฎใฟๆญฃๅธธใซๆฉ่ฝใใพใใ
[spaCy](https://spacy.io/)ใจ[Moses](http://www.statmt.org/moses/?n=Development.GetStarted)ใฏใ2ใคใฎไบบๆฐใฎใใใซใผใซใใผในใฎใใผใฏใใคใถใผใงใใใใใใ็งใใกใฎไพใซ้ฉ็จใใใจใ*spaCy*ใจ*Moses*ใฏๆฌกใฎใใใชๅบๅใ็ๆใใพใ๏ผ
```
["Do", "n't", "you", "love", "๐ค", "Transformers", "?", "We", "sure", "do", "."]
```
็ฉบ็ฝใจๅฅ่ชญ็นใฎใใผใฏใณๅใใใใณใซใผใซใใผในใฎใใผใฏใณๅใไฝฟ็จใใใฆใใใใจใใใใใพใใ็ฉบ็ฝใจๅฅ่ชญ็นใฎใใผใฏใณๅใใใใณใซใผใซใใผในใฎใใผใฏใณๅใฏใๆใๅ่ชใซๅๅฒใใใใจใใใใใใซๅฎ็พฉใใใๅ่ชใใผใฏใณๅใฎไพใงใใใใญในใใใใๅฐใใชใใฃใณใฏใซๅๅฒใใใใใฎๆใ็ดๆ็ใชๆนๆณใงใใไธๆนใใใฎใใผใฏใณๅๆนๆณใฏๅคง่ฆๆจกใชใใญในใใณใผใในใซๅฏพใใฆๅ้กใๅผใ่ตทใใใใจใใใใพใใใใฎๅ ดๅใ็ฉบ็ฝใจๅฅ่ชญ็นใฎใใผใฏใณๅใฏ้ๅธธใ้ๅธธใซๅคงใใช่ชๅฝ๏ผใในใฆใฎไธๆใชๅ่ชใจใใผใฏใณใฎใปใใ๏ผใ็ๆใใพใใไพใใฐใ[Transformer XL](model_doc/transformerxl)ใฏ็ฉบ็ฝใจๅฅ่ชญ็นใฎใใผใฏใณๅใไฝฟ็จใใฆใใใ่ชๅฝใตใคใบใฏ267,735ใงใ๏ผ
ใใฎใใใชๅคงใใช่ชๅฝใตใคใบใฏใใขใใซใซ้ๅธธใซๅคงใใชๅใ่พผใฟ่กๅใๅ
ฅๅใใใณๅบๅใฌใคใคใผใจใใฆๆใใใใใจใๅผทๅถใใใกใขใชใใใณๆ้ใฎ่ค้ใใฎๅขๅ ใๅผใ่ตทใใใพใใไธ่ฌ็ใซใใใฉใณในใใฉใผใใผใขใใซใฏใ็นใซๅไธใฎ่จ่ชใงไบๅใใฌใผใใณใฐใใใๅ ดๅใ50,000ใ่ถ
ใใ่ชๅฝใตใคใบใๆใคใใจใฏใปใจใใฉใใใพใใใ
ใใใใฃใฆใใทใณใใซใช็ฉบ็ฝใจๅฅ่ชญ็นใฎใใผใฏใณๅใไธๅๅใชๅ ดๅใใชใๅใซๆๅญๅไฝใงใใผใฏใณๅใใชใใฎใใจใใ็ๅใ็ใใพใใ๏ผ
<Youtube id="ssLq_EK2jLE"/>
ๆๅญๅไฝใฎใใผใฏใณๅใฏ้ๅธธใซใทใณใใซใงใใใใกใขใชใจๆ้ใฎ่ค้ใใๅคงๅน
ใซๅๆธใงใใพใใใใขใใซใซๆๅณใฎใใๅ
ฅๅ่กจ็พใๅญฆ็ฟใใใใใจใ้ๅธธใซ้ฃใใใชใใพใใใใจใใฐใๆๅญใ"t"ใใฎใใใฎๆๅณใฎใใใณใณใใญในใ็ฌ็ซใฎ่กจ็พใๅญฆ็ฟใใใใจใฏใๅ่ชใ"today"ใใฎใใใฎใณใณใใญในใ็ฌ็ซใฎ่กจ็พใๅญฆ็ฟใใใใใใฏใใใซ้ฃใใใงใใใใฎใใใๆๅญๅไฝใฎใใผใฏใณๅใฏใใฐใใฐใใใฉใผใใณในใฎไฝไธใไผดใใพใใใใใใฃใฆใใใฉใณในใใฉใผใใผใขใใซใฏๅ่ชใฌใใซใจๆๅญใฌใใซใฎใใผใฏใณๅใฎใใคใใชใใใงใใ**ใตใใฏใผใ**ใใผใฏใณๅใไฝฟ็จใใฆใไธกๆนใฎไธ็ใฎๅฉ็นใๆดปใใใพใใ
## Subword tokenization
<Youtube id="zHvTiHr506c"/>
ใตใใฏใผใใใผใฏใณๅใขใซใดใชใบใ ใฏใ้ ป็นใซไฝฟ็จใใใๅ่ชใใใๅฐใใชใตใใฏใผใใซๅๅฒใในใใงใฏใชใใใ็ใใๅ่ชใฏๆๅณใฎใใใตใใฏใผใใซๅ่งฃใใใใจใใๅๅใซไพๅญใใฆใใพใใใใจใใฐใใ"annoyingly"ใใฏ็ใใๅ่ชใจ่ฆใชใใใใใฎๅ่ชใฏใ"annoying"ใใจใ"ly"ใใซๅ่งฃใใใใใใใใพใใใ็ฌ็ซใใใ"annoying"ใใจใ"ly"ใใฏใใ้ ป็นใซ็พใใพใใใใ"annoyingly"ใใฎๆๅณใฏใ"annoying"ใใจใ"ly"ใใฎๅๆ็ใชๆๅณใซใใฃใฆไฟๆใใใพใใใใใฏ็นใซใใซใณ่ชใชใฉใฎ็ตๅ่จ่ชใงๅฝน็ซใกใพใใใใใงใฏใตใใฏใผใใ้ฃ็ตใใฆ๏ผใปใผ๏ผไปปๆใฎ้ทใ่ค้ใชๅ่ชใๅฝขๆใงใใพใใ
ใตใใฏใผใใใผใฏใณๅใซใใใใขใใซใฏๅ็็ใช่ชๅฝใตใคใบใๆใคใใจใใงใใๆๅณใฎใใใณใณใใญในใ็ฌ็ซใฎ่กจ็พใๅญฆ็ฟใงใใพใใใใใซใใตใใฏใผใใใผใฏใณๅใซใใใใขใใซใฏไปฅๅใซ่ฆใใใจใฎใชใๅ่ชใๅฆ็ใใใใใใๆข็ฅใฎใตใใฏใผใใซๅ่งฃใใใใจใใงใใพใใไพใใฐใ[`~transformers.BertTokenizer`]ใฏ`"I have a new GPU!"`ใไปฅไธใฎใใใซใใผใฏใณๅใใพใ๏ผ
```py
>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> tokenizer.tokenize("I have a new GPU!")
["i", "have", "a", "new", "gp", "##u", "!"]
```
ใuncasedใใขใใซใ่ๆ
ฎใใฆใใใใใใพใๆใๅฐๆๅญใซๅคๆใใพใใใใใผใฏใใคใถใฎ่ชๅฝใซใ["i", "have", "a", "new"]ใใจใใๅ่ชใๅญๅจใใใใจใใใใใพใใใใ"gpu"ใใจใใๅ่ชใฏๅญๅจใใพใใใใใใใฃใฆใใใผใฏใใคใถใฏใ"gpu"ใใๆข็ฅใฎใตใใฏใผใใ["gp"ใ"##u"]ใใซๅๅฒใใพใใใใใงใ"##"ใใฏใใใผใฏใณใฎใใณใผใใพใใฏใใผใฏใใคใผใผใทใงใณใฎ้่ปขใฎใใใซใใใผใฏใณใฎๅใฎ้จๅใซในใใผในใชใใงๆฅ็ถใใๅฟ
่ฆใใใใใจใๆๅณใใพใใ
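To illustrate how the `"##"` continuation markers are undone at decoding time, here is a short sketch that re-creates the BERT tokenizer from the example above; `convert_tokens_to_string` merges the subword pieces back together.

```python
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

tokens = tokenizer.tokenize("I have a new GPU!")
# Pieces prefixed with "##" (such as "##u") are re-attached to the previous
# token without a space, so the round trip reproduces the lower-cased input.
print(tokens)
print(tokenizer.convert_tokens_to_string(tokens))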
ๅฅใฎไพใจใใฆใ[`~transformers.XLNetTokenizer`]ใฏไปฅไธใฎใใใซไปฅๅใฎใตใณใใซใใญในใใใใผใฏใณๅใใพใ๏ผ
```py
>>> from transformers import XLNetTokenizer
>>> tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> tokenizer.tokenize("Don't you love ๐ค Transformers? We sure do.")
["โDon", "'", "t", "โyou", "โlove", "โ", "๐ค", "โ", "Transform", "ers", "?", "โWe", "โsure", "โdo", "."]
```
ใใใใฎใโใใฎๆๅณใซใคใใฆใฏใ[SentencePiece](#sentencepiece)ใ่ฆใใจใใซ่ฉณใใ่ชฌๆใใพใใใ่ฆงใฎ้ใใใTransformersใใจใใ็ใใๅ่ชใฏใใใ้ ป็นใซ็พใใใตใใฏใผใใTransformใใจใersใใซๅๅฒใใใฆใใพใใ
ใใฆใ็ฐใชใใตใใฏใผใใใผใฏใณๅใขใซใดใชใบใ ใใฉใฎใใใซๅไฝใใใใ่ฆใฆใฟใพใใใใใใใใฎใใผใฏใใคใผใผใทใงใณใขใซใดใชใบใ ใฏใในใฆใ้ๅธธใฏๅฏพๅฟใใใขใใซใใใฌใผใใณใฐใใใใณใผใในใง่กใใใๅฝขๅผใฎใใฌใผใใณใฐใซไพๅญใใฆใใพใใ
<a id='byte-pair-encoding'></a>
### Byte-Pair Encoding๏ผBPE๏ผ
Byte-Pair Encoding๏ผBPE๏ผใฏใ[Neural Machine Translation of Rare Words with Subword Units๏ผSennrich et al., 2015๏ผ](https://arxiv.org/abs/1508.07909)ใงๅฐๅ
ฅใใใพใใใBPEใฏใใใฌใผใใณใฐใใผใฟใๅ่ชใซๅๅฒใใใใชใใผใฏใใคใถใซไพๅญใใฆใใพใใใใชใใผใฏใใคใผใผใทใงใณใฏใ็ฉบ็ฝใฎใใผใฏใใคใผใผใทใงใณใชใฉใ้ๅธธใซๅ็ดใชใใฎใงใใใใจใใใใพใใไพใใฐใ[GPT-2](model_doc/gpt2)ใ[RoBERTa](model_doc/roberta)ใงใใใใ้ซๅบฆใชใใชใใผใฏใใคใผใผใทใงใณใซใฏใใซใผใซใใผในใฎใใผใฏใใคใผใผใทใงใณ๏ผ[XLM](model_doc/xlm)ใ[FlauBERT](model_doc/flaubert)ใชใฉใๅคง้จๅใฎ่จ่ชใซMosesใไฝฟ็จ๏ผใใ[GPT](model_doc/gpt)๏ผSpacyใจftfyใไฝฟ็จใใฆใใฌใผใใณใฐใณใผใในๅ
ใฎๅๅ่ชใฎ้ ปๅบฆใๆฐใใ๏ผใชใฉใๅซใพใใพใใ
ใใชใใผใฏใใคใผใผใทใงใณใฎๅพใไธๆใฎๅ่ชใปใใใไฝๆใใใๅๅ่ชใใใฌใผใใณใฐใใผใฟใงๅบ็พใใ้ ปๅบฆใๆฑบๅฎใใใพใใๆฌกใซใBPEใฏใใผใน่ชๅฝใไฝๆใใใใผใน่ชๅฝใฎไบใคใฎใทใณใใซใใๆฐใใใทใณใใซใๅฝขๆใใใใใฎใใผใธใซใผใซใๅญฆ็ฟใใพใใใใฎใใญใปในใฏใ่ชๅฝใๆๆใฎ่ชๅฝใตใคใบใซ้ใใใพใง็ถใใใใพใใใชใใๆๆใฎ่ชๅฝใตใคใบใฏใใผใฏใใคใถใใใฌใผใใณใฐใใๅใซๅฎ็พฉใใใใคใใผใใฉใกใผใฟใงใใใใจใซๆณจๆใใฆใใ ใใใ
ไพใจใใฆใใใชใใผใฏใใคใผใผใทใงใณใฎๅพใๆฌกใฎใปใใใฎๅ่ชใจใใฎๅบ็พ้ ปๅบฆใๆฑบๅฎใใใใจไปฎๅฎใใพใใใ๏ผ
```
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
```
ใใใใฃใฆใใใผใน่ชๅฝใฏใ["b", "g", "h", "n", "p", "s", "u"]ใใงใใใในใฆใฎๅ่ชใใใผใน่ชๅฝใฎใทใณใใซใซๅๅฒใใใจใๆฌกใฎใใใซใชใใพใ๏ผ
```
("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5)
```
ใใฎๅพใBPEใฏๅฏ่ฝใชใในใฆใฎใทใณใใซใใขใฎ้ ปๅบฆใๆฐใใๆใ้ ป็นใซ็บ็ใใใทใณใใซใใขใ้ธๆใใพใใไธ่จใฎไพใงใฏใ`"h"`ใฎๅพใซ`"u"`ใ15ๅ๏ผ`"hug"`ใฎ10ๅใ`"hugs"`ใฎ5ๅ๏ผๅบ็พใใพใใใใใใๆใ้ ป็นใชใทใณใใซใใขใฏใๅ่จใง20ๅ๏ผ`"u"`ใฎ10ๅใ`"g"`ใฎ5ๅใ`"u"`ใฎ5ๅ๏ผๅบ็พใใ`"u"`ใฎๅพใซ`"g"`ใ็ถใใทใณใใซใใขใงใใใใใใฃใฆใใใผใฏใใคใถใๆๅใซๅญฆ็ฟใใใใผใธใซใผใซใฏใ`"u"`ใฎๅพใซ`"g"`ใ็ถใใในใฆใฎ`"u"`ใทใณใใซใไธ็ทใซใฐใซใผใๅใใใใจใงใใๆฌกใซใ`"ug"`ใ่ชๅฝใซ่ฟฝๅ ใใใพใใๅ่ชใฎใปใใใฏๆฌกใซใชใใพใ๏ผ
```
("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5)
```
ๆฌกใซใBPEใฏๆฌกใซๆใไธ่ฌ็ใชใทใณใใซใใขใ่ญๅฅใใพใใใใใฏใ"u"ใใซ็ถใใฆใ"n"ใใงใ16ๅๅบ็พใใพใใใใใใฃใฆใใ"u"ใใจใ"n"ใใฏใ"un"ใใซ็ตๅใใใ่ชๅฝใซ่ฟฝๅ ใใใพใใๆฌกใซๆใ้ ปๅบฆใฎ้ซใใทใณใใซใใขใฏใใ"h"ใใซ็ถใใฆใ"ug"ใใงใ15ๅๅบ็พใใพใใๅใณใใขใ็ตๅใใใใhugใใ่ชๅฝใซ่ฟฝๅ ใงใใพใใ
ใใฎๆฎต้ใงใฏใ่ชๅฝใฏ`["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]`ใงใใใไธๆใฎๅ่ชใฎใปใใใฏไปฅไธใฎใใใซ่กจใใใพใ๏ผ
```
("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5)
```
ๅๆใจใใฆใByte-Pair Encoding๏ผBPE๏ผใฎใใฌใผใใณใฐใใใฎๆฎต้ใงๅๆญขใใใจใๅญฆ็ฟใใใใใผใธใซใผใซใๆฐใใๅ่ชใซ้ฉ็จใใใพใ๏ผๆฐใใๅ่ชใซใฏใใผในใใญใฃใใฉใชใซๅซใพใใฆใใชใใทใณใใซใๅซใพใใฆใใชใ้ใ๏ผใ ไพใใฐใๅ่ช "bug" ใฏ ["b", "ug"] ใจใใฆใใผใฏใณๅใใใพใใใ"mug" ใฏใใผในใใญใฃใใฉใชใซ "m" ใทใณใใซใๅซใพใใฆใใชใใใใ["<unk>", "ug"] ใจใใฆใใผใฏใณๅใใใพใใ ไธ่ฌ็ใซใ"m" ใฎใใใชๅไธใฎๆๅญใฏใใใฌใผใใณใฐใใผใฟใซใฏ้ๅธธใๅๆๅญใฎๅฐใชใใจใ1ใคใฎๅบ็พใๅซใพใใฆใใใใใ"<unk>" ใทใณใใซใซ็ฝฎใๆใใใใใใจใฏใใใพใใใใ็ตตๆๅญใฎใใใช้ๅธธใซ็นๆฎใชๆๅญใฎๅ ดๅใซใฏ็บ็ใใๅฏ่ฝๆงใใใใพใใ
ๅ่ฟฐใฎใใใซใใใญใฃใใฉใชใตใคใบใใใชใใกใใผในใใญใฃใใฉใชใตใคใบ + ใใผใธใฎๅๆฐใฏ้ธๆใใใใคใใผใใฉใกใผใฟใงใใ ไพใใฐใ[GPT](model_doc/gpt) ใฏใใผในๆๅญใ478ๆๅญใงใ40,000ๅใฎใใผใธๅพใซใใฌใผใใณใฐใๅๆญขใใใใใใใญใฃใใฉใชใตใคใบใฏ40,478ใงใใ
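The merge-counting loop described above is easy to reproduce on the toy corpus from this section. The following is only an illustrative sketch of the algorithm, not the implementation used by any tokenizer library.

```python
from collections import Counter

# Toy corpus: words split into base-vocabulary symbols, with their frequencies.
corpus = {
    ("h", "u", "g"): 10,
    ("p", "u", "g"): 5,
    ("p", "u", "n"): 12,
    ("b", "u", "n"): 4,
    ("h", "u", "g", "s"): 5,
}

def most_frequent_pair(corpus):
    """Count every adjacent symbol pair, weighted by word frequency."""
    pairs = Counter()
    for symbols, freq in corpus.items():
        for pair in zip(symbols, symbols[1:]):
            pairs[pair] += freq
    return pairs.most_common(1)[0]

def merge_pair(corpus, pair):
    """Apply one merge rule: replace every occurrence of `pair` by its fusion."""
    new_symbol = "".join(pair)
    merged = {}
    for symbols, freq in corpus.items():
        out, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == pair:
                out.append(new_symbol)
                i += 2
            else:
                out.append(symbols[i])
                i += 1
        merged[tuple(out)] = freq
    return merged

pair, count = most_frequent_pair(corpus)
print(pair, count)                 # ('u', 'g') occurs 20 times -> first merge rule
corpus = merge_pair(corpus, pair)
print(most_frequent_pair(corpus))  # next merge: ('u', 'n') with 16 occurrences
```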
#### Byte-level BPE
ใในใฆใฎUnicodeๆๅญใใใผในๆๅญใจ่ใใใจใใในใฆใฎๅฏ่ฝใชใใผในๆๅญใๅซใพใใใใใใใชใใใผในใใญใฃใใฉใชใฏใใชใๅคงใใใชใใใจใใใใพใใ [GPT-2](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) ใฏใใใผในใใญใฃใใฉใชใ256ใใคใใซใใ่ณขใใใชใใฏใจใใฆใใคใใใใผในใใญใฃใใฉใชใจใใฆไฝฟ็จใใใในใฆใฎใใผในๆๅญใใใญใฃใใฉใชใซๅซใพใใใใใซใใฆใใพใใ ใใณใฏใใฅใจใผใทใงใณใๆฑใใใใฎใใใคใใฎ่ฟฝๅ ใซใผใซใๅใใGPT2ใฎใใผใฏใใคใถใฏใ<unk> ใทใณใใซใๅฟ
่ฆใจใใใซใในใฆใฎใใญในใใใใผใฏใณๅใงใใพใใ [GPT-2](model_doc/gpt) ใฏ50,257ใฎใใญใฃใใฉใชใตใคใบใๆใฃใฆใใใใใใฏ256ใใคใใฎใใผในใใผใฏใณใ็นๅฅใชใใญในใใฎ็ตไบใ็คบใใใผใฏใณใใใใณ50,000ๅใฎใใผใธใงๅญฆ็ฟใใใทใณใใซใซๅฏพๅฟใใฆใใพใใ
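To see the byte-level behaviour in practice, the GPT-2 tokenizer can be loaded as below. Because every possible byte is in the base vocabulary, even characters that look "rare" are split into byte-level pieces rather than mapped to an unknown token; the exact pieces printed will depend on the input text.

```python
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")

# Accented characters and symbols fall back to byte-level symbols
# instead of an <unk> token.
tokens = tokenizer.tokenize("Déjà vu ✨")
print(tokens)
# convert_tokens_to_string undoes the byte-level mapping and recovers the text.
print(tokenizer.convert_tokens_to_string(tokens))
```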
### WordPiece
WordPieceใฏใ[BERT](model_doc/bert)ใ[DistilBERT](model_doc/distilbert)ใใใใณ[Electra](model_doc/electra)ใงไฝฟ็จใใใใตใใฏใผใใใผใฏใใคใผใผใทใงใณใขใซใดใชใบใ ใงใใ ใใฎใขใซใดใชใบใ ใฏใ[Japanese and Korean Voice Search (Schuster et al., 2012)](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) ใงๆฆ่ชฌใใใฆใใใBPEใซ้ๅธธใซไผผใฆใใพใใ WordPieceใฏๆใ้ ป็นใชใทใณใใซใใขใ้ธๆใใใฎใงใฏใชใใใใฌใผใใณใฐใใผใฟใซ่ฟฝๅ ใใๅ ดๅใซใใฌใผใใณใฐใใผใฟใฎๅฐคๅบฆใๆๅคงๅใใใทใณใใซใใขใ้ธๆใใพใใ
ใใใฏๅ
ทไฝ็ใซใฏใฉใใใๆๅณใงใใ๏ผๅใฎไพใๅ็
งใใใจใใใฌใผใใณใฐใใผใฟใฎๅฐคๅบฆใๆๅคงๅใใใใจใฏใใใฎใทใณใใซใใขใฎ็ขบ็ใใใฎๆๅใฎใทใณใใซใซ็ถใ2็ช็ฎใฎใทใณใใซใฎ็ขบ็ใงๅฒใฃใใใฎใใใในใฆใฎใทใณใใซใใขใฎไธญใงๆใๅคงใใๅ ดๅใซ่ฉฒๅฝใใใทใณใใซใใขใ่ฆใคใใใใจใซ็ญใใใงใใ ใใจใใฐใ"u" ใฎๅพใซ "g" ใ็ถใๅ ดๅใไปใฎใฉใฎใทใณใใซใใขใใใ "ug" ใฎ็ขบ็ใ "u"ใ"g" ใงๅฒใฃใ็ขบ็ใ้ซใใใฐใใใใใฎใทใณใใซใฏ็ตๅใใใพใใ็ดๆ็ใซ่จใใฐใWordPieceใฏ2ใคใฎใทใณใใซใ็ตๅใใใใจใซใใฃใฆๅคฑใใใใใฎใ่ฉไพกใใใใใใใใซๅคใใใใฉใใใ็ขบ่ชใใ็นใงBPEใจใฏใใใใซ็ฐใชใใพใใ
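The selection criterion described above can be written as a small scoring function. This is a hedged sketch with made-up counts, intended only to show how the WordPiece score differs from BPE's raw pair frequency.

```python
import math

# Hypothetical symbol and pair counts over a training corpus.
total = 1_000
counts = {"u": 36, "g": 20, "ug": 20}

# BPE would rank the pair purely by its frequency: counts["ug"].
# WordPiece instead scores the likelihood gain of merging "u" and "g":
score = math.log(counts["ug"] / total) - (
    math.log(counts["u"] / total) + math.log(counts["g"] / total)
)
# Equivalent to log( P("ug") / (P("u") * P("g")) ); the pair with the
# highest score is merged, which is not necessarily the most frequent one.
print(score)
```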
### Unigram
Unigramใฏใ[Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates (Kudo, 2018)](https://arxiv.org/pdf/1804.10959.pdf) ใงๅฐๅ
ฅใใใใตใใฏใผใใใผใฏใใคใผใผใทใงใณใขใซใดใชใบใ ใงใใ BPEใWordPieceใจใฏ็ฐใชใใUnigramใฏใใผในใใญใฃใใฉใชใๅคๆฐใฎใทใณใใซใงๅๆๅใใๅใทใณใใซใๅๆธใใฆใใๅฐใใชใใญใฃใใฉใชใๅๅพใใพใใ ใใผในใใญใฃใใฉใชใฏใไบๅใซใใผใฏใณๅใใใใในใฆใฎๅ่ชใจๆใไธ่ฌ็ใช้จๅๆๅญๅใซๅฏพๅฟใใๅฏ่ฝๆงใใใใพใใ Unigramใฏtransformersใฎใขใใซใฎ็ดๆฅใฎไฝฟ็จใซใฏ้ฉใใฆใใพใใใใ[SentencePiece](#sentencepiece)ใจ็ตใฟๅใใใฆไฝฟ็จใใใพใใ
ๅใใฌใผใใณใฐในใใใใงใUnigramใขใซใดใชใบใ ใฏ็พๅจใฎใใญใฃใใฉใชใจใฆใใฐใฉใ ่จ่ชใขใใซใไฝฟ็จใใฆใใฌใผใใณใฐใใผใฟไธใฎๆๅคฑ๏ผ้ๅธธใฏๅฏพๆฐๅฐคๅบฆใจใใฆๅฎ็พฉ๏ผใๅฎ็พฉใใพใใใใฎๅพใใใญใฃใใฉใชๅ
ใฎๅใทใณใใซใซใคใใฆใใใฎใทใณใใซใใใญใฃใใฉใชใใๅ้คใใใๅ ดๅใซๅ
จไฝใฎๆๅคฑใใฉใใ ใๅขๅ ใใใใ่จ็ฎใใพใใ Unigramใฏใๆๅคฑใฎๅขๅ ใๆใไฝใp๏ผ้ๅธธใฏ10๏ผ
ใพใใฏ20๏ผ
๏ผใใผใปใณใใฎใทใณใใซใๅ้คใใพใใใคใพใใใใฌใผใใณใฐใใผใฟๅ
จไฝใฎๆๅคฑใซๆใๅฝฑ้ฟใไธใใชใใๆใๆๅคฑใฎๅฐใชใใทใณใใซใๅ้คใใพใใ ใใฎใใญใปในใฏใใใญใฃใใฉใชใๆใพใใใตใคใบใซ้ใใใพใง็นฐใ่ฟใใใพใใ Unigramใขใซใดใชใบใ ใฏๅธธใซใใผในๆๅญใไฟๆใใใใใไปปๆใฎๅ่ชใใใผใฏใณๅใงใใพใใ
Unigramใฏใใผใธใซใผใซใซๅบใฅใใฆใใชใใใ๏ผBPEใจWordPieceใจใฏๅฏพ็
ง็ใซ๏ผใใใฌใผใใณใฐๅพใฎๆฐใใใใญในใใฎใใผใฏใณๅใซใฏใใใคใใฎๆนๆณใใใใพใใไพใจใใฆใใใฌใผใใณใฐใใใUnigramใใผใฏใใคใถใๆใคใใญใฃใใฉใชใๆฌกใฎใใใชๅ ดๅ๏ผ
```
["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"],
```
`"hugs"`ใฏใ`["hug", "s"]`ใ`["h", "ug", "s"]`ใใพใใฏ`["h", "u", "g", "s"]`ใฎใใใซใใผใฏใณๅใงใใพใใใงใฏใใฉใใ้ธๆใในใใงใใใใ๏ผ Unigramใฏใใใฌใผใใณใฐใณใผใในๅ
ใฎๅใใผใฏใณใฎ็ขบ็ใไฟๅญใใใใฌใผใใณใฐๅพใซๅๅฏ่ฝใชใใผใฏใณๅใฎ็ขบ็ใ่จ็ฎใงใใใใใซใใพใใใใฎใขใซใดใชใบใ ใฏๅฎ้ใซใฏๆใๅฏ่ฝๆงใฎ้ซใใใผใฏใณๅใ้ธๆใใพใใใ็ขบ็ใซๅพใฃใฆๅฏ่ฝใชใใผใฏใณๅใใตใณใใชใณใฐใใใชใใทใงใณใๆไพใใพใใ
ใใใใฎ็ขบ็ใฏใใใผใฏใใคใถใผใใใฌใผใใณใฐใซไฝฟ็จใใๆๅคฑใซใใฃใฆๅฎ็พฉใใใพใใใใฌใผใใณใฐใใผใฟใๅ่ช \\(x_{1}, \dots, x_{N}\\) ใงๆงๆใใใๅ่ช \\(x_{i}\\) ใฎใในใฆใฎๅฏ่ฝใชใใผใฏใณๅใฎใปใใใ \\(S(x_{i})\\) ใจๅฎ็พฉใใใๅ ดๅใๅ
จไฝใฎๆๅคฑใฏๆฌกใฎใใใซๅฎ็พฉใใใพใใ
$$\mathcal{L} = -\sum_{i=1}^{N} \log \left ( \sum_{x \in S(x_{i})} p(x) \right )$$
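Under stated assumptions (a toy vocabulary with hand-picked unigram probabilities), the loss above can be evaluated directly. The sketch below enumerates the segmentations of a single word and sums their probabilities; it is only an illustration of the definition, not a training implementation.

```python
import math

# Hypothetical unigram probabilities for the toy vocabulary of this section.
probs = {"h": 0.06, "u": 0.06, "g": 0.06, "s": 0.05, "hug": 0.15, "ug": 0.12}

def segmentations(word):
    """Enumerate all ways of splitting `word` into tokens from the vocabulary."""
    if not word:
        return [[]]
    results = []
    for end in range(1, len(word) + 1):
        piece = word[:end]
        if piece in probs:
            for rest in segmentations(word[end:]):
                results.append([piece] + rest)
    return results

def word_loss(word):
    # -log of the total probability mass over all tokenizations of the word.
    total = sum(math.prod(probs[t] for t in seg) for seg in segmentations(word))
    return -math.log(total)

# [['h', 'u', 'g', 's'], ['h', 'ug', 's'], ['hug', 's']]
print(segmentations("hugs"))
print(word_loss("hugs"))
```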
<a id='sentencepiece'></a>
### SentencePiece
ใใใพใงใซ่ชฌๆใใใในใฆใฎใใผใฏใณๅใขใซใดใชใบใ ใซใฏๅใๅ้กใใใใพใใใใใฏใๅ
ฅๅใใญในใใๅ่ชใๅบๅใใใใซในใใผในใไฝฟ็จใใฆใใใจไปฎๅฎใใฆใใใจใใใใจใงใใใใใใใในใฆใฎ่จ่ชใๅ่ชใๅบๅใใใใซในใใผในใไฝฟ็จใใฆใใใใใงใฏใใใพใใใใใฎๅ้กใไธ่ฌ็ใซ่งฃๆฑบใใใใใฎ1ใคใฎๆนๆณใฏใ่จ่ชๅบๆใฎๅใใผใฏใใคใถใผใไฝฟ็จใใใใจใงใ๏ผไพ๏ผ[XLM](model_doc/xlm)ใฏ็นๅฎใฎไธญๅฝ่ชใๆฅๆฌ่ชใใใใณใฟใค่ชใฎๅใใผใฏใใคใถใผใไฝฟ็จใใฆใใพใ๏ผใใใไธ่ฌ็ใซใใฎๅ้กใ่งฃๆฑบใใใใใซใ[SentencePiece๏ผใใฅใผใฉใซใใญในใๅฆ็ใฎใใใฎใทใณใใซใง่จ่ช้ไพๅญใฎใตใใฏใผใใใผใฏใใคใถใผใใใณใใใผใฏใใคใถใผ๏ผKudo et al.ใ2018๏ผ](https://arxiv.org/pdf/1808.06226.pdf) ใฏใๅ
ฅๅใ็ใฎๅ
ฅๅในใใชใผใ ใจใใฆๆฑใใในใใผในใไฝฟ็จใใๆๅญใฎใปใใใซๅซใใพใใใใใใBPEใพใใฏunigramใขใซใดใชใบใ ใไฝฟ็จใใฆ้ฉๅใช่ชๅฝใๆง็ฏใใพใใ
ใใจใใฐใ[`XLNetTokenizer`]ใฏSentencePieceใไฝฟ็จใใฆใใใใใฎใใใซๅ่ฟฐใฎไพใง`"โ"`ๆๅญใ่ชๅฝใซๅซใพใใฆใใพใใใSentencePieceใไฝฟ็จใใใใณใผใใฏ้ๅธธใซ็ฐกๅใงใใในใฆใฎใใผใฏใณใๅ็ดใซ้ฃ็ตใใ`"โ"`ใฏในใใผในใซ็ฝฎๆใใใพใใ
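A hedged sketch of that decoding convention, reusing the XLNet tokenizer from the earlier example (this assumes the `sentencepiece` package is installed): joining the pieces and replacing the "▁" meta-symbol with spaces is all that is needed to recover the text.

```python
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased")

tokens = tokenizer.tokenize("Hello world, this is SentencePiece.")
# SentencePiece marks word boundaries with "▁" instead of consuming spaces,
# so decoding is just concatenation followed by replacing "▁" with a space.
decoded = "".join(tokens).replace("▁", " ").strip()
print(tokens)
print(decoded)
```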
ใฉใคใใฉใชๅ
ใฎใในใฆใฎtransformersใขใใซใฏใSentencePieceใunigramใจ็ตใฟๅใใใฆไฝฟ็จใใพใใSentencePieceใไฝฟ็จใใใขใใซใฎไพใซใฏใ[ALBERT](model_doc/albert)ใ[XLNet](model_doc/xlnet)ใ[Marian](model_doc/marian)ใใใใณ[T5](model_doc/t5)ใใใใพใใ
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# ๐ค Transformers์ ๊ธฐ์ฌํ๊ธฐ [[contribute-to-transformers]]
๋๊ตฌ๋ ๐ค Transformers์ ๊ธฐ์ฌํ ์ ์์ผ๋ฉฐ, ์ฐ๋ฆฌ๋ ๋ชจ๋ ์ฌ๋์ ๊ธฐ์ฌ๋ฅผ ์์คํ ์๊ฐํฉ๋๋ค. ์ฝ๋ ๊ธฐ์ฌ๋ ์ปค๋ฎค๋ํฐ๋ฅผ ๋๋ ์ ์ผํ ๋ฐฉ๋ฒ์ด ์๋๋๋ค. ์ง๋ฌธ์ ๋ตํ๊ฑฐ๋ ๋ค๋ฅธ ์ฌ๋์ ๋์ ๋ฌธ์๋ฅผ ๊ฐ์ ํ๋ ๊ฒ๋ ๋งค์ฐ ๊ฐ์น๊ฐ ์์ต๋๋ค.
๐ค Transformers๋ฅผ ๋๋ฆฌ ์๋ฆฌ๋ ๊ฒ๋ ํฐ ๋์์ด ๋ฉ๋๋ค! ๋ฉ์ง ํ๋ก์ ํธ๋ค์ ๊ฐ๋ฅํ๊ฒ ํ ๐ค Transformers ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ๋ํด ๋ธ๋ก๊ทธ ๊ฒ์๊ธ์ ์ธ๊ธํ๊ฑฐ๋, ๋์์ด ๋์์ ๋๋ง๋ค Twitter์ ์๋ฆฌ๊ฑฐ๋, ์ ์ฅ์์ โญ๏ธ ๋ฅผ ํ์ํ์ฌ ๊ฐ์ฌ ์ธ์ฌ๋ฅผ ์ ํด์ฃผ์ธ์.
์ด๋ค ๋ฐฉ์์ผ๋ก ๊ธฐ์ฌํ๋ [ํ๋ ๊ท์น](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md)์ ์์งํ๊ณ ์กด์คํด์ฃผ์ธ์.
**์ด ์๋ด์๋ ๋ฉ์ง [scikit-learn ๊ธฐ์ฌ ์๋ด์](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md)์์ ํฐ ์๊ฐ์ ๋ฐ์์ต๋๋ค.**
## ๊ธฐ์ฌํ๋ ๋ฐฉ๋ฒ [[ways-to-contribute]]
์ฌ๋ฌ ๊ฐ์ง ๋ฐฉ๋ฒ์ผ๋ก ๐ค Transformers์ ๊ธฐ์ฌํ ์ ์์ต๋๋ค:
* ๊ธฐ์กด ์ฝ๋์ ๋ฏธํด๊ฒฐ๋ ๋ฌธ์ ๋ฅผ ์์ ํฉ๋๋ค.
* ๋ฒ๊ทธ ๋๋ ์๋ก ์ถ๊ฐ๋๊ธธ ์ํ๋ ๊ธฐ๋ฅ๊ณผ ๊ด๋ จ๋ ์ด์๋ฅผ ์ ์ถํฉ๋๋ค.
* ์๋ก์ด ๋ชจ๋ธ์ ๊ตฌํํฉ๋๋ค.
* ์์ ๋ ๋ฌธ์์ ๊ธฐ์ฌํฉ๋๋ค.
์ด๋์๋ถํฐ ์์ํ ์ง ๋ชจ๋ฅด๊ฒ ๋ค๋ฉด, [Good First Issue](https://github.com/huggingface/transformers/contribute) ๋ชฉ๋ก์ ํ์ธํด๋ณด์ธ์. ์ด ๋ชฉ๋ก์ ์ด๋ณด์๋ ์ฐธ์ฌํ๊ธฐ ์ฌ์ด ์คํ ์ด์ ๋ชฉ๋ก์ ์ ๊ณตํ๋ฉฐ, ๋น์ ์ด ์คํ์์ค์ ์ฒ์์ผ๋ก ๊ธฐ์ฌํ๋ ๋ฐ ํฐ ๋์์ด ๋ ๊ฒ์
๋๋ค. ๊ทธ์ ์์
ํ๊ณ ์ถ์ ์ด์์ ๋๊ธ๋ง ๋ฌ์์ฃผ๋ฉด ๋ฉ๋๋ค.
์กฐ๊ธ ๋ ๋์ ์ ์ธ ์์
์ ์ํ๋ค๋ฉด, [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) ๋ชฉ๋ก๋ ํ์ธํด๋ณด์ธ์. ์ด๋ฏธ ๋น์ ์ด ์ ํ๊ณ ์๋ค๊ณ ์๊ฐ๋๋๋ผ๋, ํ ๋ฒ ์๋ํด๋ณด์ธ์! ์ฐ๋ฆฌ๋ ์ฌ๋ฌ๋ถ์ ๋์ธ ๊ฒ์
๋๋ค. ๐
> ์ปค๋ฎค๋ํฐ์ ์ด๋ฃจ์ด์ง๋ ๋ชจ๋ ๊ธฐ์ฌ๋ ๋๊ฐ์ด ์์คํฉ๋๋ค. ๐ฅฐ
## ๋ฏธํด๊ฒฐ๋ ๋ฌธ์ ์์ ํ๊ธฐ [[fixing-outstanding-issues]]
๊ธฐ์กด ์ฝ๋์์ ๋ฐ๊ฒฌํ ๋ฌธ์ ์ ์ ๋ํ ํด๊ฒฐ์ฑ
์ด ๋ ์ค๋ฅธ ๊ฒฝ์ฐ, ์ธ์ ๋ ์ง [๊ธฐ์ฌ๋ฅผ ์์](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#create-a-pull-request)ํ๊ณ Pull Request๋ฅผ ์์ฑํด์ฃผ์ธ์!
## ๋ฒ๊ทธ ๊ด๋ จ ์ด์๋ฅผ ์ ๊ธฐํ๊ฑฐ๋ ์๋ก์ด ๊ธฐ๋ฅ ์์ฒญํ๊ธฐ [[submitting-a-bugrelated-issue-or-feature-request]]
๋ฒ๊ทธ ๊ด๋ จ ์ด์๋ฅผ ์ ๊ธฐํ๊ฑฐ๋ ์๋ก์ด ๊ธฐ๋ฅ์ ์์ฒญํ ๋๋ ๋ค์ ๊ฐ์ด๋๋ผ์ธ์ ์ต๋ํ ์ค์ํด์ฃผ์ธ์. ์ด๋ ๊ฒ ํ๋ฉด ์ข์ ํผ๋๋ฐฑ๊ณผ ํจ๊ป ๋น ๋ฅด๊ฒ ๋ต๋ณํด ๋๋ฆด ์ ์์ต๋๋ค.
### ๋ฒ๊ทธ๋ฅผ ๋ฐ๊ฒฌํ์
จ๋์? [[did-you-find-a-bug]]
๐ค Transformers ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ ์ฌ์ฉ ์ค์ ๊ฒช๋ ๋ฌธ์ ๋ฅผ ๋ณด๊ณ ํด์ฃผ๋ ์ฌ์ฉ์๋ค ๋๋ถ์ ๋์ฑ ๊ฒฌ๊ณ ํด์ง๊ณ ์ ๋ขฐํ ์ ์๊ฒ ๋์์ต๋๋ค.
์ด์๋ฅผ ๋ณด๊ณ ํ๊ธฐ ์ ์, ๋ฒ๊ทธ๊ฐ ์ด๋ฏธ **๋ณด๊ณ ๋์ง ์์๋์ง** ํ์ธํด์ฃผ์ธ์. (GitHub์ ์ด์ ํญ ์๋์ ๊ฒ์ ๋ฐ๋ฅผ ์ฌ์ฉํ์ธ์). ์ด์๋ ๋ผ์ด๋ธ๋ฌ๋ฆฌ ์์ฒด์์ ๋ฐ์ํ ๋ฒ๊ทธ์ด์ผ ํ๋ฉฐ, ์ฝ๋์ ๋ค๋ฅธ ๋ถ๋ถ๊ณผ ๊ด๋ จ๋ ๊ฒ์ด ์๋์ด์ผ ํฉ๋๋ค. ๋ฒ๊ทธ๊ฐ ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ๋ฌธ์ ๋ก ๋ฐ์ํ์๋์ง ํ์คํ์ง ์์ ๊ฒฝ์ฐ ๋จผ์ [ํฌ๋ผ](https://discuss.huggingface.co/)์์ ์ง๋ฌธํด ์ฃผ์ธ์. ์ด๋ ๊ฒ ํ๋ฉด ์ผ๋ฐ์ ์ธ ์ง๋ฌธ๋ณด๋ค ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ๊ด๋ จ๋ ๋ฌธ์ ๋ฅผ ๋ ๋น ๋ฅด๊ฒ ํด๊ฒฐํ ์ ์์ต๋๋ค.
๋ฒ๊ทธ๊ฐ ์ด๋ฏธ ๋ณด๊ณ ๋์ง ์์๋ค๋ ๊ฒ์ ํ์ธํ๋ค๋ฉด, ๋ค์ ์ ๋ณด๋ฅผ ํฌํจํ์ฌ ์ด์๋ฅผ ์ ์ถํด ์ฃผ์ธ์. ๊ทธ๋ฌ๋ฉด ์ฐ๋ฆฌ๊ฐ ๋น ๋ฅด๊ฒ ํด๊ฒฐํ ์ ์์ต๋๋ค:
* ์ฌ์ฉ ์ค์ธ **์ด์์ฒด์  ์ข๋ฅ์ ๋ฒ์ **, ๊ทธ๋ฆฌ๊ณ  **Python**, **PyTorch** ๋๋ **TensorFlow** ๋ฒ์ .
* ๋ฒ๊ทธ๋ฅผ 30์ด ์ด๋ด๋ก ์ฌํํ  ์ ์๋ ๊ฐ๋จํ๊ณ  ๋๋ฆฝ์ ์ธ ์ฝ๋ ์ค๋ํซ.
* ์์ธ๊ฐ ๋ฐ์ํ ๊ฒฝ์ฐ *์ ์ฒด* ํธ๋ ์ด์ค๋ฐฑ.
* ์คํฌ๋ฆฐ์ท๊ณผ ๊ฐ์ด ๋์์ด ๋ ๊ฒ์ผ๋ก ์๊ฐ๋๋ ์ถ๊ฐ ์ ๋ณด๋ฅผ ์ฒจ๋ถํด ์ฃผ์ธ์.
์ด์์ฒด์ ์ ์ํํธ์จ์ด ๋ฒ์ ์ ์๋์ผ๋ก ๊ฐ์ ธ์ค๋ ค๋ฉด ๋ค์ ๋ช๋ น์ ์คํํ์ธ์:
```bash
transformers-cli env
```
์ ์ฅ์์ ๋ฃจํธ ๋๋ ํฐ๋ฆฌ์์๋ ๊ฐ์ ๋ช๋ น์ ์คํํ  ์ ์์ต๋๋ค:
```bash
python src/transformers/commands/transformers_cli.py env
```
### ์๋ก์ด ๊ธฐ๋ฅ์ ์ํ์๋์? [[do-you-want-a-new-feature]]
๐ค Transformers์์ ์ฌ์ฉํ๊ณ ์ถ์ ์๋ก์ด ๊ธฐ๋ฅ์ด ์๋ค๋ฉด, ๋ค์ ๋ด์ฉ์ ํฌํจํ์ฌ ์ด์๋ฅผ ์ ์ถํด ์ฃผ์ธ์:
1. ์ด ๊ธฐ๋ฅ์ด ํ์ํ *์ด์ *๋ ๋ฌด์์ธ๊ฐ์? ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ๋ํ ๋ฌธ์ ๋ ๋ถ๋ง๊ณผ ๊ด๋ จ์ด ์๋์? ํ๋ก์ ํธ์ ํ์ํ ๊ธฐ๋ฅ์ธ๊ฐ์? ์ปค๋ฎค๋ํฐ์ ๋์์ด ๋ ๋งํ ๊ธฐ๋ฅ์ธ๊ฐ์?
์ด๋ค ๋ด์ฉ์ด๋ ์ฌ๋ฌ๋ถ์ ์ด์ผ๊ธฐ๋ฅผ ๋ฃ๊ณ ์ถ์ต๋๋ค!
2. ์์ฒญํ๋ ๊ธฐ๋ฅ์ ์ต๋ํ ์์ธํ ์ค๋ชํด ์ฃผ์ธ์. ๋ ๋ง์ ์ ๋ณด๋ฅผ ์ ๊ณตํ  ์๋ก ๋ ๋์ ๋์์ ๋๋ฆด ์ ์์ต๋๋ค.
3. ํด๋น ๊ธฐ๋ฅ์ ์ฌ์ฉ๋ฒ์ ๋ณด์ฌ์ฃผ๋ *์ฝ๋ ์ค๋ํซ*์ ์ ๊ณตํด ์ฃผ์ธ์.
4. ๊ธฐ๋ฅ๊ณผ ๊ด๋ จ๋ ๋ผ๋ฌธ์ด ์๋ ๊ฒฝ์ฐ ๋งํฌ๋ฅผ ํฌํจํด ์ฃผ์ธ์.
์ด์๊ฐ ์ ์์ฑ๋์๋ค๋ฉด ์ด์๊ฐ ์์ฑ๋ ์๊ฐ, ์ด๋ฏธ 80% ์ ๋์ ์์
์ด ์๋ฃ๋ ๊ฒ์
๋๋ค.
์ด์๋ฅผ ์ ๊ธฐํ๋ ๋ฐ ๋์์ด ๋ ๋งํ [ํ
ํ๋ฆฟ](https://github.com/huggingface/transformers/tree/main/templates)๋ ์ค๋น๋์ด ์์ต๋๋ค.
## ์๋ก์ด ๋ชจ๋ธ์ ๊ตฌํํ๊ณ ์ถ์ผ์ ๊ฐ์? [[do-you-want-to-implement-a-new-model]]
์๋ก์ด ๋ชจ๋ธ์ ๊ณ์ํด์ ์ถ์๋ฉ๋๋ค. ๋ง์ฝ ์ฌ๋ฌ๋ถ์ด ์๋ก์ด ๋ชจ๋ธ์ ๊ตฌํํ๊ณ ์ถ๋ค๋ฉด ๋ค์ ์ ๋ณด๋ฅผ ์ ๊ณตํด ์ฃผ์ธ์:
* ๋ชจ๋ธ์ ๋ํ ๊ฐ๋จํ ์ค๋ช๊ณผ ๋ผ๋ฌธ ๋งํฌ.
* ๊ตฌํ์ด ๊ณต๊ฐ๋์ด ์๋ค๋ฉด ๊ตฌํ ๋งํฌ.
* ๋ชจ๋ธ ๊ฐ์ค์น๊ฐ ์ฌ์ฉ ๊ฐ๋ฅํ๋ค๋ฉด ๊ฐ์ค์น ๋งํฌ.
๋ง์ฝ ๋ชจ๋ธ์ ์ง์ ๊ธฐ์ฌํ๊ณ ์ถ์ผ์๋ค๋ฉด, ์๋ ค์ฃผ์ธ์. ๐ค Transformers์ ์ถ๊ฐํ ์ ์๋๋ก ๋์๋๋ฆฌ๊ฒ ์ต๋๋ค!
[๐ค Transformers์ ์๋ก์ด ๋ชจ๋ธ์ ์ถ๊ฐํ๋ ๋ฐฉ๋ฒ](https://huggingface.co/docs/transformers/add_new_model)์ ๋ํ ๊ธฐ์ ์ ์ธ ์๋ด์๋ ์์ต๋๋ค.
## ๋ฌธ์๋ฅผ ์ถ๊ฐํ๊ณ ์ถ์ผ์ ๊ฐ์? [[do-you-want-to-add-documentation]]
์ฐ๋ฆฌ๋ ์ธ์ ๋ ๋ ๋ช
ํํ๊ณ ์ ํํ ๋ฌธ์๋ฅผ ์ ๊ณตํ๊ธฐ ์ํ์ฌ ๊ฐ์ ์ ์ ์ฐพ๊ณ ์์ต๋๋ค. ์คํ์๋ ๋ถ์กฑํ ๋ด์ฉ, ๋ถ๋ช
ํ์ง ์๊ฑฐ๋ ๋ถ์ ํํ ๋ด์ฉ ๋ฑ์ ์๋ ค์ฃผ์๋ฉด ๊ฐ์ ํ๋ ๋ฐ ๋์์ด ๋ฉ๋๋ค. ๊ด์ฌ์ด ์์ผ์๋ค๋ฉด ๋ณ๊ฒฝํ๊ฑฐ๋ ๊ธฐ์ฌํ์ค ์ ์๋๋ก ๋์๋๋ฆฌ๊ฒ ์ต๋๋ค!
๋ฌธ์๋ฅผ ์์ฑ, ๋น๋ ๋ฐ ์์ฑํ๋ ๋ฐฉ๋ฒ์ ๋ํ ์์ธํ ๋ด์ฉ์ [README](https://github.com/huggingface/transformers/tree/main/docs) ๋ฌธ์๋ฅผ ํ์ธํด ์ฃผ์ธ์.
## ํ ๋ฆฌํ์คํธ(Pull Request) ์์ฑํ๊ธฐ [[create-a-pull-request]]
์ฝ๋๋ฅผ ์์ฑํ๊ธฐ ์ ์ ๊ธฐ์กด์ Pull Request๋ ์ด์๋ฅผ ๊ฒ์ํ์ฌ ๋๊ตฐ๊ฐ ์ด๋ฏธ ๋์ผํ ์์
์ ํ๊ณ ์๋์ง ํ์ธํ๋ ๊ฒ์ด ์ข์ต๋๋ค. ํ์คํ์ง ์๋ค๋ฉด ํผ๋๋ฐฑ์ ๋ฐ๊ธฐ ์ํด ์ด์๋ฅผ ์ด์ด๋ณด๋ ๊ฒ์ด ์ข์ต๋๋ค.
๐ค Transformers์ ๊ธฐ์ฌํ๊ธฐ ์ํด์๋ ๊ธฐ๋ณธ์ ์ธ `git` ์ฌ์ฉ ๋ฅ๋ ฅ์ด ํ์ํฉ๋๋ค. `git`์ ์ฌ์ฉํ๊ธฐ ์ฌ์ด ๋๊ตฌ๋ ์๋์ง๋ง, ๋งค์ฐ ํ๋ฅญํ ๋งค๋ด์ผ์ ์ ๊ณตํฉ๋๋ค. ์(shell)์์ `git --help`์ ์
๋ ฅํ์ฌ ํ์ธํด๋ณด์ธ์! ๋ง์ฝ ์ฑ
์ ์ ํธํ๋ค๋ฉด, [Pro Git](https://git-scm.com/book/en/v2)์ ๋งค์ฐ ์ข์ ์ฐธ๊ณ ์๋ฃ๊ฐ ๋ ๊ฒ์
๋๋ค.
๐ค Transformers์ ๊ธฐ์ฌํ๋ ค๋ฉด **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** ์ด์์ ๋ฒ์ ์ด ํ์ํฉ๋๋ค. ๊ธฐ์ฌ๋ฅผ ์์ํ๋ ค๋ฉด ๋ค์ ๋จ๊ณ๋ฅผ ๋ฐ๋ฅด์ธ์:
1. ์ ์ฅ์ ํ์ด์ง์์ **[Fork](https://github.com/huggingface/transformers/fork)** ๋ฒํผ์ ํด๋ฆญํ์ฌ ์ ์ฅ์๋ฅผ ํฌํฌํ์ธ์. ์ด๋ ๊ฒ ํ๋ฉด ์ฝ๋์ ๋ณต์ฌ๋ณธ์ด ์ฌ๋ฌ๋ถ์ GitHub ์ฌ์ฉ์ ๊ณ์ ์๋์ ์์ฑ๋ฉ๋๋ค.
2. ํฌํฌํ ์ ์ฅ์๋ฅผ ๋ก์ปฌ ๋์คํฌ๋ก ํด๋ก ํ๊ณ , ๊ธฐ๋ณธ ์ ์ฅ์๋ฅผ ์๊ฒฉ(remote)์ผ๋ก ์ถ๊ฐํ์ธ์:
```bash
git clone [email protected]:<your Github handle>/transformers.git
cd transformers
git remote add upstream https://github.com/huggingface/transformers.git
```
3. ๊ฐ๋ฐ ๋ณ๊ฒฝ ์ฌํญ์ ์ ์ฅํ ์ ๋ธ๋์น๋ฅผ ์์ฑํ์ธ์:
```bash
git checkout -b a-descriptive-name-for-my-changes
```
๐จ ์ ๋ `main` ๋ธ๋์น์์ ์์ํ์ง **๋ง์ธ์!**
4. ๊ฐ์ ํ๊ฒฝ์์ ๋ค์ ๋ช
๋ น์ ์คํํ์ฌ ๊ฐ๋ฐ ํ๊ฒฝ์ ์ค์ ํ์ธ์:
```bash
pip install -e ".[dev]"
```
๋ง์ฝ ์ด๋ฏธ ๊ฐ์ ํ๊ฒฝ์ ๐ค Transformers๊ฐ ์ค์น๋์ด ์๋ค๋ฉด, `-e` ํ๋๊ทธ๋ฅผ ์ฌ์ฉํ์ฌ ์ค์นํ๊ธฐ ์ ์ `pip uninstall transformers`๋ก ์ ๊ฑฐํด์ฃผ์ธ์.
์ฌ๋ฌ๋ถ์ ์ด์์ฒด์ ์ ๋ฐ๋ผ์, ๊ทธ๋ฆฌ๊ณ ๐ค Transformers์ ์ ํ์ ์์กด์ฑ์ ์๊ฐ ์ฆ๊ฐํ๋ฉด์, ์ด ๋ช
๋ น์ด ์คํจํ ์๋ ์์ต๋๋ค. ๊ทธ๋ด ๊ฒฝ์ฐ ์ฌ์ฉํ๋ ค๋ ๋ฅ๋ฌ๋ ํ๋ ์์ํฌ(PyTorch, TensorFlow, ๊ทธ๋ฆฌ๊ณ /๋๋ Flax)๋ฅผ ์ค์นํ ํ ์๋ ๋ช
๋ น์ ์คํํด์ฃผ์ธ์:
```bash
pip install -e ".[quality]"
```
๋๋ถ๋ถ์ ๊ฒฝ์ฐ ์ด๊ฒ์ผ๋ก ์ถฉ๋ถํ ๊ฒ์
๋๋ค.
5. ๋ธ๋์น์์ ๊ธฐ๋ฅ์ ๊ฐ๋ฐํ์ธ์.
์ฝ๋๋ฅผ ์์
ํ๋ ๋์ ํ
์คํธ ์ค์ํธ(test suite)๊ฐ ํต๊ณผํ๋์ง ํ์ธํ์ธ์. ๋ค์๊ณผ ๊ฐ์ด ๋ณ๊ฒฝ ์ฌํญ์ ์ํฅ์ ๋ฐ๋ ํ
์คํธ๋ฅผ ์คํํ์ธ์:
```bash
pytest tests/<TEST_TO_RUN>.py
```
ํ
์คํธ์ ๋ํ ๋ ๋ง์ ์ ๋ณด๋ [ํ
์คํธ](https://huggingface.co/docs/transformers/testing) ๊ฐ์ด๋๋ฅผ ํ์ธํ์ธ์.
๐ค Transformers๋ `black`๊ณผ `ruff`๋ฅผ ์ฌ์ฉํ์ฌ ์์ค ์ฝ๋์ ํ์์ ์ผ๊ด๋๊ฒ ์ ์งํฉ๋๋ค. ๋ณ๊ฒฝ ์ฌํญ์ ์ ์ฉํ ํ์๋ ๋ค์ ๋ช
๋ น์ผ๋ก ์๋์ผ๋ก ์คํ์ผ ๊ต์ ๋ฐ ์ฝ๋ ๊ฒ์ฆ์ ์ํํ์ธ์:
```bash
make fixup
```
์ด๊ฒ์ ๋ํ ์์
์ค์ธ PR์์ ์์ ํ ํ์ผ์์๋ง ์๋ํ๋๋ก ์ต์ ํ๋์ด ์์ต๋๋ค.
๊ฒ์ฌ๋ฅผ ํ๋์ฉ ์คํํ๋ ค๋ ๊ฒฝ์ฐ, ๋ค์ ๋ช
๋ น์ผ๋ก ์คํ์ผ ๊ต์ ์ ์ ์ฉํ ์ ์์ต๋๋ค:
```bash
make style
```
๐ค Transformers๋ ๋ํ `ruff`์ ๋ช ๊ฐ์ง ์ฌ์ฉ์ ์ ์ ์คํฌ๋ฆฝํธ๋ฅผ ์ฌ์ฉํ์ฌ ์ฝ๋ฉ ์ค์๋ฅผ ํ์ธํฉ๋๋ค. CI๋ฅผ ํตํด ํ์ง ๊ด๋ฆฌ๊ฐ ์ํ๋์ง๋ง, ๋ค์ ๋ช
๋ น์ผ๋ก ๋์ผํ ๊ฒ์ฌ๋ฅผ ์คํํ ์ ์์ต๋๋ค:
```bash
make quality
```
๋ง์ง๋ง์ผ๋ก, ์ ๋ชจ๋ธ์ ์ถ๊ฐํ ๋ ์ผ๋ถ ํ์ผ์ ์
๋ฐ์ดํธํ๋ ๊ฒ์ ์์ง ์๋๋ก ํ๊ธฐ ์ํ ๋ง์ ์คํฌ๋ฆฝํธ๊ฐ ์์ต๋๋ค. ๋ค์ ๋ช
๋ น์ผ๋ก ์ด๋ฌํ ์คํฌ๋ฆฝํธ๋ฅผ ์คํํ ์ ์์ต๋๋ค:
```bash
make repo-consistency
```
์ด๋ฌํ ๊ฒ์ฌ์ ๋ํด ์์ธํ ์์๋ณด๊ณ ๊ด๋ จ ๋ฌธ์ ๋ฅผ ํด๊ฒฐํ๋ ๋ฐฉ๋ฒ์ [Pull Request์ ๋ํ ๊ฒ์ฌ](https://huggingface.co/docs/transformers/pr_checks) ๊ฐ์ด๋๋ฅผ ํ์ธํ์ธ์.
๋ง์ฝ `docs/source` ๋๋ ํฐ๋ฆฌ ์๋์ ๋ฌธ์๋ฅผ ์์ ํ๋ ๊ฒฝ์ฐ, ๋ฌธ์๊ฐ ๋น๋๋ ์ ์๋์ง ํ์ธํ์ธ์. ์ด ๊ฒ์ฌ๋ Pull Request๋ฅผ ์ด ๋๋ CI์์ ์คํ๋ฉ๋๋ค. ๋ก์ปฌ ๊ฒ์ฌ๋ฅผ ์คํํ๋ ค๋ฉด ๋ฌธ์ ๋น๋๋ฅผ ์ค์นํด์ผ ํฉ๋๋ค:
```bash
pip install ".[docs]"
```
์ ์ฅ์์ ๋ฃจํธ ๋๋ ํฐ๋ฆฌ์์ ๋ค์ ๋ช
๋ น์ ์คํํ์ธ์:
```bash
doc-builder build transformers docs/source/en --build_dir ~/tmp/test-build
```
์ด ๋ช
๋ น์ `~/tmp/test-build` ํด๋์ ๋ฌธ์๋ฅผ ๋น๋ํ๋ฉฐ, ์์ฑ๋ Markdown ํ์ผ์ ์ ํธํ๋ ํธ์ง๊ธฐ๋ก ํ์ธํ ์ ์์ต๋๋ค. Pull Request๋ฅผ ์ด ๋ GitHub์์ ๋ฌธ์๋ฅผ ๋ฏธ๋ฆฌ ๋ณผ ์๋ ์์ต๋๋ค.
๋ณ๊ฒฝ ์ฌํญ์ ๋ง์กฑํ๋ฉด `git add`๋ก ๋ณ๊ฒฝ๋ ํ์ผ์ ์ถ๊ฐํ๊ณ , `git commit`์ผ๋ก ๋ณ๊ฒฝ ์ฌํญ์ ๋ก์ปฌ์ ๊ธฐ๋กํ์ธ์:
```bash
git add modified_file.py
git commit
```
[์ข์ ์ปค๋ฐ ๋ฉ์์ง](https://chris.beams.io/posts/git-commit/)๋ฅผ ์์ฑํ์ฌ ๋ณ๊ฒฝ ์ฌํญ์ ๋ช
ํํ๊ฒ ์ ๋ฌํ์ธ์!
๋ณ๊ฒฝ ์ฌํญ์ ํ๋ก์ ํธ ์๋ณธ ์ ์ฅ์์ ๋๊ธฐํํ๋ ค๋ฉด, PR์ *์ด๊ธฐ ์ ์* ๋ธ๋์น๋ฅผ `upstream/branch`๋ก ๋ฆฌ๋ฒ ์ด์ค(rebase)ํ์ธ์. ๋๋ ๊ด๋ฆฌ์์ ์์ฒญ์ ์ด ์์
์ด ํ์ํ ์ ์์ต๋๋ค:
```bash
git fetch upstream
git rebase upstream/main
```
๋ณ๊ฒฝ ์ฌํญ์ ๋ธ๋์น์ ํธ์ํ์ธ์:
```bash
git push -u origin a-descriptive-name-for-my-changes
```
์ด๋ฏธ PR์ ์ด์๋ค๋ฉด, `--force` ํ๋๊ทธ์ ํจ๊ป ๊ฐ์ ํธ์ํด์ผ ํฉ๋๋ค. ์์ง PR์ด ์ด๋ฆฌ์ง ์์๋ค๋ฉด ์ ์์ ์ผ๋ก ๋ณ๊ฒฝ ์ฌํญ์ ํธ์ํ๋ฉด ๋ฉ๋๋ค.
6. ์ด์ GitHub์์ ํฌํฌํ ์ ์ฅ์๋ก ์ด๋ํ๊ณ **Pull request(ํ ๋ฆฌํ์คํธ)**๋ฅผ ํด๋ฆญํ์ฌ Pull Request๋ฅผ ์ด ์ ์์ต๋๋ค. ์๋์ [์ฒดํฌ๋ฆฌ์คํธ](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#pull-request-checklist)์์ ๋ชจ๋ ํญ๋ชฉ์ ์ฒดํฌ ํ์๋ฅผ ํ์ธ์. ์ค๋น๊ฐ ์๋ฃ๋๋ฉด ํ๋ก์ ํธ ๊ด๋ฆฌ์์๊ฒ ๋ณ๊ฒฝ ์ฌํญ์ ๋ณด๋ด ๊ฒํ ๋ฅผ ์์ฒญํ ์ ์์ต๋๋ค.
7. ๊ด๋ฆฌ์๊ฐ ๋ณ๊ฒฝ ์ฌํญ์ ์์ฒญํด๋ ๊ด์ฐฎ์ต๋๋ค. ํต์ฌ ๊ธฐ์ฌ์๋ค๋ ๋์ผํ ์ํฉ์ ๊ฒช์ต๋๋ค! ๋ชจ๋๊ฐ ๋ณ๊ฒฝ ์ฌํญ์ Pull Request์์ ๋ณผ ์ ์๋๋ก, ๋ก์ปฌ ๋ธ๋์น์์ ์์
ํ๊ณ ๋ณ๊ฒฝ ์ฌํญ์ ํฌํฌํ ์ ์ฅ์๋ก ํธ์ํ์ธ์. ๊ทธ๋ฌ๋ฉด ๋ณ๊ฒฝ ์ฌํญ์ด ์๋์ผ๋ก Pull Request์ ๋ํ๋ฉ๋๋ค.
### Pull Request ์ฒดํฌ๋ฆฌ์คํธ [[pull-request-checklist]]
โ Pull Request ์ ๋ชฉ์ ๊ธฐ์ฌ ๋ด์ฉ์ ์์ฝํด์ผ ํฉ๋๋ค.<br>
โ Pull Request๊ฐ ์ด์๋ฅผ ํด๊ฒฐํ๋ ๊ฒฝ์ฐ, Pull Request ์ค๋ช
์ ์ด์ ๋ฒํธ๋ฅผ ์ธ๊ธํ์ฌ ์ฐ๊ด๋์ด ์์์ ์๋ ค์ฃผ์ธ์. (์ด์๋ฅผ ํ์ธํ๋ ์ฌ๋๋ค์ด ํด๋น ์ด์์ ๋ํ ์์
์ด ์งํ ์ค์์ ์ ์ ์๊ฒ ํฉ๋๋ค).<br>
โ ์์
์ด ์งํ์ค์ด๋ผ๋ฉด ์ ๋ชฉ ์์ `[WIP]`๋ฅผ ๋ถ์ฌ์ฃผ์ธ์. ์ค๋ณต ์์
์ ํผํ๊ณ ๋ณํฉํ ์ค๋น๊ฐ ๋ PR๊ณผ ๊ตฌ๋ถํ๊ธฐ์ ์ ์ฉํฉ๋๋ค.<br>
โ ๊ธฐ์กด ํ
์คํธ๋ฅผ ํต๊ณผํ๋์ง ํ์ธํ์ธ์.<br>
โ ์๋ก์ด ๊ธฐ๋ฅ์ ์ถ๊ฐํ๋ ๊ฒฝ์ฐ, ํด๋น ๊ธฐ๋ฅ์ ๋ํ ํ
์คํธ๋ ์ถ๊ฐํ์ธ์.<br>
- ์ ๋ชจ๋ธ์ ์ถ๊ฐํ๋ ๊ฒฝ์ฐ, `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)`์ ์ฌ์ฉํ์ฌ ์ผ๋ฐ์ ์ธ ํ
์คํธ๋ฅผ ํ์ฑํํ์ธ์.
- ์ `@slow` ํ
์คํธ๋ฅผ ์ถ๊ฐํ๋ ๊ฒฝ์ฐ, ๋ค์ ๋ช
๋ น์ผ๋ก ํ
์คํธ๋ฅผ ํต๊ณผํ๋์ง ํ์ธํ์ธ์: `RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py`.
- ์ ํ ํฌ๋์ด์ ๋ฅผ ์ถ๊ฐํ๋ ๊ฒฝ์ฐ, ํ
์คํธ๋ฅผ ์์ฑํ๊ณ ๋ค์ ๋ช
๋ น์ผ๋ก ํ
์คํธ๋ฅผ ํต๊ณผํ๋์ง ํ์ธํ์ธ์: `RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py`.
- CircleCI์์๋ ๋๋ฆฐ ํ
์คํธ๋ฅผ ์คํํ์ง ์์ง๋ง, GitHub Actions์์๋ ๋งค์ผ ๋ฐค ์คํ๋ฉ๋๋ค!<br>
โ ๋ชจ๋ ๊ณต๊ฐ ๋ฉ์๋๋ ์ ์ฉํ ๊ธฐ์ ๋ฌธ์๋ฅผ ๊ฐ์ ธ์ผ ํฉ๋๋ค (์๋ฅผ ๋ค์ด [`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py) ์ฐธ์กฐ).<br>
โ ์ ์ฅ์๊ฐ ๋น ๋ฅด๊ฒ ์ฑ์ฅํ๊ณ ์์ผ๋ฏ๋ก ์ ์ฅ์์ ์๋นํ ๋ถ๋ด์ ์ฃผ๋ ์ด๋ฏธ์ง, ๋์์ ๋ฐ ๊ธฐํ ํ
์คํธ๊ฐ ์๋ ํ์ผ์ ์ถ๊ฐํ์ง ๋ง์ธ์. ๋์ [`hf-internal-testing`](https://huggingface.co/hf-internal-testing)๊ณผ ๊ฐ์ Hub ์ ์ฅ์๋ฅผ ์ฌ์ฉํ์ฌ ์ด๋ฌํ ํ์ผ์ ํธ์คํ
ํ๊ณ URL๋ก ์ฐธ์กฐํ์ธ์. ๋ฌธ์์ ๊ด๋ จ๋ ์ด๋ฏธ์ง๋ ๋ค์ ์ ์ฅ์์ ๋ฐฐ์นํ๋ ๊ฒ์ ๊ถ์ฅํฉ๋๋ค: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). ์ด ๋ฐ์ดํฐ์
์ ์ฅ์์์ PR์ ์ด์ด์ Hugging Face ๋ฉค๋ฒ์๊ฒ ๋ณํฉ์ ์์ฒญํ ์ ์์ต๋๋ค.
Pull Request์์ ์คํ๋๋ ๊ฒ์ฌ์ ๋ํ ์์ธํ ์ ๋ณด๋ [Pull Request์ ๋ํ ๊ฒ์ฌ](https://huggingface.co/docs/transformers/pr_checks) ๊ฐ์ด๋๋ฅผ ํ์ธํ์ธ์.
### ํ
์คํธ [[tests]]
๋ผ์ด๋ธ๋ฌ๋ฆฌ ๋์๊ณผ ์ฌ๋ฌ ์์ ๋ฅผ ํ
์คํธํ ์ ์๋ ๊ด๋ฒ์ํ ํ
์คํธ ์ค์ํธ๊ฐ ํฌํจ๋์ด ์์ต๋๋ค. ๋ผ์ด๋ธ๋ฌ๋ฆฌ ํ
์คํธ๋ [tests](https://github.com/huggingface/transformers/tree/main/tests) ํด๋์, ์์ ํ
์คํธ๋ [examples](https://github.com/huggingface/transformers/tree/main/examples) ํด๋์ ์์ต๋๋ค.
์๋๊ฐ ๋น ๋ฅธ `pytest`์ `pytest-xdist`๋ฅผ ์ ํธํฉ๋๋ค. ์ ์ฅ์์ ๋ฃจํธ ๋๋ ํฐ๋ฆฌ์์ ํ
์คํธ๋ฅผ ์คํํ *ํ์ ํด๋ ๊ฒฝ๋ก ๋๋ ํ
์คํธ ํ์ผ ๊ฒฝ๋ก*๋ฅผ ์ง์ ํ์ธ์:
```bash
python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
```
๋ง์ฐฌ๊ฐ์ง๋ก `examples` ๋๋ ํฐ๋ฆฌ์์๋ *ํ์ ํด๋ ๊ฒฝ๋ก ๋๋ ํ
์คํธ ํ์ผ ๊ฒฝ๋ก*๋ฅผ ์ง์ ํ์ธ์. ์๋ฅผ ๋ค์ด, ๋ค์ ๋ช
๋ น์ PyTorch `examples` ๋๋ ํฐ๋ฆฌ์ ํ
์คํธ ๋ถ๋ฅ ํ์ ํด๋๋ฅผ ํ
์คํธํฉ๋๋ค:
```bash
pip install -r examples/xxx/requirements.txt # only needed the first time
python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```
์ด๊ฒ์ด ์ค์ ๋ก `make test` ๋ฐ `make test-examples` ๋ช
๋ น์ด ๊ตฌํ๋๋ ๋ฐฉ์์
๋๋ค (`pip install`์ ์ ์ธํฉ๋๋ค)!
๋ํ ํน์ ๊ธฐ๋ฅ๋ง ํ
์คํธํ๊ธฐ ์ํ ๋ ์์ ํ
์คํธ๋ฅผ ์ง์ ํ ์ ์์ต๋๋ค.
๊ธฐ๋ณธ์ ์ผ๋ก ๋๋ฆฐ ํ
์คํธ๋ ๊ฑด๋๋ฐ์ง๋ง `RUN_SLOW` ํ๊ฒฝ ๋ณ์๋ฅผ `yes`๋ก ์ค์ ํ์ฌ ์คํํ ์ ์์ต๋๋ค. ์ด๋ ๊ฒ ํ๋ฉด ๋ง์ ๊ธฐ๊ฐ๋ฐ์ดํธ ๋จ์์ ๋ชจ๋ธ์ด ๋ค์ด๋ก๋๋๋ฏ๋ก ์ถฉ๋ถํ ๋์คํฌ ๊ณต๊ฐ, ์ข์ ์ธํฐ๋ท ์ฐ๊ฒฐ๊ณผ ๋ง์ ์ธ๋ด๊ฐ ํ์ํฉ๋๋ค!
<Tip warning={true}>
ํ
์คํธ๋ฅผ ์คํํ๋ ค๋ฉด *ํ์ ํด๋ ๊ฒฝ๋ก ๋๋ ํ
์คํธ ํ์ผ ๊ฒฝ๋ก*๋ฅผ ์ง์ ํ์ธ์. ๊ทธ๋ ์ง ์์ผ๋ฉด `tests` ๋๋ `examples` ํด๋์ ๋ชจ๋ ํ
์คํธ๋ฅผ ์คํํ๊ฒ ๋์ด ๋งค์ฐ ๊ธด ์๊ฐ์ด ๊ฑธ๋ฆฝ๋๋ค!
</Tip>
```bash
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```
๋๋ฆฐ ํ
์คํธ์ ๋ง์ฐฌ๊ฐ์ง๋ก, ๋ค์๊ณผ ๊ฐ์ด ํ
์คํธ ์ค์ ๊ธฐ๋ณธ์ ์ผ๋ก ํ์ฑํ๋์ง ์๋ ๋ค๋ฅธ ํ๊ฒฝ ๋ณ์๋ ์์ต๋๋ค:
- `RUN_CUSTOM_TOKENIZERS`: ์ฌ์ฉ์ ์ ์ ํ ํฌ๋์ด์ ํ
์คํธ๋ฅผ ํ์ฑํํฉ๋๋ค.
- `RUN_PT_FLAX_CROSS_TESTS`: PyTorch + Flax ํตํฉ ํ
์คํธ๋ฅผ ํ์ฑํํฉ๋๋ค.
- `RUN_PT_TF_CROSS_TESTS`: TensorFlow + PyTorch ํตํฉ ํ
์คํธ๋ฅผ ํ์ฑํํฉ๋๋ค.
๋ ๋ง์ ํ๊ฒฝ ๋ณ์์ ์ถ๊ฐ ์ ๋ณด๋ [testing_utils.py](src/transformers/testing_utils.py)์์ ์ฐพ์ ์ ์์ต๋๋ค.
๐ค Transformers๋ ํ
์คํธ ์คํ๊ธฐ๋ก `pytest`๋ฅผ ์ฌ์ฉํฉ๋๋ค. ๊ทธ๋ฌ๋ ํ
์คํธ ์ค์ํธ ์์ฒด์์๋ `pytest` ๊ด๋ จ ๊ธฐ๋ฅ์ ์ฌ์ฉํ์ง ์์ต๋๋ค.
์ด๊ฒ์ `unittest`๊ฐ ์์ ํ ์ง์๋๋ค๋ ๊ฒ์ ์๋ฏธํฉ๋๋ค. ๋ค์์ `unittest`๋ก ํ
์คํธ๋ฅผ ์คํํ๋ ๋ฐฉ๋ฒ์
๋๋ค:
```bash
python -m unittest discover -s tests -t . -v
python -m unittest discover -s examples -t examples -v
```
### ์คํ์ผ ๊ฐ์ด๋ [[style-guide]]
๋ฌธ์๋ [Google Python ์คํ์ผ ๊ฐ์ด๋](https://google.github.io/styleguide/pyguide.html)๋ฅผ ๋ฐ๋ฆ
๋๋ค. ์์ธํ ์ ๋ณด๋ [๋ฌธ์ ์์ฑ ๊ฐ์ด๋](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification)๋ฅผ ํ์ธํ์ธ์.
### Windows์์ ๊ฐ๋ฐ [[develop-on-windows]]
Windows์์ ๊ฐ๋ฐํ ๊ฒฝ์ฐ([Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/) ๋๋ WSL์์ ์์
ํ์ง ์๋ ํ) Windows `CRLF` ์ค ๋ฐ๊ฟ์ Linux `LF` ์ค ๋ฐ๊ฟ์ผ๋ก ๋ณํํ๋๋ก git์ ๊ตฌ์ฑํด์ผ ํฉ๋๋ค:
```bash
git config core.autocrlf input
```
Windows์์ `make` ๋ช
๋ น์ ์คํํ๋ ํ ๊ฐ์ง ๋ฐฉ๋ฒ์ MSYS2๋ฅผ ์ฌ์ฉํ๋ ๊ฒ์
๋๋ค:
1. [MSYS2](https://www.msys2.org/)๋ฅผ ๋ค์ด๋ก๋ํฉ๋๋ค. `C:\msys64`์ ์ค์น๋์๋ค๊ณ ๊ฐ์ ํฉ๋๋ค.
2. CLI์์ `C:\msys64\msys2.exe`๋ฅผ ์ฝ๋๋ค (์์ ๋ฉ๋ด์์ ์ฌ์ฉ ๊ฐ๋ฅํด์ผ ํจ).
3. ์์์ ๋ค์์ ์คํํ์ฌ: `pacman -Syu` ๋ฐ `pacman -S make`๋ก `make`๋ฅผ ์ค์นํฉ๋๋ค.
4. ํ๊ฒฝ ๋ณ์ PATH์ `C:\msys64\usr\bin`์ ์ถ๊ฐํ์ธ์.
์ด์ ๋ชจ๋ ํฐ๋ฏธ๋ (PowerShell, cmd.exe ๋ฑ)์์ `make`๋ฅผ ์ฌ์ฉํ ์ ์์ต๋๋ค! ๐
### ํฌํฌํ ์ ์ฅ์๋ฅผ ์์ ์๋ณธ ๋ธ๋์น(main)๊ณผ ๋๊ธฐํํ๊ธฐ (Hugging Face ์ ์ฅ์) [[sync-a-forked-repository-with-upstream-main-the-hugging-face-repository]]
ํฌํฌํ ์ ์ฅ์์ main ๋ธ๋์น๋ฅผ ์
๋ฐ์ดํธํ ๋, ๋ค์ ๋จ๊ณ๋ฅผ ๋ฐ๋ผ ์ํํด์ฃผ์ธ์. ์ด๋ ๊ฒ ํ๋ฉด ๊ฐ upstream PR์ ์ฐธ์กฐ ๋
ธํธ๊ฐ ์ถ๊ฐ๋๋ ๊ฒ์ ํผํ๊ณ ์ด๋ฌํ PR์ ๊ด์ฌํ๋ ๊ฐ๋ฐ์๋ค์๊ฒ ๋ถํ์ํ ์๋ฆผ์ด ์ ์ก๋๋ ๊ฒ์ ๋ฐฉ์งํ ์ ์์ต๋๋ค.
1. ๊ฐ๋ฅํ๋ฉด ํฌํฌ๋ ์ ์ฅ์์ ๋ธ๋์น ๋ฐ PR์ ์ฌ์ฉํ์ฌ upstream๊ณผ ๋๊ธฐํํ์ง ๋ง์ธ์. ๋์ ํฌํฌ๋ main ์ ์ฅ์์ ์ง์ ๋ณํฉํ์ธ์.
2. PR์ด ๋ฐ๋์ ํ์ํ ๊ฒฝ์ฐ, ๋ธ๋์น๋ฅผ ํ์ธํ ํ ๋ค์ ๋จ๊ณ๋ฅผ ์ฌ์ฉํ์ธ์:
```bash
git checkout -b your-branch-for-syncing
git pull --squash --no-commit upstream main
git commit -m '<your message without GitHub references>'
git push --set-upstream origin your-branch-for-syncing
```
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ์์ด์ ํธ & ๋๊ตฌ [[agents-tools]]
<Tip warning={true}>
Transformers Agent๋ ์คํ ์ค์ธ API์ด๋ฏ๋ก ์ธ์ ๋ ์ง ๋ณ๊ฒฝ๋ ์ ์์ต๋๋ค.
API๋ ๊ธฐ๋ฐ ๋ชจ๋ธ์ด ์์ฃผ ์
๋ฐ์ดํธ๋๋ฏ๋ก, ์์ด์ ํธ๊ฐ ์ ๊ณตํ๋ ๊ฒฐ๊ณผ๋ฌผ์ ๋ฌ๋ผ์ง ์ ์์ต๋๋ค.
</Tip>
์์ด์ ํธ์ ๋๊ตฌ์ ๋ํด ๋ ์์๋ณด๋ ค๋ฉด [์๊ฐ ๊ฐ์ด๋](../transformers_agents)๋ฅผ ๊ผญ ์ฝ์ด๋ณด์ธ์.
์ด ํ์ด์ง์๋ ๊ธฐ๋ณธ ํด๋์ค์ ๋ํ API ๋ฌธ์๊ฐ ํฌํจ๋์ด ์์ต๋๋ค.
## ์์ด์ ํธ [[agents]]
์ฐ๋ฆฌ๋ ๊ธฐ๋ณธ [`Agent`] ํด๋์ค๋ฅผ ๊ธฐ๋ฐ์ผ๋ก ๋ ๊ฐ์ง ์ ํ์ ์์ด์ ํธ๋ฅผ ์ ๊ณตํฉ๋๋ค:
- [`CodeAgent`]๋ ํ ๋ฒ์ ๋์ํฉ๋๋ค. ์์์ ํด๊ฒฐํ๊ธฐ ์ํด ์ฝ๋๋ฅผ ์์ฑํ ๋ค์, ๋ฐ๋ก ์คํํฉ๋๋ค.
- [`ReactAgent`]๋ ๋จ๊ณ๋ณ๋ก ๋์ํ๋ฉฐ, ๊ฐ ๋จ๊ณ๋ ํ๋์ ์๊ฐ, ํ๋์ ๋๊ตฌ ํธ์ถ ๋ฐ ์คํ์ผ๋ก ๊ตฌ์ฑ๋ฉ๋๋ค. ์ด ์์ด์ ํธ์๋ ๋ ๊ฐ์ง ํด๋์ค๊ฐ ์์ต๋๋ค:
- [`ReactJsonAgent`]๋ ๋๊ตฌ ํธ์ถ์ JSON์ผ๋ก ์์ฑํฉ๋๋ค.
- [`ReactCodeAgent`]๋ ๋๊ตฌ ํธ์ถ์ Python ์ฝ๋๋ก ์์ฑํฉ๋๋ค.
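As a minimal, hedged sketch of how these classes fit together, a `ReactCodeAgent` can be driven by the `HfApiEngine` described further down this page. The argument names below follow the API reference in this document, but may need adjusting to your installed version, and the hosted engine may require a Hugging Face API token.

```python
from transformers import HfApiEngine, ReactCodeAgent

# The engine turns chat messages into text; the agent plans tool calls as Python code.
llm_engine = HfApiEngine()  # defaults to a hosted inference endpoint
agent = ReactCodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True)

result = agent.run("Convert 42 degrees Celsius to Fahrenheit and explain the formula.")
print(result)
```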
### Agent [[agent]]
[[autodoc]] Agent
### CodeAgent [[codeagent]]
[[autodoc]] CodeAgent
### React agents [[react-agents]]
[[autodoc]] ReactAgent
[[autodoc]] ReactJsonAgent
[[autodoc]] ReactCodeAgent
## Tools [[tools]]
### load_tool [[loadtool]]
[[autodoc]] load_tool
### Tool [[tool]]
[[autodoc]] Tool
### Toolbox [[toolbox]]
[[autodoc]] Toolbox
### PipelineTool [[pipelinetool]]
[[autodoc]] PipelineTool
### launch_gradio_demo [[launchgradiodemo]]
[[autodoc]] launch_gradio_demo
### ToolCollection [[toolcollection]]
[[autodoc]] ToolCollection
## ์์ง [[engines]]
์์ด์ ํธ ํ๋ ์์ํฌ์์ ์ฌ์ฉํ ์ ์๋ ์์ง์ ์์ ๋กญ๊ฒ ๋ง๋ค๊ณ ์ฌ์ฉํ ์ ์์ต๋๋ค.
์ด ์์ง๋ค์ ๋ค์๊ณผ ๊ฐ์ ์ฌ์์ ๊ฐ์ง๊ณ ์์ต๋๋ค:
1. ์๋ ฅ(`List[Dict[str, str]]`)์ ๋ํ [๋ฉ์์ง ํ์](../chat_templating.md)์ ๋ฐ๋ฅด๊ณ  ๋ฌธ์์ด์ ๋ฐํํด์ผ ํฉ๋๋ค.
2. ์ธ์ `stop_sequences`์ ์ํ์ค๊ฐ ์ ๋ฌ๋๊ธฐ *์ ์* ์ถ๋ ฅ์ ์์ฑํ๋ ๊ฒ์ ์ค์งํด์ผ ํฉ๋๋ค.
### HfApiEngine [[HfApiEngine]]
ํธ์๋ฅผ ์ํด, ์์ ์ฌํญ์ ๊ตฌํํ๊ณ ๋๊ท๋ชจ ์ธ์ด ๋ชจ๋ธ ์คํ์ ์ํด ์ถ๋ก ์๋ํฌ์ธํธ๋ฅผ ์ฌ์ฉํ๋ `HfApiEngine`์ ์ถ๊ฐํ์ต๋๋ค.
```python
>>> from transformers import HfApiEngine
>>> messages = [
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
... {"role": "user", "content": "No need to help, take it easy."},
... ]
>>> HfApiEngine()(messages, stop_sequences=["conversation"])
"That's very kind of you to say! It's always nice to have a relaxed "
```
[[autodoc]] HfApiEngine
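Because the engine contract above is simply "messages in, string out, stop before any stop sequence", a custom engine can be a plain callable. The sketch below wraps a local `text-generation` pipeline; the model name is only an assumption for illustration, and the class is not part of the library.

```python
from transformers import pipeline

class LocalPipelineEngine:
    """Minimal custom engine: follows the message format and honors stop_sequences."""

    def __init__(self, model="HuggingFaceTB/SmolLM-360M-Instruct"):  # assumed example model
        self.pipe = pipeline("text-generation", model=model)

    def __call__(self, messages, stop_sequences=None):
        # Render the chat messages with the model's own chat template.
        prompt = self.pipe.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        text = self.pipe(prompt, max_new_tokens=128, return_full_text=False)[0]["generated_text"]
        # Cut the output before the first stop sequence, as the spec requires.
        for stop in stop_sequences or []:
            text = text.split(stop)[0]
        return text
```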
## ์์ด์ ํธ ์ ํ [[agent-types]]
์์ด์ ํธ๋ ๋๊ตฌ ๊ฐ์ ๋ชจ๋ ์ ํ์ ๊ฐ์ฒด๋ฅผ ์ฒ๋ฆฌํ ์ ์์ต๋๋ค; ๋๊ตฌ๋ ์์ ํ ๋ฉํฐ๋ชจ๋ฌ์ด๋ฏ๋ก ํ
์คํธ, ์ด๋ฏธ์ง, ์ค๋์ค, ๋น๋์ค ๋ฑ ๋ค์ํ ์ ํ์ ์๋ฝํ๊ณ ๋ฐํํ ์ ์์ต๋๋ค.
๋๊ตฌ ๊ฐ์ ํธํ์ฑ์ ๋์ด๊ณ ipython (jupyter, colab, ipython ๋
ธํธ๋ถ, ...)์์ ์ด๋ฌํ
๋ฐํ ๊ฐ์ ์ฌ๋ฐ๋ฅด๊ฒ ๋ ๋๋งํ๊ธฐ ์ํด ์ด๋ฌํ ์ ํ์ ์ค์ฌ์ผ๋ก ๋ํผ ํด๋์ค๋ฅผ
๊ตฌํํฉ๋๋ค.
๋ํ๋ ๊ฐ์ฒด๋ ์ฒ์๊ณผ ๋์ผํ๊ฒ ์๋ํด์ผ ํฉ๋๋ค; ํ์คํธ ๊ฐ์ฒด๋ ์ฌ์ ํ ๋ฌธ์์ด๋ก ์๋ํด์ผ ํ๋ฉฐ, ์ด๋ฏธ์ง ๊ฐ์ฒด๋ ์ฌ์ ํ `PIL.Image`๋ก ์๋ํด์ผ ํฉ๋๋ค.
์ด๋ฌํ ์ ํ์๋ ์ธ ๊ฐ์ง ํน์ ๋ชฉ์ ์ด ์์ต๋๋ค:
- `to_raw`๋ฅผ ํธ์ถํ๋ฉด ๊ธฐ๋ณธ ๊ฐ์ฒด๊ฐ ๋ฐํ๋์ด์ผ ํฉ๋๋ค.
- `to_string`์ ํธ์ถํ๋ฉด ๊ฐ์ฒด๊ฐ ๋ฌธ์์ด๋ก ๋ฐํ๋์ด์ผ ํฉ๋๋ค:
`AgentText`์ ๊ฒฝ์ฐ ๋ฌธ์์ด์ด ๋ ์ ์์ง๋ง, ๋ค๋ฅธ ๊ฒฝ์ฐ์๋ ๊ฐ์ฒด์ ์ง๋ ฌํ๋ ๋ฒ์ ์ ๊ฒฝ๋ก์ผ ์ ์์ต๋๋ค.
- ipython ์ปค๋์์ ํ์ํ ๋ ๊ฐ์ฒด๊ฐ ์ฌ๋ฐ๋ฅด๊ฒ ํ์๋์ด์ผ ํฉ๋๋ค.
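A brief, hedged illustration of those three behaviours using `AgentText`, the simplest of these wrapper types:

```python
from transformers.agents.agent_types import AgentText

text = AgentText("The answer is 42.")

print(text.to_raw())     # the underlying Python object (here, a plain string)
print(text.to_string())  # string form; identical to the raw value for AgentText
print(str(text) + "!")   # still behaves like a normal string in ordinary code
```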
### AgentText [[agenttext]]
[[autodoc]] transformers.agents.agent_types.AgentText
### AgentImage [[agentimage]]
[[autodoc]] transformers.agents.agent_types.AgentImage
### AgentAudio [[agentaudio]]
[[autodoc]] transformers.agents.agent_types.AgentAudio