From 3401b84076fcb711fad9c5542e6391977d650ab1 Mon Sep 17 00:00:00 2001 From: Isaac Corley <22203655+isaaccorley@users.noreply.github.com> Date: Mon, 26 Feb 2024 19:08:58 -0600 Subject: [PATCH] Overhaul 2.1 Remove Dependencies / Add Full Timm Support (#3) * remove dependencies and add full timm support * update readme * add unet head upsampling param * fix flake8 * update readme * make output_stride optional, not all timm models support an output stride arg --- README.md | 490 +-- assets/pretrained_weights.webp | Bin 0 -> 107262 bytes pyproject.toml | 8 +- requirements/required.txt | 7 +- requirements/tests.txt | 4 +- scripts/list_compatible_encoders.py | 40 + tests/test_models.py | 236 +- torchseg/__init__.py | 7 +- torchseg/base/heads.py | 13 +- torchseg/base/modules.py | 53 - torchseg/decoders/deeplabv3/model.py | 19 +- torchseg/decoders/fpn/model.py | 12 +- torchseg/decoders/linknet/model.py | 12 +- torchseg/decoders/manet/model.py | 12 +- torchseg/decoders/pan/model.py | 13 +- torchseg/decoders/pspnet/model.py | 12 +- torchseg/decoders/unet/model.py | 17 +- torchseg/decoders/unetplusplus/model.py | 12 +- torchseg/encoders/__init__.py | 153 +- torchseg/encoders/_base.py | 59 - torchseg/encoders/_utils.py | 57 - torchseg/encoders/densenet.py | 130 - torchseg/encoders/dpn.py | 146 - torchseg/encoders/efficientnet.py | 152 - torchseg/encoders/inceptionresnetv2.py | 68 - torchseg/encoders/inceptionv4.py | 68 - torchseg/encoders/mix_transformer.py | 25 +- torchseg/encoders/mobilenet.py | 55 - torchseg/encoders/mobileone.py | 558 --- torchseg/encoders/resnet.py | 211 - torchseg/encoders/senet.py | 149 - torchseg/encoders/supported.py | 5025 +++++++++++++++++++++++ torchseg/encoders/timm.py | 147 + torchseg/encoders/timm_efficientnet.py | 459 --- torchseg/encoders/timm_gernet.py | 124 - torchseg/encoders/timm_mobilenetv3.py | 150 - torchseg/encoders/timm_regnet.py | 349 -- torchseg/encoders/timm_res2net.py | 164 - torchseg/encoders/timm_resnest.py | 209 - torchseg/encoders/timm_sknet.py | 104 - torchseg/encoders/timm_universal.py | 38 - torchseg/encoders/vgg.py | 133 - torchseg/encoders/xception.py | 75 - 43 files changed, 5699 insertions(+), 4076 deletions(-) create mode 100644 assets/pretrained_weights.webp create mode 100644 scripts/list_compatible_encoders.py delete mode 100644 torchseg/encoders/_base.py delete mode 100644 torchseg/encoders/_utils.py delete mode 100644 torchseg/encoders/densenet.py delete mode 100644 torchseg/encoders/dpn.py delete mode 100644 torchseg/encoders/efficientnet.py delete mode 100644 torchseg/encoders/inceptionresnetv2.py delete mode 100644 torchseg/encoders/inceptionv4.py delete mode 100644 torchseg/encoders/mobilenet.py delete mode 100644 torchseg/encoders/mobileone.py delete mode 100644 torchseg/encoders/resnet.py delete mode 100644 torchseg/encoders/senet.py create mode 100644 torchseg/encoders/supported.py create mode 100644 torchseg/encoders/timm.py delete mode 100644 torchseg/encoders/timm_efficientnet.py delete mode 100644 torchseg/encoders/timm_gernet.py delete mode 100644 torchseg/encoders/timm_mobilenetv3.py delete mode 100644 torchseg/encoders/timm_regnet.py delete mode 100644 torchseg/encoders/timm_res2net.py delete mode 100644 torchseg/encoders/timm_resnest.py delete mode 100644 torchseg/encoders/timm_sknet.py delete mode 100644 torchseg/encoders/timm_universal.py delete mode 100644 torchseg/encoders/vgg.py delete mode 100644 torchseg/encoders/xception.py diff --git a/README.md b/README.md index 89d0f68a..ace89c81 100644 --- a/README.md +++ 
b/README.md
@@ -1,38 +1,129 @@
 ## TorchSeg

-TorchSeg is an actively maintained and up-to-date fork of the Segmentation Models PyTorch (torchseg).
+TorchSeg is an actively maintained and up-to-date fork of the [Segmentation Models PyTorch (smp) library](https://github.com/qubvel/segmentation_models.pytorch).
+
+#### Updates
+
+The goal of this fork is to 1) provide maintenance support for the original library and 2) add features relevant to modern semantic segmentation. Since the fork, this library has added the following features:
+
+- Improved support for [PyTorch Image Models (timm)](https://github.com/huggingface/pytorch-image-models) encoders with feature extraction functionality (852/1017 = ~84% of timm models). This includes the typical CNN models such as `ResNet`, `EfficientNet`, etc., but now extends to modern architectures like `ConvNext`, `Swin`, `PoolFormer`, `MaxViT`, and more!
+- Support for pretrained Vision Transformer (ViT) encoders. Currently, timm ViTs do not support feature extraction out of the box. However, we have added support for extracting intermediate transformer encoder layer feature maps to obtain this functionality. We support 100+ ViT-based models including `ViT`, `DeiT`, and `FlexiViT`!
+
+Additionally, we have made the following software engineering improvements:
+
+- More thorough testing and CI
+- Formatting using `black`, `isort`, `flake8`, `mypy`
+- Reduced dependence on unmaintained libraries (now depends only on `torch`, `timm`, and `einops`)
+- Reduced the lines of code to maintain (removed custom utils, metrics, and encoders) in favor of newer libraries such as `torchmetrics` and `timm`
+
 #### Features

 The main features of this library are:

 - High level API (just two lines to create a neural network)
- - 9 models architectures for binary and multi class segmentation (including legendary Unet)
- - 124 available encoders (and 500+ encoders from [timm](https://github.com/rwightman/pytorch-image-models))
+ - 9 segmentation architectures for binary and multi-class segmentation (including U-Net, DeepLabV3)
+ - Support for 852/1017 (~84%) of available encoders from [timm](https://github.com/rwightman/pytorch-image-models)
 - All encoders have pre-trained weights for faster and better convergence
- - Popular losses for training routines
+ - Popular segmentation loss functions

 #### Example Usage

-Segmentation model is just a PyTorch nn.Module, which can be created as easy as:
+TorchSeg models at their base are just torch nn.Modules. They can be created as follows:

 ```python
 import torchseg

 model = torchseg.Unet(
-    encoder_name="resnet34",        # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
-    encoder_weights="imagenet",     # use `imagenet` pre-trained weights for encoder initialization
-    in_channels=1,                  # model input channels (1 for gray-scale images, 3 for RGB, etc.)
-    classes=3,                      # model output channels (number of classes in your dataset)
+    encoder_name="resnet50",
+    encoder_weights=True,
+    in_channels=3,
+    classes=3,
+)
+```
+
+TorchSeg has an `encoder_params` feature which passes additional parameters to `timm.create_model()` when defining an encoder backbone. One can specify different activations, normalization layers, and more, as shown below.
+
+You can also define a `functools.partial` callable as an activation/normalization layer, as in the sketch below.
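+For instance, a minimal sketch of the `functools.partial` route (the `negative_slope` value here is an arbitrary illustration; timm constructs the layers itself, so the partial should leave timm-supplied arguments such as `inplace` unbound):
+
+```python
+from functools import partial
+
+import torch.nn as nn
+import torchseg
+
+# Sketch: pass a callable instead of a layer-name string. timm instantiates
+# the activation per block, so only bind the arguments you want to override.
+model = torchseg.Unet(
+    encoder_name="resnet50",
+    encoder_weights=True,
+    in_channels=3,
+    classes=3,
+    encoder_params={"act_layer": partial(nn.LeakyReLU, negative_slope=0.05)},
+)
+```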
+See the timm docs for more information on available [activations](https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/create_act.py) and [normalization](https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/create_norm.py) layers. You can even use pretrained weights while changing the activations/normalizations!
+
+```python
+model = torchseg.Unet(
+    encoder_name="resnet50",
+    encoder_weights=True,
+    in_channels=3,
+    classes=3,
+    encoder_params={
+        "act_layer": "prelu",
+        "norm_layer": "layernorm"
+    }
+)
+```
+
+Some models like `Swin` and `ConvNext` downsample by a factor of 4 in the first block (stem) and then by a factor of 2 in each later block, giving only `depth=4` feature blocks. This results in a decoder output at half the input resolution. To get an output the same size as the input, pass `head_upsampling=2`, which upsamples once more prior to the segmentation head.
+
+```python
+model = torchseg.Unet(
+    "convnextv2_tiny",
+    in_channels=3,
+    classes=2,
+    encoder_weights=True,
+    encoder_depth=4,
+    decoder_channels=(256, 128, 64, 32),
+    head_upsampling=2
+)
+
+model = torchseg.Unet(
+    "swin_tiny_patch4_window7_224",
+    in_channels=3,
+    classes=2,
+    encoder_weights=True,
+    encoder_depth=4,
+    decoder_channels=(256, 128, 64, 32),
+    head_upsampling=2,
+    encoder_params={"img_size": 256}  # need to define img_size since swin is a ViT hybrid
+)
+
+model = torchseg.Unet(
+    "maxvit_small_tf_224",
+    in_channels=3,
+    classes=2,
+    encoder_weights=True,
+    encoder_depth=5,
+    decoder_channels=(256, 128, 64, 32, 16),
+    encoder_params={"img_size": 256}
+)
+```

- - see [table](#architectures) with available model architectures
- - see [table](#encoders) with available encoders and their corresponding weights

+TorchSeg supports pretrained ViT encoders from timm by extracting the intermediate transformer block features specified by the `encoder_indices` and `encoder_depth` arguments.
+
+You will also need to define `scale_factors` for upsampling the feature layers to the resolutions expected by the decoders. For U-Net `depth=5` this would be `scales=(8, 4, 2, 1, 0.5)`; for `depth=4`, `scales=(4, 2, 1, 0.5)`; for `depth=3`, `scales=(2, 1, 0.5)`; and so on.
+
+Another benefit of using timm is that, when you pass in a new `img_size`, timm automatically interpolates the ViT positional embeddings to your new image size, which creates a different number of patch tokens.
+
+```python
+import torch
+import torchseg
+
+model = torchseg.Unet(
+    "vit_small_patch16_224",
+    in_channels=8,
+    classes=2,
+    encoder_depth=5,
+    encoder_indices=(2, 4, 6, 8, 10),  # which intermediate blocks to extract features from
+    encoder_weights=True,
+    decoder_channels=(256, 128, 64, 32, 16),
+    encoder_params={  # additional params passed to timm.create_model and the vit encoder
+        "scale_factors": (8, 4, 2, 1, 0.5),  # resize scale_factors for patch size 16 and 5 layers
+        "img_size": 256,  # timm automatically interpolates the positional embeddings to your new image size
+    },
+)
+```

 ### Models

-#### Architectures
+#### Architectures (Decoders)

 - Unet [[paper](https://arxiv.org/abs/1505.04597)]
 - Unet++ [[paper](https://arxiv.org/pdf/1807.10165.pdf)]
@@ -46,339 +137,66 @@ model = torchseg.Unet(

 #### Encoders

-The following is a list of supported encoders in TorchSeg. Select the appropriate family of encoders and click to expand the table and select a specific encoder and its pre-trained weights (`encoder_name` and `encoder_weights` parameters).
-
-
-ResNet -
- -| Encoder | Weights | Params, M | -| --------- | :-------------------: | :-------: | -| resnet18 | imagenet / ssl / swsl | 11M | -| resnet34 | imagenet | 21M | -| resnet50 | imagenet / ssl / swsl | 23M | -| resnet101 | imagenet | 42M | -| resnet152 | imagenet | 58M | - -
-
- -
-ResNeXt -
- -| Encoder | Weights | Params, M | -| ----------------- | :-------------------------------: | :-------: | -| resnext50_32x4d | imagenet / ssl / swsl | 22M | -| resnext101_32x4d | ssl / swsl | 42M | -| resnext101_32x8d | imagenet / instagram / ssl / swsl | 86M | -| resnext101_32x16d | instagram / ssl / swsl | 191M | -| resnext101_32x32d | instagram | 466M | -| resnext101_32x48d | instagram | 826M | - -
-
- -
-ResNeSt -
- -| Encoder | Weights | Params, M | -| ----------------------- | :------: | :-------: | -| timm-resnest14d | imagenet | 8M | -| timm-resnest26d | imagenet | 15M | -| timm-resnest50d | imagenet | 25M | -| timm-resnest101e | imagenet | 46M | -| timm-resnest200e | imagenet | 68M | -| timm-resnest269e | imagenet | 108M | -| timm-resnest50d_4s2x40d | imagenet | 28M | -| timm-resnest50d_1s4x24d | imagenet | 23M | - -
-
- -
-Res2Ne(X)t -
- -| Encoder | Weights | Params, M | -| ---------------------- | :------: | :-------: | -| timm-res2net50_26w_4s | imagenet | 23M | -| timm-res2net101_26w_4s | imagenet | 43M | -| timm-res2net50_26w_6s | imagenet | 35M | -| timm-res2net50_26w_8s | imagenet | 46M | -| timm-res2net50_48w_2s | imagenet | 23M | -| timm-res2net50_14w_8s | imagenet | 23M | -| timm-res2next50 | imagenet | 22M | - -
-
- -
-RegNet(x/y) -
- -| Encoder | Weights | Params, M | -| ---------------- | :------: | :-------: | -| timm-regnetx_002 | imagenet | 2M | -| timm-regnetx_004 | imagenet | 4M | -| timm-regnetx_006 | imagenet | 5M | -| timm-regnetx_008 | imagenet | 6M | -| timm-regnetx_016 | imagenet | 8M | -| timm-regnetx_032 | imagenet | 14M | -| timm-regnetx_040 | imagenet | 20M | -| timm-regnetx_064 | imagenet | 24M | -| timm-regnetx_080 | imagenet | 37M | -| timm-regnetx_120 | imagenet | 43M | -| timm-regnetx_160 | imagenet | 52M | -| timm-regnetx_320 | imagenet | 105M | -| timm-regnety_002 | imagenet | 2M | -| timm-regnety_004 | imagenet | 3M | -| timm-regnety_006 | imagenet | 5M | -| timm-regnety_008 | imagenet | 5M | -| timm-regnety_016 | imagenet | 10M | -| timm-regnety_032 | imagenet | 17M | -| timm-regnety_040 | imagenet | 19M | -| timm-regnety_064 | imagenet | 29M | -| timm-regnety_080 | imagenet | 37M | -| timm-regnety_120 | imagenet | 49M | -| timm-regnety_160 | imagenet | 80M | -| timm-regnety_320 | imagenet | 141M | - -
-
- -
-GERNet -
- -| Encoder | Weights | Params, M | -| ------------- | :------: | :-------: | -| timm-gernet_s | imagenet | 6M | -| timm-gernet_m | imagenet | 18M | -| timm-gernet_l | imagenet | 28M | - -
-
- -
-SE-Net -
- -| Encoder | Weights | Params, M | -| ------------------- | :------: | :-------: | -| senet154 | imagenet | 113M | -| se_resnet50 | imagenet | 26M | -| se_resnet101 | imagenet | 47M | -| se_resnet152 | imagenet | 64M | -| se_resnext50_32x4d | imagenet | 25M | -| se_resnext101_32x4d | imagenet | 46M | - -
-
- -
-SK-ResNe(X)t -
- -| Encoder | Weights | Params, M | -| ---------------------- | :------: | :-------: | -| timm-skresnet18 | imagenet | 11M | -| timm-skresnet34 | imagenet | 21M | -| timm-skresnext50_32x4d | imagenet | 25M | - -
-
- -
-DenseNet -
- -| Encoder | Weights | Params, M | -| ----------- | :------: | :-------: | -| densenet121 | imagenet | 6M | -| densenet169 | imagenet | 12M | -| densenet201 | imagenet | 18M | -| densenet161 | imagenet | 26M | - -
-
- -
-Inception -
- -| Encoder | Weights | Params, M | -| ----------------- | :-----------------------------: | :-------: | -| inceptionresnetv2 | imagenet / imagenet+background | 54M | -| inceptionv4 | imagenet / imagenet+background | 41M | -| xception | imagenet | 22M | - -
-
- -
-EfficientNet -
- -| Encoder | Weights | Params, M | -| ----------------------- | :--------------------------------: | :-------: | -| efficientnet-b0 | imagenet | 4M | -| efficientnet-b1 | imagenet | 6M | -| efficientnet-b2 | imagenet | 7M | -| efficientnet-b3 | imagenet | 10M | -| efficientnet-b4 | imagenet | 17M | -| efficientnet-b5 | imagenet | 28M | -| efficientnet-b6 | imagenet | 40M | -| efficientnet-b7 | imagenet | 63M | -| timm-efficientnet-b0 | imagenet / advprop / noisy-student | 4M | -| timm-efficientnet-b1 | imagenet / advprop / noisy-student | 6M | -| timm-efficientnet-b2 | imagenet / advprop / noisy-student | 7M | -| timm-efficientnet-b3 | imagenet / advprop / noisy-student | 10M | -| timm-efficientnet-b4 | imagenet / advprop / noisy-student | 17M | -| timm-efficientnet-b5 | imagenet / advprop / noisy-student | 28M | -| timm-efficientnet-b6 | imagenet / advprop / noisy-student | 40M | -| timm-efficientnet-b7 | imagenet / advprop / noisy-student | 63M | -| timm-efficientnet-b8 | imagenet / advprop | 84M | -| timm-efficientnet-l2 | noisy-student | 474M | -| timm-efficientnet-lite0 | imagenet | 4M | -| timm-efficientnet-lite1 | imagenet | 5M | -| timm-efficientnet-lite2 | imagenet | 6M | -| timm-efficientnet-lite3 | imagenet | 8M | -| timm-efficientnet-lite4 | imagenet | 13M | - -
-
- -
-MobileNet -
- -| Encoder | Weights | Params, M | -| ---------------------------------- | :------: | :-------: | -| mobilenet_v2 | imagenet | 2M | -| timm-mobilenetv3_large_075 | imagenet | 1.78M | -| timm-mobilenetv3_large_100 | imagenet | 2.97M | -| timm-mobilenetv3_large_minimal_100 | imagenet | 1.41M | -| timm-mobilenetv3_small_075 | imagenet | 0.57M | -| timm-mobilenetv3_small_100 | imagenet | 0.93M | -| timm-mobilenetv3_small_minimal_100 | imagenet | 0.43M | - -
-
- -
-DPN -
- -| Encoder | Weights | Params, M | -| ------- | :---------: | :-------: | -| dpn68 | imagenet | 11M | -| dpn68b | imagenet+5k | 11M | -| dpn92 | imagenet+5k | 34M | -| dpn98 | imagenet | 58M | -| dpn107 | imagenet+5k | 84M | -| dpn131 | imagenet | 76M | - -
-
- -
-VGG -
- -| Encoder | Weights | Params, M | -| -------- | :------: | :-------: | -| vgg11 | imagenet | 9M | -| vgg11_bn | imagenet | 9M | -| vgg13 | imagenet | 9M | -| vgg13_bn | imagenet | 9M | -| vgg16 | imagenet | 14M | -| vgg16_bn | imagenet | 14M | -| vgg19 | imagenet | 20M | -| vgg19_bn | imagenet | 20M | - -
-
- -
-Mix Vision Transformer -
- -Backbone from SegFormer pretrained on Imagenet! Can be used with other decoders from package, you can combine Mix Vision Transformer with Unet, FPN and others! - -Limitations: - - - encoder is **not** supported by Linknet, Unet++ - - encoder is supported by FPN only for encoder **depth = 5** - -| Encoder | Weights | Params, M | -| ------- | :------: | :-------: | -| mit_b0 | imagenet | 3M | -| mit_b1 | imagenet | 13M | -| mit_b2 | imagenet | 24M | -| mit_b3 | imagenet | 44M | -| mit_b4 | imagenet | 60M | -| mit_b5 | imagenet | 81M | - -
-
- -
-MobileOne -
- -Apple's "sub-one-ms" Backbone pretrained on Imagenet! Can be used with all decoders. - -Note: In the official github repo the s0 variant has additional num_conv_branches, leading to more params than s1. - -| Encoder | Weights | Params, M | -| ------------ | :------: | :-------: | -| mobileone_s0 | imagenet | 4.6M | -| mobileone_s1 | imagenet | 4.0M | -| mobileone_s2 | imagenet | 6.5M | -| mobileone_s3 | imagenet | 8.8M | -| mobileone_s4 | imagenet | 13.6M | - -
-
+TorchSeg relies entirely on the [timm](https://github.com/huggingface/pytorch-image-models) library for pretrained encoder support. This means that TorchSeg supports any timm model which has `features_only` feature extraction functionality. Additionally, we support any ViT model with a `get_intermediate_layers` method. This results in a total of 852/1017 (~84%) encoders from timm, including `ResNet`, `Swin`, `ConvNext`, `ViT`, and more!
+
+To list the supported encoders:

-\* `ssl`, `swsl` - semi-supervised and weakly-supervised learning on ImageNet ([repo](https://github.com/facebookresearch/semi-supervised-ImageNet1K-models)).

+```python
+import torchseg

-#### Timm Encoders

+torchseg.list_encoders()
+```

-Pytorch Image Models (a.k.a. timm) has a lot of pretrained models and interface which allows using these models as encoders in torchseg, however, not all models are supported

+We have additionally pulled the feature extractor metadata of each model with `features_only` support from timm at `output_stride=32`. This metadata provides information such as the number of intermediate layers, the channels of each layer, the layer names, and the downsampling reduction.

- - not all transformer models have ``features_only`` functionality implemented that is required for encoder
- - some models have inappropriate strides

+```python
+import torchseg

-Total number of supported encoders: 549

+metadata = torchseg.encoders.TIMM_ENCODERS["convnext_base"]
+print(metadata)
+
+"""
+{
+    'channels': [128, 256, 512, 1024],
+    'indices': (0, 1, 2, 3),
+    'module': ['stages.0', 'stages.1', 'stages.2', 'stages.3'],
+    'reduction': [4, 8, 16, 32],
+}
+"""
+
+metadata = torchseg.encoders.TIMM_ENCODERS["resnet50"]
+print(metadata)
+
+"""
+{
+    'channels': [64, 256, 512, 1024, 2048],
+    'indices': (0, 1, 2, 3, 4),
+    'module': ['act1', 'layer1', 'layer2', 'layer3', 'layer4'],
+    'reduction': [2, 4, 8, 16, 32]
+}
+"""
+```

-### Models API
+#### Models API

- - `model.encoder` - pretrained backbone to extract features of different spatial resolution
- - `model.decoder` - depends on models architecture (`Unet`/`Linknet`/`PSPNet`/`FPN`)
- - `model.segmentation_head` - last block to produce required number of mask channels (include also optional upsampling and activation)
+ - `model.encoder` - pretrained backbone to extract intermediate features
+ - `model.decoder` - network for processing the intermediate features to the original image resolution (`Unet`, `DeepLabv3+`, `FPN`)
+ - `model.segmentation_head` - final block producing the mask output (includes optional upsampling and activation)
 - `model.classification_head` - optional block which creates a classification head on top of the encoder
 - `model.forward(x)` - sequentially passes `x` through the model's encoder, decoder, and segmentation head (and classification head if specified)

 ##### Input channels

-Input channels parameter allows you to create models, which process tensors with arbitrary number of channels.
-If you use pretrained weights from imagenet - weights of first convolution will be reused. For
-1-channel case it would be a sum of weights of first convolution layer, otherwise channels would be
-populated with weights like `new_weight[:, i] = pretrained_weight[:, i % 3]` and than scaled with `new_weight * 3 / new_in_channels`.
-```python
-model = torchseg.FPN('resnet34', in_channels=1)
-mask = model(torch.ones([1, 1, 64, 64]))
-```
+Timm encoders support pretrained weights with an arbitrary number of input channels by repeating the pretrained weights across channels when `in_channels` > 3.
For example, if `in_channels=6`, RGB ImageNet pretrained weights in the initial layer would be repeated `RGBRGB` to avoid random initialization. For `in_channels=7` this would result in `RGBRGBR`. Below is a diagram to visualize this method. -##### Auxiliary classification output +
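+As a minimal usage sketch of this behavior (the shapes assume a standard ResNet encoder; the diagram below visualizes the weight repetition):
+
+```python
+import torch
+import torchseg
+
+# 6-band input (e.g. two stacked RGB frames): the pretrained RGB stem weights
+# are repeated RGBRGB across the input channels instead of being randomly
+# initialized.
+model = torchseg.Unet("resnet50", encoder_weights=True, in_channels=6, classes=2)
+mask = model(torch.randn(1, 6, 256, 256))  # -> shape (1, 2, 256, 256)
+```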

+
+

-All models support `aux_params` parameters, which is default set to `None`.
-If `aux_params = None` then classification auxiliary output is not created, else
-model produce not only `mask`, but also `label` output with shape `NC`.
+##### Auxiliary Classifier
+
+All models support an optional auxiliary classifier head through the use of `aux_params`. If `aux_params != None` then the
+model will produce a `label` output in addition to the `mask` output, with shape `(N, C)`.

 Classification head consists of GlobalPooling->Dropout(optional)->Linear->Activation(optional) layers, which can be
 configured by `aux_params` as follows:
@@ -386,18 +204,20 @@ configured by `aux_params` as follows:
 aux_params=dict(
     pooling='avg',             # one of 'avg', 'max'
     dropout=0.5,               # dropout ratio, default is None
-    activation='sigmoid',      # activation function, default is None
+    activation=nn.Sigmoid(),   # activation function, default is Identity
     classes=4,                 # define number of output labels
 )
-model = torchseg.Unet('resnet34', classes=4, aux_params=aux_params)
+model = torchseg.Unet('resnet18', classes=4, aux_params=aux_params)
 mask, label = model(x)
 ```

 ##### Depth

-Depth parameter specify a number of downsampling operations in encoder, so you can make
-your model lighter if specify smaller `depth`.
+Depth represents the number of downsampling operations in the encoder, so you can make
+your model lighter by specifying a smaller `depth`. Defaults to `depth=5`.
+
+Note that some models like `ConvNext` and `Swin` only have 4 intermediate feature blocks. Therefore, to use these encoders, set `encoder_depth=4` (with a matching 4-tuple of `decoder_channels`). This can be found in the metadata above.

 ```python
-model = torchseg.Unet('resnet34', encoder_depth=4)
+model = torchseg.Unet('resnet50', encoder_depth=4, decoder_channels=(256, 128, 64, 32))
 ```
diff --git a/assets/pretrained_weights.webp b/assets/pretrained_weights.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ce14fa1fb44b68df9e65a8efc51906cbf5419631
GIT binary patch
literal 107262
[107262 bytes of base85-encoded image data omitted]
zuhFe&ZUi@5004%fy?aNXOoi_XXaJ$C<7h8R?V@V*p7m94nkRqnC>qI@y)952x^e9D zVjt**(W~01v&)K+r zb2-vTXH3HfK!qpG>B)C+tvCEje6(b;>`kv?M*j^;y#F*;cZiH+eE7b6`_{Kq-&&(Cx`?!ZXc0iP{Aep$pcO3| zL=7UMjN&NgJ7c9+P)w>D;d_UVcPYLLzw0y9Ny(YFz#a%#_e+*V9QZxvIScRhu% zq^4Nl^T1eONJCBue9ZdIt_CReHDhMNCXf!pA%Myx!X;D?4@evoCH(KG+5@N){YJ-s z^FJNopjZ5jW4<1jU9xmtiz}?g(#1I*dLShN&>Eqts`%Pkli{aIOJG!F=Jrkd^n1>< zAKO94QC!C@XbZYY1BI52eD`PAG;FXogYSXwl{yj2Z52t(kV=Ay^Z9&E5;dP8BcHiM zGrf&y`Bw;9-oEk^189(BmI#rA7<;kLYAz(MA{W*KF2JH(;lhf`p+^|=o)_qI<%o4n zz}%Az^a8WF8aXu?W^YrCk+vY|5kUpACNxr#2`X^;%|)$qd4NR$@&6u$rERC=WOmq& zUY#R9t8R1E{?h*RzpFcXCnq1I$S&Ne5}mKfw=q-O7#Y4sF*WKn<5h?1s>M>&JvS_i zk0C1n%8I!FlDHYxQau2403-n}xIZZ$2ID55ZPk;E8@h}ZaK;JbOxaft6rs!atV*o|I|X~qJd^{YUHh&)7P0? zqeV*Qglz#x1JVtkYS$VVV6%Ae&_5Ov%%@EM@CKRq@h$Vlm38&0@#gkx>$m;~o7((Q zwlAq(G-xzh1Dd%8twF-^+PTD$SnOm5tC@>E)pR@!zxrxg*=(kecF=j3Vc3=jyNX6KVWLMB%UV=*5vAb3x`d>cX<}78Qo(kPoH!fVfJ^(I02tIu^TokXw8gDU8 z3*t`W63$O)Lz6Hc=5su8I@LDrV}8*r*v}+Zs4u`|x9G020ZpIr>;qdKv~sBeY%c%>lzkAO z4A@;E8Ho@9B}Stegq1509xjDC?!DXjXU1NZrRiK~Q+wjW>o`Xr=E2cGd|U(NF>q7@ z3JFI!2?d5WbQmE=szTH=Zo_l!Vavqr_EN=xQFoZ46ad>hFs$%TgzX&=W(WzQqSVv| zHv>~UI)|yXSwaQisDO&vRDiRl0&GfD*He#klBBZ-;Hwjrm;jzQJ~g0c)>wa};*n?G z%p0@W8<);T`qM`59?vOa7|g0){a^ds(Q|07V~f4KyukG^tEZKF zf&dOEC=>$S0VD+gyayLaR}cWevZ7J9(iFh4J@?x`8}gl9pO@Et-en9D7vF$qKPMh- zMa+uuAt1nU)qoAaMyMV(BpO&uis@@spc;i(o@GOAS5AUmjwB%($-*#GV+d#kO^#hP z6dG$I#LOIZf;RO)=Q5k+NfZtYj|b@X6Z4nLjWRC{bnL~oBG-6w1(vf&?}{PKtF(XH}V+(ji<4KCBX#xSOowBnUs`T z7Cg~TzrYidE#NG**?UU)lRWv=Y>EDO*P67J+1YrzKFhKDI+hGRFI9qN_UvI6#|FAS zUoS#ym>>v1xe;vI3BY>*+QIK3>AI+P!(nIuQ8E~ca={KiDEUKwvAKM_+mmWJSHJRS zx!i+0BGFmL0J+KS1h?=6DN^^i4pQd2r~q{7DbzYjzew6Y@GJIwihTDc(m*EzNt;_b@y44mHqDBOXpq94O~`S z&^+A-AToB*AG>!0x-|@Ry$rz3mIIIrE6fAF2dJb0C|9^EOR%ESd}nR|Hyhv$USQ(< zboUkYdDbpbXOc7T6zxjfK3>cRqJsgssJ4ZR0t!rSN*k-W&JAM(Y_%o0Ir>n z9z<(dX~8s13YO8*$RItq(Wz`C^02ZjK{I7$cp#}<4O#Qd&2^Mp3^2%^ziP!<@1wr3 z#%)s3;?|8HFSX2K{ftHhwWxTJU0R65n+qHfwS2sBe8`P6H7q6`=I1msx*M4+al>hR zcgz3-z38YA^X|ah4JeZ+r;$96$x2stM8y(c$a5&?6 zS9?Oa64A3ssiDn0*m|6)HCbx)0s0ScY-~c`;yYwhO>&^|+V+rDiQc-v$BYXTRAbXc zLHIz0ISeg9WTmG)E^*J5nHv`LaC^huxgyFugJJK2{+y52bMM-6){Ge`mL84S!r=tx z@n~po7P;F7<%oDng+ge6r=UD|BVnR(Ocl!tcx_Ik5ifop&U>v3;PhPzRW>BNpBB5A??R)6SMK|bQYE70$Dear#7 z?#81Ydwk;MZBJN?vITSdz_uBL$6a(V_T3De^JJm1ix*^w)GV0-Gjw86(Hoa98cA4E zYT;5;a|qCJQFa9w8wv;Jwld7vw((5{3~)4IRzNB#A-oPz+Scye8n^1!bNftB>2!NI z+u}(QOAqtNeTSz%6^BBYs<-quAq54G+*WZIAg2dt0Lp_mQbL57D3}~fU{{O#b`(-D zl98I5(^xHSB{YCW0`0Vg(aoySSO7(898jz2Hw&ZpHNZZrDWz+GDUj?^G;C1}&!%iq zEwrow?oYY+3UtHYPo0O--740C!28!cEs2BJ)Iz|HQL12bX)fSIz%Bw>Q&ZQTkB>q+W$ z?q*FB?8!t0FC54&y7e4-X}$D}=3dVA7ki#hbfk`RMC=#wN#DkkOjQXYdVmc$fDJf+ z8aZIpL{h+!UX)OQD1rA$*wPBI3^05Mz&z3D2!zuYfLQ}50$8?cY>8n_amtpMj<}&B zV9HoCRSR4j@nH50r5H4$EilJ0doX)2tNn)3*W$_Fcr)?3a#AWjfIrVp@Jme9Gu}2@ z4Y+kDJC1<%&EvfYN9$W#L*Ir!;6q8+`&l_5@Ff%M9gJ|CN21pZ5CDgtnbQckIfP^Z zlv3MrC5j3H!o86T8ZdETmh8Z?vdtnJBpP`OSqKYm#1sV5tTP3bnkb2lU{J?}%R;MP zpib0${SQh;n)BC6Mk}`j%@nZqhG;Rf z@oEOt`y_k8jDS4zrNX$x1vg$dnl0Y*BM87~6<7JvX3`q;)ue-@dUrp&B{5F3cRtVK znt2&pcSOe_eu*c&L`SMj15KpGfTb39+5!xOhN{rm0#|2a;|8gq4|`^%9mALyu)SJl zwM|u0mRhf*0;r-D#2W={n*0oCAH%T4<~CO&hK5yH35=njtf9D7c<97elyvUYKNnc| zloY&>e>~>fIiBy*i%0C#95_*;42Nw;#_r~-=(@!8LJAsy3m^g4S(7{|ohj?t9%#Y| zORqP|ke!DiEQGa6#HtAbvgESwCD0t${b1GtTD0gXqVKvAEhXGBTeFlKEL$~{TI|-i z0T_xiG_a<4-(mn#Ie`gf)}*)Fzt2^IUf;u_h!=RKPO$~E4=6WB$6c5`j~9HA4gv++ zJ(n7zCUuM=k3@o*CN8sA1PCO~-575i8>yqoR+nHQ94B#|)Wd9fL-w&%t=l;u0g|2s ze1>Gc3p3gDl^|%HfWkB zxb)E@2ev}-G7aMDWkx{7=!72O35l&IRG~5CL1_>JHCjs}i+h$R-Mb2JWQgd^74x%@ zA^mdjKdaFe-i1ddL)6^Sak#N;-|=}cY%!0vWb~BFu(>Ii*~7YU?WO&~$W@+W*{U_h 
zQ0mrbkgrqfG=N@NlQ1Zqtdlg(<~(_p^8wHBSe{}#=3P#~UUa7pyEKk_3ikzxGJM5> zS#ZNfcO4&LkY6(jdwlq?F$5lSqexMZ!_9gXbfLExH_HKfKIS{BOaVGr`?pXAv?t%q zVr6X26)7*Q9ox1;yAR%mh}-#Rid*%r9XtE+PuH#vGnnc);yo`v!v$A|seSmNXpTBw zMFBuO6oQ5ZfDlR!Y0VRE0SqtJ{nB^EFkp0hyI{28Hqd5`HW!>q^sei<_*gF?haenyBLE$sC4n=uDntqI+t9Q|f*=AN zhfFM*ry4WftVZRP=EpL$jOXlbU1`kArI*&}SM?)z9a`^LWEn<5DgNpVOD;8@gm27T zlNfu4%@`Nuwd14=7jr`WO)?*8BZEC@$VYm83X?MXG6gdPi|pRE77kMMl-3!?KpXyI zu9D?&GLamGQ8A2)0leDnzMJL_fN@|7>u*u4XVd^U8L?)I218x2H!RE;xn5t}!j?Nq z!#~M_V`NrVFe<@2caD&VAaVit5&q9q!90sFbcw<7ROcQZX_4oI12KHu4WTSk=KY5E zm)!r-Lp{lsOr7vON;goN{>BKv#JFf@d71OE+&L%^6*{jUkPi4n7{am`yY~2a<1pJM z=%bU1n3@du;}6@W;{u=I=tDfeLp>{z!^;B4-k$x;o9Dc2EWPWBwIquJWBz<|(mB^g z9m^#{{OXv?=l+`xTl4jAl3BCSAY_Q>qsnAfYu+Kjl$<0{Mh+vuL>k@udG{i>+p>zH zI?wPxKI`pe*2RN2tIMo8YK&@}WY@pxh706__CTZNQW$E zOD>jtS(u6C;sTY%Xt=mwxq3C1v(?WbPlziOSE|T91W*f*=7KMbmC>D9k{7XsqK)*u z3iE-Y9t_TjXWZzwA};g&0Za!J@ET~9ZrNz+m&@h0|B6@5&;d_2BEw%KUa(x@5~>2n&q z1uhK=_XRA38^$((c@4@2*?OeOF;=*^x#0~vY`p4TJf%Jqb%6>Lb?l;gUUJpVWzN1` z@Sq>(7crRinw0uoKRj(C~rmt!(shDoBxP8bAKK3>5yY z=d`M5Tr>enu*SzNzyh=eEDZ|~4FYIEo(cjWfFJ^BL@0^~2<0tBL1c~~h#&%@H6X|m z5J0pB5aBYl7GRDB&@}bwodRJ`PZ7dTsr9;l*m~%mkwx|G%Du&Mg=LCR9-U=vF(LsZN zay$EqpMSXA;2h342O909CT7|2>QO)SuhI6%FFevLqNpDQi*E{+eojk1C|dW~ha|<% z@AakN4SCxHO)|rJf37ezA*sTMM8gr>4Z4IO>q%-kND)oJHz|q=7Mm!fi*gzLYff5o zaAxroq-7^yP4s0JQ1#*DA;rnVRjQ&9O=i}&`4p>$@>L1{Sb$C42iCxp=Y65LfUJR! zv*FD{!>2Q7c+UMa&8GA7|NhUto;&Y<{pbJvVZ-V_IiEkM{;jYEHaoyRP{SsnsedAn zuK>q3su_rVAk!d~QDv?yi#=E`aKq?5dDC&x&m^Y17p2hDCFGFIuPsz@jv*Hf#Y;|PUN zyd*Rsm1>Hw{k3zg@oykn$NZUVitbtuO9Qa_!a{M;1rX7oK>!W9OzVPZ0Sy{qtD2&K zo`V2dBgm7vB2T8%H3Ai_YSEc6{f6{=Fr0QRc>PS?I$6Q?i5iLvIM%U z?bP$Us5+MRyLlh_mGl4dOHc8kx948{*a!|fiRQW=m?#H_qHA&*S=YnvQ&CQct{^7dJ#;o^la=sK74l$cxvY#tos}GW~j8E z)D=~aXP&PiRDL|ECyN^|RbS6}?PX|ghWEb~D(yL){D)muo`hb-PoC;i=+)&nJW$JN zZS2kdrcP;pSJRF~IJ7Qq+VWtwbYK4I5=Z@c!D0u>C0DzzdFe~a#Vqh~FYGKby`2&L zm4E4PXT&7tUGK`tHbP}^J?;6!W18zq60tqNEFo)`Eod;JMXLZhQUUIT6Jg5wZQLwG zx{TV?{fW{>b0dKI<&K-}Bms z;QQX{PwKqQFE`uzh|~U5_xh9i^!-mC-Dv5vg?^fBBiDM-^oxt_dh}Q9ddLUC*)Y&& z!nxDk|KZ+NWb5joAVC-vxd^eXXk8b6!o>x73JT+>gitFbUU9T=82L5w}MM#UHyGqD4N)9YqZORM12)zzQ+dh6M%E_)CD>B}6s z5-y6X&BSlYXz_9UGnni5D5ZtJS!N)bGJv>=RwFD!C7H5Z8}v(LW9&!FFvkti1p@&f zA!7ri65mo)fPyIeHV(YC#LW~Z6dXn>))r)lmV;Z_ks(btjc;JI{RY9<0uED(1Kc3# z@&FHBJk57r-}8+x(mb5-wKpfdaIod&pUT{s%&nUzFpKF#4A+MRYt*XD$|6D7a~Nbv z6fz-r02&yn3aybvW*&W0JUl* z0=C721rP(lUz`O4~;qf6fM=n6M~(^p)!>dQbQ z19QPluR=`eMK%m8Ll8pf3-|Lz04f&TcrrEFzi-?>7C{QG zt}N#^n5Q&K#j1Ar6;1tQ*ou*xrYimSiGsuOcZK4MsaGFj7XUM~??q z1{hg7B^5GQ4M+je;&KUq0W|=4a7yt219&900btC1Q{yTe2#_Zf=PD@jp@--xeehv? zum&o4@F7e;e6W6^*Z1EJ)g91j(T%Rd)1SZ2=5upCH};#_C%N;p>TAFE+vi8hDqFti z{?d8(3zxk9%ZuLgX5PHuS@te(TlU<{RNI7EgicvZH2AtIB4CFz>l!JK9w z^ZcNpByClmF3V^?%WQzG7Nr`Jl4oN^pf8w0UL#&WiZ*4 zkkyq>)e#@vkN2I2ZMNLr#-RiMK;$k$Er9Cs<=2ugdmUmKaTfaO}54AOAZ*c`TA zj_YUvw;aDTr=5FXFgDtAKL8O=X{6|hm79Vh4%QE#!VwV%9`Hnpufg;~c(9(rOmtqk zlN)8b;dA#J>Zi+ezx_Qkf7m>~%e)Ei_kY>9pF8Ix?>xt6`j;R6!2kKW$PI6Gdh+Eq z3naqqK6$s7OWu8$H@&x5?PjwddElUeVE4vTu1=#HYN6T|u6eR~>?F|B#f;Hh%lIaY4%iV524?jR07|PI0 zOhJy1W@WMVzIN|$(XF%pU<2BWInFS7nq%U#S;jtTIdu21_p}pz{r=34-B349^z!GY zKKrIcU$q+Uxv}MzY}wA;Lm3+qgoXOX8l(Z8fs2r*gadFILweviZt|-|Ld!J#t&|aK z-%xLE2$M3|u&MxDasaM^o3X+!5gemyljz_9njZolz`z zfP+dy6irdk0D>+!zyxMgpjjppR<(DN@qH~S)OGrNrSB*6`|SLX9n`#2zy2{B`hDN? 
zTz^o1`03aFxA*V&cNPx6+%j~+l%-&m0rH3{*5|M!!Ov-48`;HGOi9w;o==zQP5Odj8{$g-Y!NUGs|=-0=L zqyDHUEq{x;8W@270p-!n-E@zSy|!avE5PR=h6A?&1u!KYJA;)<#X--SxP$Y_h(fX$ zFN}Ac+j_Rl{=1xVP?Oy7C3mBFiRs&OKmXzRp851bcg|(MoCkdO=r!gt-C_tCox+OS-I?RbqI6G4%5;@-Zu!{C#IXfGOaVD>%N0}{W z%eaveZH)5rE5sN%i;-a%nvKXwoDcUOrOJsK+hy)Y!fd>6`aHi$c2xPMO7i~U5BGoH z|33fa{RjH@UoQFUU#Ez!sdD!?-}>oHt3Nh7p2|+;RY8eNiYzz++n;{vH&S2E`rsMqOcA*GF@&J*DuV<0i4ttR#Nr0gP8_{ZPR)b@xv*2ns0J<4yM!3#FGk`dnN5q?( z2LsJ$cC>UiJB8HD9P$-k{QTawr^R@0f zUeE11e5>8%=4LPa{WLy%?4~aKz3uRq-@0vfvG^}fS;iF6rq!YtbSyYXKw;U{T;0z~zBiftz&&9uIgHU>2Z^6EQICZs;>h^QvS3Mw1Yp z=hMkkUHT7CkN1unBY(>ManZzk46_%0CgsB%d0dF|-{U&vRHG=ubWqWVQ{;OpUJ~5wX@EXeMR`I_T&ygcB@X44cD^f?=BAN)!SY{k8tW3*cfj;`k zb!IBlAk{@}zcB!07-bl!1j{PL$tkD+q#=4wg+di!hDGpkT$83zl}guRr49+k`cE|0 z1c;VaEiH`&aJ56)tAI3Y&JE#a6j$Epa_NjFMxY#{;WVdh$F-fC163B_Ko5>PlgaAM z$vfF9RaG)q5U=MIzryW}7PSk)=oM}ca>~0;`J*Rp^3i{`o&0^PvWttRPk;bGzmU})yms@~Bp^>Ruhz^Uir_DBIP`4{UaE*<04N&1#IHV5Q z0Yg?j9Ibd?U9{1)(Y`hsn0pN#?*|j$Xvr9YIUR!!CRye%_nrZ1j1EV)duxP|vf>C}bauvjVRn7+bKg18Vb4yu=a=ctUnW+iownu7>oB^SPGzj?PX})`OJ{Q!d+%nA zO6nEKEcPon0fVm z-!!|-jM~7v730hBOJrn!x)H{kp{n8Yv=JPd(gg;?#o`2=JMyI}duU9{vqMlX&RYH?H93 za*THDq3KjQE!}BJlB#Uld;h?%;n{17mmX|+w&hx3Rc?98C4~$3y9p-ZCftFb z(I5mPZ|-OG8a@ZchZ$Cbuk-bzyyAZ0T+VvjXM0=o8FfD?`qi*Z*)UAZo*Q1jyXT?z zzMJ!n{m?b))DF<7SktldE1DZhr~xkt!D0q1trh;HG4(bjXyK0O$lz8>>Z30+AS?k$^COL3Ip>j7f*G z9v}hos2QB9v0RJ^f;B2sGhw()QT3@BSkfrkb~&w5*dc+xB*QR9pzb*I)IPOLxdtGB z|DUn0PF$i1eCova7+N(uY_bISs8`})D3ZlxNkkt+`Yf#ANCjd@@a2}Jronm zatAcm8T}F8w6<8hNDS%2N7I+j03FAE$I@4sb-@df{8p6oK; z7{*XTiqwq30|R812Z+STuX&>feKq-8= zM#rGY+A~eu>9r?(xVhm&62phT*-I3?9~h=wbWGQYGn1^Yr@%~lgz1bMKn&Be8`tS( zFi=Of$VM8at1%+25JDZmpgI5&KqP^LD(nv1Dd-MPOpg;ah731P`OPdb<8yCe!QeDH zqo{DV@pDe``MO5uQ^qG5c2?B&-{TRRrGD@4HOV!y8drFLBQ6<<8QR~(z6P|7VEu%h zwK|Xs#erOhZfb9NC%amwT(!Lq*z~LYJ>Ua$gbToP1HXMGfNHB;sYtRC+JXbeHp*hr z=JwMW{!pj;*6rR7dznpfxuMw4g>j_I*VZc{KOBs>b3HK`eF%hqfeA) z*&gD=bb%T>v6FOYEA3*0l|i4qMoG{JH3AJ8H^kJOayP>u8+nBt^Wy()eyBwc3)0As z`LHj>Wj1XEjSw^Rl2VYj-qu#p>!%+|f7c{M*?1c10qCW3hBLspv5>U;;i3gjs~zfy zP#4I98B)aM)JM16GLJyoXBx2L|9KU9BRjJ`7aCfwS8dhB<%p z>C`{j&i$J&_PFkzm_VmN9r&gFNLMuf!q$_H%yv#A6VxUt7@2uv)*1D3iTVMt%3OAt zs0~3T&)s&*s&egwDI-UljuHH{fa_O25eaZ zqT$N&Xq*Q~gSHF=%4bcx9{^7~hnNQ`vtY1ez(zlK&%gj)ECVb6CxhmiU2QODIgpA2 zPR6kTq~b`Y5O*?8!Un41;gIRf2`G#(NE|ST1#MhxOI_{hkymXju$S*Gy7!}*?PgGnNnK7tbf{y2y00o+A3JUtIW9#Oa(u?PiiNkSEW?9@x9Cp6rkj5U2=< z(9u1nCbdkt?vA(n%Kiy60xf~=j;@67*M5O}!S6KD>dAY{m#}n_rZOq|~`{5W^7U0Phqqo`rd1hGbN) zD_!|3pH0>SAE`kj0M2>u{qwYNEi|@dq8~2vM|CHtCIP2Q&Va~#w%MY>{$N$q1*3IV1OgmK_ zzM%KN`giXx&KpK;vm8IWV2&r$lrdvS23Ixg?~~Zp!GzV$KrI-C;L#zn&k9HF%~mE8 zOa$Nq-s+iv=K70Y7jO}8ie zYF_Cs&$Z+#*l_+-9Q?JHhrcL9PQjXL7}Nw0oJ=X}@!omb7=oc4cgm=rC=I_Lnc8g` z=JTJgKi^AZfT7CK)ow+jG1NfqY8l*Dxbr6O#c)WKM36L#F(9pWrO0)r<0uYzYCHUK zo^c5ayE>o64?aVf!cC;{mPef)Z-OtNOVopCu!$--_$!IRY_y@fb?`T<0K>)!U!0mnu zrw8T+HeJebXyz&1|3QheC3)y-DZ~+m7LGMHf+JG#R#B}jwEg;|&+kh<;db*EQI8(= zcF4D{hq$=&Dho?&-n4m8`AT{uhM{7I07K;U$2Tc7C6vn=L}y4ZUOlMNyq45!(E~HckNw+$3#4@h zX=qr@Kn27I*<=ifg*@@#DgkKH-4+6@AXmYF_a>O>IX>I^mC3pf{hq1W+`34+HTKSe zOkOpE(_Y#|dhp0}C8e~3p>0zmm8(j#blbu?*7Y=_3OE_7@p?ikQfriAGYo7W=Xrb< z4NiX-MMX=E=22g=9_&!Iov-d$n7KqA<>f4!Y1h-f!Eh`s6{XBt>>BkqHTGFqiE59{ z5Ti^w0=2X_XG9G(-OtW<2CGH9aYwe(8tD|vEgym8z|+!fDU6E?U&Zg?&psFM8I}be z!|`myIp7hh@FU!lWX`l_%;VL*rAgEjZRK zPp5ZHBW>BjINoz7S0IC|z+N~yzjaUlSi-&e!1tb)s+Fhr=L>r8y;kD!URUeN)E<+? 
zmC@&1XQ8W1lJcGyc&=U-ZbglDwe11xjw^Q&uDhcrm1(;!vcp6v+D1r}n@FNGz-_kw zuS=&%t>skA@@y`!&29IMoQ^JS?=_Rhy87Zk>%lwOgTj~7VBBZJ3E%RBsXI(1l?)64 zEY+5JH>p!$FdaK@fC^f}U@GZsLUI7QU7$-Vv!@!EbouRH3HW*6?sNSk9gi>*u<@{M zs)Jm9r6g#evNEhS872fnh#Zu~S!VYW-1W&GW|uaKYjF@Ci6DvI&J_>&W#DRBOL9%Z$3 z=hTKPhE;an-@{*kqEer4VJ8DU(;rgEWR1!lZEZ*SxG8m4mfPyUZpM4r#)bEj*jteLW2PTx`dUj(c&xATN1&wT!eH|PeV z6~omHJJmNd0DVh?V0!GpOp;WM)xua!cx0n6AutxCVZ?;602sp%a4m8>wdl6wV~26_O`4g3k(1D0;o zBkU+JQ6sz_DBy;cQC^sqpA+Om6>8BRdymr@*r885VqQw?;qxQJAAe3mUlBg4UoZZH3|-1cA8*G(c+&og#&X24DeN1G@f3R#TI< zQsd*HP>76EtC~W3hI}c?W>8Ec?)HLn*Q2WYI`^8xLmeMSUav1niiwMpb@bzLu_<1i-O@*1%B2usJsnG||)K zMV5Nn5NtU~FqRqUr$3`T(o6IR*F0^%@ziq-y3Rk&H?Lp)#H+5$v2dr>rii$%*E*Hb zD84qVE-7*j+Bp|%x9*BgO*c`2)cQ;PRM4OSv<5){0iGf}wbqZ1$A7YSbacGX=v895 zR*mmma{YQBZpEPi4J(dbZED?Wo*SM+{^OV8rFK6(Cd2JRm$r$yy&-v6Y`AGjlC-N5 zD!0*%wtL4`i($B(5I1y59O_GuFc;I*24^p!?P4cd#f&CmM%OE#dGlt(Ia8~89d4dx zn2lKYtv6*jabT)m4UQ)a4pK=Yua)S_t90b0TGg$TZ_(_!%F7(?qCw5ph}H+QcTY2? z!%z5ZgNBvvkDH98Dn;Oy#Lkp7gZe{VIhcgx(d97Np-#{g$3#eo?Z!FK(faH}&9vet z&GJeA9xgex5RLF>i~ z{QR%b>mTgt%q$YO6kjig=txCgC?T3?fIK)c%3yFphB*y%sWN)zk~WLd!nq= zZ=TeuH|8e8_~w4_5GYki9?1sUTH1@b#MvhgOUYszf&em&?)t~X3Eb8+4_Ow7d z{z1Dw+`6{6AUQV}CzB{w7?a6BtDA6?8Z(%im5R$WjZ61xR58Z4)4T3sh^ZyUc$|jl zy2)|8dOUP)=n~Q$9xl2fi$&-B!gVPL-C+j~B}yQR1YkP~fCPaE7z_c_j!Y&JNlAFT zi6%(km_@=|jr@?8UocM*tJ_qxtL}d9U6w>%!?HZKAIv8UFgV5Z0@Ik>aKbGcKJiU& zDuX0zHw|{cW?e)F>s)ups-n>twI~S#RdjF@sun0PQx12DsA^Z;Q$Zz_ zQXJuOnL3CS1+X7~&nbP}nWHuIo~A*x9ZlcW@me6}7pCpkWMC zA{nwfV&BU_2`v)3(KysWaRk)IeUx>0(W=a5W7j+eEN|8lVCrv;xR!hvWA2JBGM|MNVazZ^H?~Q&7oX z_0Dwuj`4I~@z1G4)nXc5B=*o3M%U8Ii+bxPEFIS~3>$73Fff8I;JevU<}^uy;MfK^ zzX2n_Q4@d*>^vbkm>qHqTBuj5Z9&_Fm@Wq&yP3Gwq?${mJ0pN<4cPS|U5>_ZxHd*m zFd&$xp*-K&>odJDKJcE&xplnkfF6H}?|_zh_VhzN1rIR~F~EzVxCg)%@G56Yc+A@u zGOgdXo^zlm)ps)+vNo;r{Gcc`+RV{lYI_s(tS|w{b0kwl+=91kRy@?04E%qogb~Y? 
zQ~qV!)Jj*u0>GOS0H~SkIX0Hh+cLR9ai#&RVFt{hh3^2H=O~>}oS^{*1I#Ins?{X} zfC5qgxZ6_|`ZnC^w$sh5uWLA2+rnTLork1Get5GNJ#(G8w>{l=YmN%c9_BaJ-i>_P z&f{gedDh7jHE1!my&*RcDr7JuTt-4Y*4<>2EZdmIAS`DnV*t5z1x$^Pdk;x*vpL-% ztA(!XVvY-sKUiPOWCcGekslh-0kGatdAbDbAQ`0%jiP4-P{ z{+gfuWB(H%LX?rXhXK2l-TG znZg{H-I#I8QUh~vg;Q^e&x^1gCHt*L4*S%rpv!(I z6$|xDEd{_Zx^cYMuFyDuInWxmGRhJZ#PpP>$bjNegtNr9x%sw4lpObBh}7?o`!s`P zp491OYI@P&6X}?@qW95p5_!5l`$zjhr~C6S(}yCoqO1fs#-2`;x?GCtbs{=(_f@m< z0G1k2y-A%*5;OHFNWA z)^_xin!nG_dgt+R{yAqI9=^m8JCMSV3^QcbLEr0$f!V-h@3wd$0g)YI$3Fw|s7gkz;vwiw1?a*1G;n=G~laDM=)5`JPFX;%)=B}%gyYpemR z0W9V7mZC8a0bM8>DQMDmb!@WwCaXsxD4r^YX!&*PCUPx{xk4ln^k^oVIbjOkeHAXy z%NKK>C?9ioDo}9gdL3+HdAx<%-&h4i&(kML{22ZyBOvS$S&y|F;@lD4kVR0!ge@2>+m!b`;Lpm&3 zH66eyZazNrjnSsQ2Pu59rhp*wm?%xUu`etQDCN`(v=k{qPr9@O_YhZ4ctbU712jlo zVy+d6y3h`-B?=*nS*TWUYpS7u8r=GU6C)@&o&sQRxdFljXyj{vz020E-CqF>te`nJ zuK|3gcC{;c4|rQsa^#~&!%+ZG0I+&P-KrZ1;ZB2t+(^i*aAt@A4A2&ZAA4OBI-F z>w40Eo5!2gl{&YXG8oE^Z9WRtM;FfC5Vgcx6yzi& z1`TcFLfirtnIV9-ss>J|a<}|hJwP9Zvw(@ahawLq?5qLp0beDGQJP|25|S;Zp|pV1 zI#mhFN?DX-=Hx!Ea3r7o{5aj|XkWw6YN-r|zPn!FR_-c-fTeaQ9tRtxX z1~)*-tE2b`$Hp3K>_h&N#`2SedNty&q)Q~zGE3>(l)rZwHu_O=Y;EYAIC-oo(ag#D z@UQw@{XXt@MKs%YM}CH06MEvfJQCPCCT}PwaR|f0vp#-8mre=qK51?Y2t}jZFd-Vg z%{QA56-~_2*uY`6Lmfpk&;YJ;DfB}aN?NH0*;z87>2urU(#PQ)UfeEr0%j~wn4wo4 z5BnHTeof6eNPlEbnIi-_%3SqL8!b@w6iFq?XfX3w=>lis$RCxVn2ot~k7*oFbZdKt zYijPj%}__9;7({u#{j$%itCbMpp@T@0D{7slNSl#$Oc))>R}&D>}knp%hnfMEGRiw z?EWS?;}(}WsD^+n5}hUnOY^nv2spN5JFp$t&guXy+iti_%Z}RUM@Et<4cg5Fe{m6v z!Dt3MvMcJv_to+Lm#wSAm-Vjgnr-axWHlbL9UqUSFwiL4Kt+;1D>7F36PQiuWpgm< zK(eMYL4>%3p^7OV{el@V^d?z4Z^+2t95aO~CO5-rCzDk66IWKqU*giQH^={b_9R=k zVz_d7PB)^)T-ZxBN%j(r?FQM)?fW5I{~5;E<9fB9aa;;Az(_4nj-+DV81%_FN3V`L z(@XJ4S7?s9sWX<$iuU4}@2MyFN1Jg1_VqN1mqR-7au`X}PFQ2}R7Gulz1FJUvh3)S z^_Wk?QNy83z(s^gQuxRQKtz_M8iD0UMl!X0yx08tZbOw^9Ci9l<3)F5hb77jz3Vg& zIJ-RFqBwLba@u$e?h+P)6%>6$RdJfaKH zd6B|LDysRa$WuQx&p9?{T@rGZomxC>>W^P*d!OVML3b`Ipb}o01S0|fp~}(63=tU_ z5-2x13YAO3)5AIgOf3g(h@NHgE3IDKO~w$nGZV}b(o zBbQRPsKB~QXV-%8LO#PbQs!T>snw3$<|u*w?;AF%JJ5p}FC^Y&siXzLFVuwJ-y+D7 z%L^QMZ6C1^9MuhYraSMJfM#FZL>)_y)Z-s=^+X}s#LB6sLGiuZQX2dMQ~4U15QYFC z@DNa+09yj(G~88fT1NVdc$d6>%I=8mG*lIU)~$1>6Hok@tLXzTei9z#wOU{~_P|(V zJ#$Q5dwlpE?Lvp*sVU;pN^YVvw|APN=4;j_w;v z16m4NwxC5ziIx&AqqG28cEo}PENyLG&~|RE3#kI~#fEd%1@1T|YU0U{x_8VKy=@u6 zdbZ>;9k*DOgN&oe2v9C!r>+{X(Z?}}dv3zJMjak9st^*W=Dg5ZHPU(C?g*8@14_l( zWaOu_>iJk}as+Zns>ivSr+q2st#IL`-pOR=ct7vrxg81OO_~&xU|j=%xW-+m3d+nX zZ6V9EPU9>Zl))l{77>l01r1WrQqY1%=4gu7!=eCQjE#AvL__PN3FnJ}<;RpYzNvo3 z8ag+4prA)Q`go{(Y1d>C!&hxQo>TWU5h39=Sd{{$p*034`ostT-0}??J}`( z<;n6f(pi{C0kc(dr;&W>lb46_RW$lHF3IExx7F$Ox{1B0We+i=cW&cw>Px_Wz#I@QF8sWZqCV*3nn>9 z(~IB}7n=d?2;i%lWg6{0Bl-a61N$;qi8!UnRwJ2UYcR99Nsd7sa`t^}tCbVL{GGmf z*V)`dTTgq|I3RX#NZ?>%9bjO*4J2CaHRQnp=>yJf@w5*$YMTtlUa3c0kP?$4M-r4_ z!|*vYgCGBz|2~emn~CayIOBf3tDV`Z)_U1|oWt`V!Cwf$wGu(V@}h0RqXc}B(LB)> z8-|pre5n5ROXLz8*I$+QmGhwxCBhq5Nbmt$ju*HbKIR^v`mAKS&9)k>`YG zs)sLR?QE@DeyHW7o10;@Ympi-0u~(s5ffYQ{z=vJdjJ&>RC*bA=YHa7szG%CqGxKa zN$FP5lv?~OzE9p&%ljXD<;Vn5)N|&id`P4*8^NN@cSE9PofJY; z+0Zgi5>tX%`rQYc%LCu0+IZ#+qq?#RTay)%(fr&%ev|p4FV*I&IEd!`9eG2EvAR4>dz` z16|@l<6IsSHv`YdUT2@`2@+#Fk%FLY10*wmYqC_jVH#}d7&53Y(&@e#GKj2(d~Li? 
z5D1W8uX8`OLB1y^G8ReP*fdRrjMgL2Ov8{V-qOe>C1*Mr*vQJlG9Q19-J#?=Evv_00{@iyS<^gIIa^%?7 z>}?=<*#g|^H>jC%ltEpkd+I^X%@?eS zO2kdocH~gwg&E6_wiVgQ2TUS^6=$G>m`d-Unq>VaU12`cJxRlaN=CeXu3~<@jry{R zG48EKch%tpH-^k4b%%mq8lJW90q)xr8%u|pl(hP5NJW+2wd2pv>X-APrjH3AW&4~$ zN{~`ZlVhT!wt50&ZWA?*C*E{~#VqgqKEnjq02}}|92|W3d9hN5@Gk}ge%6Sb37S|tK^8-Dc3)$E7KrU|Lc z;KKW*8RKy(b8HHClx(B84FEQ5*u`K2m>5H4ulb6HC1j%_ z=DiCLh#>%>DuzHH24aAi6?J6IuunZnSoR@0!h%z=JP{HBfm#B#wgCG*`@mWR150X> zN=u;hSh%Y#wF`|votx+`*aNH%26!;#@oFFTk7;fr6jz^T-{3Cpli#jpmTFVbI&GuR z7G54|6u01@#2AP`3=pdzh6ph*LkG*FHUK#2qk##dv(n?r;so+ay}llU@swLi7p<`) zk9pWNi=Qm;DZKVL|1#^`PP$NbHVJ>+7}eJ(baKnC9!lmcX#bJAph70Nn%kAg>ch?N{Y3UxQV6s5q~c!=p*+&w?WuEu zrEW*;^4@AFySr@GrdbQ`R-B*b#{qu7hl`vf@d z)tKGxCw0l!$Q@FBM#>Z_XbU=@0t)3nuXc@XJbIb3t@A+-zqM@D{sFkX4yjle%nfI@ z4+74o&#FSN;_@s@o%%jE*=OL6ChXnsY7c_D09_|@;8y||fD1G|o+geDPry@i75IrNuHMfAn6{+LJ~KjApP>*^5WtTVpycvs^faIkI2PS`YT-JOzb z6G{h^FESftH3zuiv4&W`Z@k9WsFt=NBWyW>@1F>>bQ4+H{lCV|g0|p~Fku#H-$QV9 zwJFce0@}aCEIoMO9Q&uH*&(4AK^R9nvl%d>9r_shF@BZ^)~yJE9knw+sn@V{r%QJR zQ+6-QM(xnPe3$h)v#I-h+>pC2|2?NA<{NCUOBkfRXHxSU zKQLLjAd~ciA5Xo%Tt9oo1lV&2fI)zfT0t;OyeRdLR0Bz0F`s%PWq;BbU|&PsY7jtH z?Yw_=gQnkL0<^Q++*c2beBW#fPG0~DAKVZ#lUh8peQgT_0^S2itB8*=K){QntCU(U zKSp}ZY`q5dXWOq-BevTCSjt_TT`U4Ub!#h#AzsbJhOE(z(mc{z-(WlAoQt02mDzz` zI`ESn3I&6d948acO>@!K*Ba_Alz$wP@gh74$yhKX-DP_toO(8xtuCdfs|vd_GXbn| zGh$>GZOo|VV^7UQ=KBrslDu7=@^ir>=|l$8zrk$vK(ba9^^Q0VaB6_i$1`JZhL1tP z>$mWl{BE9&)AF17ojeX^O8Rhw)Be@Q2@vYxO%=hg!$H6p$|KHj@8{rJP0Y}DK)_PNz&Sf~D2e3=5NS>knYqU_OQCB$-IJIc3Yj3sM;Q z<_CAq4b(gG0PqGt=r0mTT{+tpKcU<)I7&iEMc^f9ih4h-s9G1|7ZBosN$EgP@ z&(6LInos}~MiWgxyJ$H9!=lG#?PrI2U&1frcQ6rN(}N;2)3d?sbT|hUJ1vaX#Is;N z9q3z5%oIa-XO=n{PsA7TCj0^tCh0TnKd7# zF8EjFJD-y9`Mfu;1SaV%rf{?+8x4jRu3t`g0SY4p80_(4oVq`-uktxq$$UiX?Wed)@EJ};NFN=xqgotdSe z9%yd`PE$!~e^$4Dv;VF!{x{nn98ALSS}P_rO(z3j&|F>)F$5x)26nQe_*MK&?p3eH z`TsC2OuR_Cik<{306{IF<;Dfre(b6Lh@;e^Ll}@i5o(B4XLtb$#Tmsor7>ItU|_Wi zfw_KVBD`_n6~n85lOT2nbO&??bbE{4L3dD6HC~#k8C5k~@ymGo8T(C86JnyLlU&#=v?f9WbJ{X#JU7fC{UlfXQpMs{+IFLTFm zSqM7{;yQK==AWKZaizzF^j8t+M{rURdN!D)rg3n+b~`uo0o4L^j5G@_ejp)szQV`h zQP{D}FQEVE_vIC?k0I$S#sO|r1XBf??NFp7IA^HYVTS-nfZoIizUK!LRrrH1A6^o4 z5S8(wf~Y9T`o-}gXw`Duc~`b5=}HzXn?<+i7S>hc4(}G-0_a#Q#~lm0N*ilBSabz1 zO{E=+&f`iJT|uQx5rx%Vf(q)Ls8lJ!co9YVyFgG7KoJ!MQKYobcbHu4J21f{1yTJ1 z-k6h<=>2>6^^}qRY7OA^tc?BW(Tx*e0G>4sr8UOv5MqI`CQvlSkAFZx;&}1vK5Jk{ zP@L%h{?A$N(LMWD3|;2!I3;G9?&43#Iv}C!?3C$nyqp@eu^ge&42Ao><!P(Wir1egc>g{Iuk57g`(si$|OE-l-pMGI&_%P1|N zkrFMSWeZxg?1)V^VbhFks$~;4flUpl-vTs39tyn9c;M4XNpf3;3xDX94nh4dEz zW+PG?FnKxMm@US+diK`c7r^M=zMNBBww%&>YKXTm@TsBEq}=ll&>|TWucX5Y`> z$?Yu^wZj&cz=UQBV_0441}5NuiXH9%ho@*RVud0;2zD;cN>fP1Fz{BB;W0DJN)0o> zd@5!bh6!MTi6K+KOfgBr0K)*oz#E3u4xgGCn5ANv0VXC;nkjJh??6lpF-%QrhnWP{ z=!Qux8IY2ySG`x~6m;RR7bK?$@aosltuVt|dWW6d=4&-z+}#6M+IelAF-dyPJNf0Y z8#kysV$4RDgFeerhv!r-~mIt;XN3XW||rx1}1n>48eS;9Zt~3+w3z8Z=u%a+{}e{;hecz_2yjO z8R|rpn$2ZfX?h1tVD&Yq4<{L+UQTW-S2WGfTwf~Ed!u#@GUbGl$$`A^(aa7eKeWyc zXV)KH*IU?DXYV%rPPw#?5`@V~Hcd{y1$v{+x+?&gF@?^lWiu|hu{bF`JBo?Ok+}ao z2omQ8mjVa|AcCv_5QkWad0-%lz1opVnra|Usc&LlL8MRtLq!x5%sWJ!2TMZ~FW#$2 z5^*9RVggw9R;ZasDntRNYGOidG?B@<)8Q|{o6^6@_(rWeLdRdneow`~TfTPwBR){` z@NdW&u2Ax|8+}M;omc?F0ortv!lv=UCae-*vJVb0G&=$U0!)Cai`M~$WC06Hb7^`0 z_}AbZql3?^_Tf#8vrG`dMBs;k2x7p@+`I~!M;$(Uf^X5b) zEi=W7M6p-IdxRK}K4yk@-dNSd3`M-8l`T?GSkJ2g3h4SUUjEZEz&_79=f^l^!`^~x6w;n#- z#FbzQ5NoKaOugE;(oIHARHUtHcD4gtJ)t;}Xh_Ic|8fZB!?`r79<=5!=cA~BVo0_Z z*h+%p^>g^S{5W=e729UK>vN`lS7q23Mf%9T3C?XmMA;n@r5KKs0Q1d~9Vk(hSG1SD1c*TS?&4&Z7)OWoEW?D<; z!=7}cfCPwmtrfh5&*1k_beSi`g#&&q4V#ko%Tsw{(ob!{2_t~X#9(Ps?VLDQ*-G!w 
z7AH`}cpdhd3u>BTPA&O$JU{HwKvEDaPrZn{%Z1mKkn^iqF>6AOe{Pztu$fpw071kw zy3pHlkp+*xBdOlv+$RAOlT=LndvTdIjoy+Lh-s-IfQo|8Hd*j~m%as}hKL850!cAv z+GFP*&tgu*LI5?yL`=4vQwOd+e6pK7Ig^uhhi8JNUkte4>ym)~eT^@!1*s+z-w|UF zE{+1VYG6`DrPBs?eKSr4MNyYR7!~})yd1lC!O}@C95C_Pj=12k@|(=c8wYD~+^={6 zdIR6AfOwYq;54liByPuRwiOYc@`C{sYnoYRmQovpQ9u-ZlDuUC|8+Q0ZIFn2{`ZsZo?U#D{5(ARs zvU8&iXTYbH0JPxXOS~($zKG$Z;1-+yUbY0p46XZy%$`+3%`5H9Zg<+}Xx93vU_MZb zJ`a2eMhsD)mfey6LQ3;67dM#kssN4hPfpqBR&jBIA4zjbFR>JBMk~cmDw;U12f;a3 zZ`C_elOW}41>nqK)@$XjLV@sqKC1-S8}Z zm7zN}CqB=!(6B)pt#UW-8?~J$;qgjpY_rse#yRCJ6g%v$`;}n9-F2PezcinWIwy-= zWJn9%@9VAL5_h>o({*M%S{;TfDL7*U2A;3QDJgpe8WUGcv($c9-Dd}IqEWp3b|VH- zJc{^>x%u2(=lhLJ0Nt)Qhlxq8C@#1h3?TNaPO;KFLricgn5?^ai*L=}cD0vf7!4n~ zL&)lQi>h=JUBf?@(UJ`Fnu=yhUBCwh!%j;X4%-2AIlXc*CubcIx~n(+s_L}Grmq8a zuQegVb#7M2T>@)C`Gvby zj^e|ql2xnO8r`ZMa?914CnJChtrstO`x%zxg_t1#c*Sd4CIO8wJe2BIhKYcIiC2WQ zOY~EdCPz*5}vdxj3ndQHc}Y?d#HvwT{lBdu|GAg(R4- zWJGR`z$zUX_z)^+p%T@Z`>MYJ+A=&DgOI8zd(!0|OAMZv-=-gHxwxY#;A5E#U1uKh z3jxn*I2RRKMsw6Wg=pRGGhQh~iRpg}j5+5V4={MTFc>|Y(`ScHjSpEyN@E|7XogZELYg9_c}B#jHn;rgrY>k8FcHxV8#1b*2v0;m zTDYRdd^D{>smDQ1CASz_#16ZTiFtn_Mv`Jq1Lwf^%^5Uyv7@lq80sbIY zaf!oEV1YRnsNV-OF~LY(Y#ilD5kiXLO>NHl>JKO^9N8|>;uQH2H_B^jUIFvr?#~4y z26EwB?t&tumpW>gnA(jnwbkC(%u+ATP?;CTO09s4%ZrxNkl4AofMH|@1SG8ciR9$c zIx%a&vT~Ry%dr}{xGsxl<u$cPc@=PjO_H^@0Oh~lQblca4LX)jAvee1E$@@j9CiR~+1B2l#XiJv&;W{r^F;z@8 z_v@fLB~o)Zmlo#2*rQ5Qz(aAH(RKEp943T`uLUhyEq;FW8V&19*|~oqm1X_*Jz0=a z$rBQcY}?M5f}5Ed^{%i|&tcOpvuNQ4Lz*iiwbeXFKP!br6w-!6n$z(uVCbkL_MK-^ z7)6eHsoSW>j2{a95I-)?Fw`YSmB0R7fCxY+C>}Djb912b=3V>G(z?Y2BokNIqHUXl zcg!iu`Cx9_W2sGxNLz5BGoDrj`P2l7rd~=>`xJg zHV`mqe3OUlap=I~n^Dw)CwQFLAqprfp&>tbj@!&5ijcM4Y{PD2NLm8&-4DF)Bkt0u z|2i7)tLv$Gjh$8AU~a5;|6X% zX6**Ovds!9%<}SCTEj$^H&iN3(>UdBKS>Mh`PcKXq1Bt~$NGM9QR7OLq>3Oi+Xj_- zrdQ&XmI`c}^0UAF8*F5Mvt?J1r1sp=wMq`ojoGOEj^`0ATump3{x|H>rp>aLZp}E2 zfM5^wi!N2-3hEraQ@u#Y0~oMB`FLvaf!6w38(c@}Zv7}{P}3bx?9L!Ma@$+(I;;U6 zs$!ZPcBy)_9)Y^tBu#r&wt-wMw=E$aFyP<~3w&{vum~QiUW=Va47}Q9_90b|m>zS( zS?T-J>%X(W3QLW%6m0%fWV%~eGx@qnTHeOK&`;) zJ}^PpMgdJUFaZt}$6UL`gi`03OV4JhE<^Vslrn~XjABfaCMlT6^FTiYZ?dJt*ZGBE z*y)(#AXEUVJ?pzMWnAQ*wSZXPTP=UpJLbSWI&D}%fkAg@aq#-}2172-rUym+oB#QX zPK52=lQ1EL7yg~0Zae@+d7}hnMB8=MiDqM3ED(d5r@5Fq^O~xKDVB{_T!NT0s023!_rPb{g7aW{l zuhlj7H1p{mTfAV)=g9ybHkqA9p{ws6OxSrl^Oo`s$jC&1l#Vi|o~L@!>g8R^RWn=o z!ej1wJshS2>mA$RuRKc#^1W&@;1Avx>K4PVh!bl5KF?E$11R%a3y5;>bJ4)sYBbGA zu04h|I@7h|GLP<2R%ck@MyIj;$YO?al0_Y049otmy9gsk!v1Q+vq6y`+|vKy5xoDMa>vA?d`@yF=;1bv9ceR+mHQwI=E`WgxFFyt=F-(%v7)NN+!kterfyW^GA zl^3?TKj1xMW>S+{08ITL@bP-P9{N3E-4>uJ(_Uh_MDM`^u1@07NAI$J1GeIxlqG4w zGJ9O`vaggI)I(`;?OC_H=XPpYzvJ$5|K+ce?*$X7TI|9Rz{6J(z>BzXIpA8iX~l#3 z-*j2So221mGZ&xi{vNPU!werr^_&%$zf-42bTuS3LmKXL$N}fMlel%V zmc#6GG>`rvnqxbfQiB_beYvs+Em)z2KKf z?PBRBmwn8!H?yJ!jJTR~@%}zLsgU!+NS82bRm5LmPF^ieL4;t6dI_eR8Q=b03Q2+m zvlz@o$ivbAbE_V?ry#YUhMj#+Q>2Dc%y~phZNrD(bcj#vc(aEeYrbq_0+V5C@y=^q z=yok!?0uavW@73(U-*7)U63^dS1Mwnc*FijnY#{18AgpeAWxQgMrUy0=c$RMo8qUtq`zmVROYUyr(4B?oU8n-u|Jjdu=` z#xnzlnIPK&hhm{AD-9S2i=AsDpoU@Ds2JH?OsHnxhgrhBa|i}vAlWKYAjZUmS~gk1 zFdAp^R?Ub`f_$D%|FkjyIM7whbiS*+noe#erl#J!nkpt!z)WQ1H8yBR>NB6S3NJq0#3Tqj zn2c!YZ1CsJB-NMTLwZ@+N*RU;YlHU|JWlCyvQ3%bk@FI>ObOlQa)E-~7_c(82BHX> zCy0An1Tv|P|E@eAk-{Ch=ZVEh0smK=g)lbjr#g=Kk`;^&LmGn zPykheia-Dm0fbb|R(K$_SxDPtgE!@nlANK7F839-gcm_jYNuBhY=@Q=zqk{KG#@t6wr2*O$TH~|cwJ68vLAVN(o-GspBh0#74d^9v)YKq=Duz?kTW(VCCz?h2=EfzqT`}g1uocW#)x9WdE0 z2h`Q5i-dtH$7aC1q0s!cr)D6Np+s2Vtf43-E;=jZbe48ky-hsJeReUYR1>w$t9TSI z_+Xd|ggHqkjliFr%8!bE?yFf7pyt5m0X!(#u3FB~cwC9?a6YJ%d4%K<)K!&;a#rQ6 zqoIWOtWbb@cfyD<^_o}Pz!1O$N;Ad8NC$nJcfne4lMR;K$O-vUlzeyURbZ!As?y-qX(V 
z&T*<9TGMvwImD^V1fah{>sJEnj3rj z4km1?Fk@tEK`q*8w~v6E4={4+I}tU3$ytPHo{0+~r5O-SiNn5EV#CbtoPw!M#oFda ze(ilqGqKcf8R?0gu^#Rc9ouawnh|t{N(*qvGC!y&WmyPLFwP=HFoMIpsKqwr&2)v7 zxTHtCs7I!fHJ5XS^9JM0r12JV#CzL9E!$&o+QAb>HVFw`*47#Z%olnbjl1E|8gLeB z4mB5?28WuT@+L=8KNJP?!F+H`1)nrY1tweRDwRxvXV#u21e=4u0GzhlVnJ%Q+NK<; z;G-~qC{)p|EfJNl>RHB|ibM#N@fB-3}&I??fw(|-w zKro>;8<=6W!3@JtyV(uTcgypP4wlUnO~EiQvziPrKZTsfW8eZ{0w#t4hL~Vtf?@u< z!JJ`MlX)=AYBDf0aVp+K#H$y<1T!%auV4arkynatSM5b>1rm22ud_kE$?#EF`?;4L^c5d`yU=pF~orr#gT2?kEY8D>GV@t|lBL5OpH zU^4F8HL1aioc+%YEGhe;++TlB5|0W(r0P_t)g=#aG7QxlIw&I-vot+2&4Q$`(5qYUUAzqJ!@{VxXkJ*)g;>jnf2O!w0D) z%p7V97-nUJ{Sm#m~p1F4)$Ydj8}$`;~k+lsC$6x(VXJ8L^dCGrUeA zlLm+MfPF4~pI%^QeKZX-nmnR;E~yGr!6&X+UylDWrYY5%j9j?|6`&$7*hCVN7oW9; zOqxHa0lD`kH`l`o2qv`$#PY=ZTJnlVvb~B&gvK0DpjaCCee*}{bL=D5Tko8YIPs9& zYaynFr6EeTNwVR&7HuYgNK6zA7w~`T{nLs-P>Z9FugPf5-_uULC=euws07@ucN+}! z+xRxO-DQVa%=7N|ap7L0XOYrOUHgiIfqjl{_)S4z^@aET^M^!1OlqM^hh9-)DC~Tn zF$t*e(*TOG8k+9JdhuJ!nN0A)a$zvy-Ac*R`2xi`sYK zKG*1~$&&|mC?FY!T^F64vYetZM{}Rf=0m^vS36?vn+!3tPzzPJ-=d1Rb36EzT)UU3 zq#;ToMliszsb-37O)UeR&*`q2xYVe`oga6m4u%zBCst68z;@mq)IX4bxx{6!fBc{v zNr9~}sZECX20gl1YL=Sedl!lWZ2p<$*`JPCi#TzA`Psep;lW!FYc13MImVN!mrLvGRd&bcO(uq+!H zhV`pA@2ze!imtS7S5C-|t~+u}Jg9Mx-Dkc&Uj1Q=?0U1+=W;4cag)M%2On@Jx;M*d z#|iC{;!`sy8bpj@CdgDX0sLu+1?MrVG!t*@5e0*R3xE|J*PEIKgVOB-TwP)zC3L;{ zfrZ$HT=(Wtf)&I1iY(&6m1cd2AL$5WG^90LUxfq>Y&E zen}dZwDr*hWHs4%uf_N4i3pMw_&9kb`i>+9(_?O0XX&fXr#_o5i2Ne8=CEw*9hNx4GH+*LhuLytRQSQ&3F)j>YKGokB21;wdn(@vP$w zXoFWoeQ>>rH15u6Kt-uQTRiGuq^6w2mi+*v$tKfndV$Ms2lU7p*Z3Tmh?YAaRea?H zadrh-Xr6+toB z^r|+TtAOg*6S9gvXunbWVi3R>gFcm_WsSc;?0xAJV85T z;{}>Ip%%^4Zg6XB*l8(bk#PeM#MCC?wmrwv%`kN`^t4bSg^pGFg=BpEe)R6frU$B{ zn0|4KEWon5F}D5nX1g+l{2fh?8`_YTThG+L>VU@N44mN>8(#FH8yv!cH}?u=Vq!|_ zCMhDtVhYz@ZiP`T;4C1_{f}pn3vCXgyouE1qOW&-9Tuk`)Up{{ zA(R_I=Dy*hUiMz^mBdM-D+SXn_oc!?8A#cm2x~8QoeCw;oK5NmlW5fZ^Cf-$@A_>* z(g$i$Oo)gJFSqCj(>_#?OO^Ss%1>h0U}$dmAWX*EblYV_V-{N)$0n;E*SMquGp$|3 zgv>NKp-B@0iIF?*e)r)+wwvLeM>N@}4ZAK6IoscBG^w_}$c+{!!GSk-3S#2bBn`~e zyvt?nzjDCP_B)-vk(m+#m@$5UGae=&Q`xvb0(a;+o#5(dbPEt$aCZ~*1{xB}_G`AP zv^$~+AfEZc*wJ{m79|bHYWvQ2g%iejL7hsiV0a+wu^wIi!AEs!)af_3EqaWP0J*zf ze*)vbl>+&Qxij2e_gVZZ(?Fn--G_k2j`jzQJ??YOgHo#|iUCWEuqYT235NPGeBTlo znLA*~2z5SDa7@>xyIj6=TN)CW5ZaVU%X;kIipQv5SZrBc;*egkEu(YwbvYZ4(-0sRj)lEc&|dO ziVy?E{}N`Lx%-k^?%MuZznoh@L>W5{;|G-?pn-~mI`sT|re`cLh8&3qzUbN`6C7Wd zh-yj95r75FV!#p8E__A(*RL@#P4f$ij~0B0aE#)B)w>mN!4Fh|_UEZvEOE>J_7WNR zBv(d25FG1ocf!ra-N_=V8r}NK-=mitLBON9f9oVvT>V0uHfUS!6h|N#=Zqb+ ze~Z4uwqACW<%b?5`B=Sxare7#xSENHA!(d_PZut^BlRk}aL!#10t^I%#u)j42~)u* zjV@wCNbUZ#Sfl+xEnMZ{OL1te0efl%2OgzcaPkFC;q025F9 z@;z31qR;smAG%Rx|L?rqJVvdH^5mJ;6|t+&hweQ3y60~hJt<9RXyQqL@lS*St2{$` za`pWfI;304uzvd6_n^-zZc0-3tj%tKh^smH@q?B5><^EYZqVm z?_ex9aIv3$^AYVN`Sh9p>U3l79TE)7?MT@D1|=QqExALH1C+vC9Nb}}o3`Y$p)IH8 z(c9CkQJe+fO+^}x>M`Ae+MSEGZSM6wE|`$E*Y&Sn++=L)>)zVOf`~xCgz|PoBoL=! 
zQ^CYnSNRsC*AdP;HgSSBVN?k}Oq#CB%hw_*awU4dfq8hic}pKxn9gjQ=*jGWx@6d3 zK1j*t*YgwyBFqbl+lI!yS#Fqz0aDSg#pRFRxJfmul~O0sN$y#MaG_)!Oc{9TAGkz| z<}ewvZu@i(3xzLjJ7Z;^bp;gO`WAJDT5=PX8RdkU7h%LQ3odcMNTgQGsrXH2Y*0C7 zngxDBPCRtB35atG<3ij1r@(Y~eR!R*f5*Lb2m}0T4d3Bi-}ln?u1+Q&jnL}WLEC3&?|IY^xzKW6(kiq zlm6CqAJn}6=;z5>zvE2&cNwXRW8@|%iUc#63bu{TW570{c1>wQ)JhI|$&M;v>k6@+ zQae9@3?|ViSvW*u?>OivV?vbfg>5f-mklnwOZ&ZU^5o5_ z&-Hh0OC1Ub{=OHv?#$owmPC z)=}!id1*K7zy+>#-vPPCe_`BD!jH~t+!9rmY{z{r)!vU&o0*Vi6yIjio6o=|$C-Hf zvyz5EWhF}<1Ylz60UdGTNwYx8I!c?JP(cy6agHmjV&_q4HRyC!i}zT|G8IAB8nExT;8AmEhcV>NBz)^uCtd9n^`JU zGp3v7JE!SfYi8c?#TUH);tlDVA5F&5_qj+;TZUjtr$yIihkj#DnYnM+zSZ27+<6u% z+AlW{TC>W?r#)=DwHCVOrRUpJtn$A1yV^tgB8bzQ;|a-)&$Z*=t;6E%a0vDtmdyS6 z_YABHT=+TaXI_~Z>NO3E5{FI)hi!*fFwzcD-`gFu-r-puR7ej@>0I!uG7Cl~ggS@X z-MF$7Ks}?}kaBQY>%}9?7y1dAZ~ZnyKTD^#P^M)AI*b|h!t{z%YspJUj>r7@t1!nl zHyb^SMrFj_6|cN#GoJ_;7S>~FCO)un@lJ+i72-uw!+NbZ<9cOQO9F;k*-d9#cbXZ1 z-4?&qzC2)I6gz5nR{J~Z_WWO7@w&4;>YkO3+-rms;`)}J|5-)Z=yilku@Xdc1G&&2e8AWR_lr3WWxEKsavsBM#Wbt0H1a%M~R}GfR4BQcIwM z!`!jV+OjGXZ=Bfl0tJV(@MGr$KQM_#>GceP`7+7||)Sa7T6?SBV=D2SOjRZ=glVd4XL)T>{Ev@<;+ zs#niNhT2w4^{@P4zKWkT$l!WvzyML1l&BRSpdlE$ZEHp=p-C8Z;p4c)`2$QH7WXAp zONwX>Wm4;kivEnSAoJJxPhRjk*CPc?Nc0g*5NBrdZ@Y^Z%nV5wcuPYbD6s^rR_63( zAlahHajGYPMfENVQS4Da*YcAwF$s&EEa81X>3;5=4*kh`^1^>#on%0$W+Cz+fLCWC zUcl7H&P4G3`ZM2gWRp`v@ObjPdM_3bF;g=&Nd-(I3u35|X=x$eYve*e5Si*V86kEA z(!ji$nXTHy+OlCg16zZh(N+LgxbSSNvCI{LEAtXf+pRcU$T4rLJd3s5T%DknooCgR zDS+7bbu-B`sWnm5J=H%lQXf~+8l2$C(ai@YF$svwAv_6jCKc6P0- zAc~-QNG#4)-AY36CO$?GYymJK-~u3)d+D{;{)zT4^qT1tC?G0`ipYOU1O=sjN~oqx z!PI>5-hhc3!9)V7n*jD*ebRtM`T!;-O4=f;ipGivFqJDC+w?L49-$2H=ZDuP|4?2R zUjOXu;Mt@u%lI&mlynY*B}SFdHo-;aPB>8O#lE@7%i&ix$R96RCq5Bwo*5<+zIHj5 zS{;0Q%c@w{!2a!KX483X-gh$lJAH}6enT~5RB)vMXe{V3TwAMSlS7C#YKWZGDQ*at!-X^m0P6z6=KvB%^i- z%Boa1+9l+Z=_^_9@p}a*i*%0*8G@XQu_oEv++^NUT*6GfnQ&HKsYNmGO*wNFB|UdC z0nKPvH+O65Pt@#Ea}+eC-X2p29rFaxQnH;X?gA?fMx`+&tiC#4xBVqni1?WgTy-8=Ujg(2}yLIh$YRl~>!x54_mQOOg4a5V~YK5I64)y zm;;gdY+sCf5Y#jKH)#54srU3XncfeBkX z6Don*?hb_4!;m44gLea}8t1cMz6BC;zIssafQC8TGt|)57&oj4nzwUchn+N3F?$t` z$y}5ciIlRFP|zaLHy(^T4&+u744u1Yi4DLR5(Wrb*LgP|D&!F7tr`cdDx(_5{g>E5 zWnfSQTT29xWv#{oI~Mb`7gR#V4O>ZI%Sm`YW_dtINie7E&N5yB(^${hd* zp){{5)>jE3xhrPW(sn9lG>7fd9GX{uMVsKOa|8PFLGLMpH7EdWZ;#+ zK$Zn5j3Uj(%F}^GPVkxp*IObys2%;$+yn6<6myU2dV1b-1gu5{(Z3#U?B<{PQ5PW)3D`cW1;ol?7-!E(h)Me zGA7`*PouBvsR;7PXg5OzC4@xOwC10gU56i?dE$E=CofwR?$x!=+57%uF8vLoGs%_He{OZ2Y?_d2Z=g#it zWPj`0?h7K4vhUvF9NLGho$Xoc< zw|HRHlBV}Mn;Ftfp9`DaRQd*=$*`(L#M*jm9#lwj#6_UT z2!?rg=5)|7eZ)}4lVzjG_RW2>SnR-8Ui^uFxlMY+d0na;M|;6v9;V~Bs0Z*RG%=V2 zO`Cv7k;pU>MMj`>?vKHDD5|0XGp7Y;)mWQvH-Nza9I65aj4@y#U@gVTOBAg|jgQC1 zQL_|bdv#U=69#XBhTC9w2KlQHsYwf>>DTPJ{n3l={#Us@q|WHl82elM)1&tP7F$+Q z$gM_1p*7$*&a`O`O7Ff`a=B^2=HS_?uyqF@ZOh1Ez>ISN20YuU;OWrRH!@HYCLk;Y zVeGzjFd#6u7&wa9g2Tsm^^5q{|_5Ys+Z((Rl8nhw&WSLwYRC@u>c+}0F;<|<0uJuMkh zJe&--t`dSV#39QI>b&3cx$i9a{U z;M)+f%8xf%hHsI*WGV~GyZxnxcq0-{5=z%J3~8hoBE3rT zgO>R(F@KYA-&}ChX3al8>azbYXURqZoU7_1#Zbz)Q6-N|7B_)<2e_??Q!V$EBnq8AEFrtfyS- zZP0b7vO}IU8Ev$22ZnIr>-`cxo*f3>7W!vb-}mbWd(Z!IoNxV}anM~ucEk|DVM;Vq zQG!*v2+*VWLYD`QaBRCC1p)(#Y#{h^XuuUN-$OFgj3^*LLy)VGBfQitdFn5F=6Ukv zcT0`@wA<)F%0F1aGaPa zxTSy0sP_b9UD6;(0Id%Ig=O{usHGL50Wok0UbDxOmD5A@tHX)f0xOET0qP1J3iZjh zVM7=akm}szD?Q*7eiW@({@(1e7yG>=9`!@YSy#9BRZ9SkF^$3899^B5fCAi7KA=L0 z$+Cw{o?$}UV#dZ8CslsyPlG^hOITW9wR&GI4Qg_F$n{(DC9T~JHDdb>d{3SD-(j|DLEKpOrLWM57&}p{bFj{aE07tR$v4yz@ z2ANS_qvrsiR4iuO409dHCT4Dr=3H*Q1qfW`kV_tXfxt)yKtoxU zZ36_|l@bNDc=y>hqxB`8jTgCkVbhtjXm;5bZP1?TS2fEiC`$43L&-is0Dhns@f5(X zZ!f@);o?AnZ;d 
z^PGKeiXpyRA6)f-f8YgynE=cWJPGh6ya$vCKoMLTNO`a{Kv1?MA-n5El3))Hy<*)~!Ci4vi+Gaw>RQMRFUG>XNy4g!`wlG-& zPS>O}cFe7qR;ghK%(fXbEgYO-r%|4Cu(X?q6bulJxL-WDuLC*JTrOjdHcB9_C_t5E||{Gau>=d zl?{X-Oi5D!D&ityYK3Zwk#TvHB(2N9afvV)GA~zsuYqz$0qKfNBVr9D#Q=D)NKtPE zLu+UdoRTw;jl9LaT$K$_JlwUBHzN&B;N>BaL4;yp z3_ut>1!OXP0pJs_jW?uJteu8tQSPM|;Zo-#|J3y=AAWS!KflrOBi~%|@~h>Rac#pY z3FgDs+>BOIY@~6Ei4cGrQ9e9Syf?F{^~LQ4hhW$xIL=mmm2pJE7a3`0klhc_a6g3s z4*=50?21=QYX^-rRtX+Zsl9E4Xl#-w2!}w{yY%r!J}O^-E_3>NnRhSyTGiXllWk@Q z36A@}(pLIjBdf7#QXwgu;j0N@>iwaH(~GK@eTWV%kJt`?3Jt&kY`}4b2_yi5?J@&| z@&H&GI;3~WNCG2C!sw6&L0PTIU^@W9QNXcFu#t@bF|Rh}X#kN-N>J>YQGmrz62KS> zhyfNQ#1S5AW+X6>Elc(swV%!6G_w;eLf$$?1rJ7r-6gePwvb@T+LT8?1e6U_${i0v zo?G)(I0`nMc#cicR>GTMxBmtu&OZDYgwLWr)g)UT4gHy(B3r20in{Os#CvJtQQx3th zvX9b-?~{Ft?--^~2AP6j7)yn1yAIPyXg7+_Jjq~KzH|mkE_+R1vYGWbv)rH>lE?wm zomB2X1dlNFe`gcdDr5_J)M`|c0XCH7EKd*>5y@0l9c!>5NetT17R31I4JG=m7+aD@ zlNM((l5o79=~`JkBkWooCUV+G8`UkVB(`u8Q;`#hNKJ;feu?u z)a4DMhcSe7Pp=rQF{qi@l-w;&xYtkI)L;U0dcOC0j=B1Ca~uCMpUi*m>bVt>fh6Lb zN+2a_I3>K=1i={6yA~a*`9QyWt2fxk$dcDmt2r=@8s>_R16SXlSY#!v5OfQG11QS_ zJ{}lHis;n-0+rIZDhD8#*cceE9I)zlQ*SRnI4q?SS%|PungXJPu}JVlvI2!#oh!B^ z0&H?+YVCU|O#*?&R6L~&)sRUsE!2tZ9DJbjdWq7$0&FR!)O}v{T=$d&V$;^JM5T@W zuiKNPH?BR(45KHHm85CXOhBA@gAmK$>bh`day4;DDdO?fG2ac0;8ajJ3ZrRBc@u6f z8$}wt1>&|H(?J4h@ksq>0Zb2D{uZ|E(Zyv$fHlbt)WAM=kG3-<*itY7QRS)#Op*Zr zp8;+TuxT}rNk+g(>}@f|8g%DXMxSPkoU%(p<*~|?g}x=w?mLfbgvT|?JHWe{IIv3y zS28E0C|lRC3mjBT$ky0{>Q>y06bb3lPBd$JK!kY1m^(Jb-Km5Kazw%Hq!uSuW+XUG zItPr{HA{n-F-XA`DE|s3kZadhonCHz;{uix#J-mdWFS*omUfAlq_#QZ zx-3#G2(M964mV{TOzrKXGcJ=4tPt$?#3=s?8n(XPt}NlxNmPv#lVR%h zcm!Gh+Tzk2eZ}3saqf_&@Ow9O&!zf^GTGqz{`k^Cv>&|a;E4o5??fPx=Yx}p-oP>g z)j9Z29qf&w4r8uKw4M#Y;Q(*}fCEWYCV(;*3UE~^0l+)=4-;WP)<(EVJ43>N?evTD zRnFG|&e!LBE$5q8@__QOwOnn-cdoL4hT*7!rHHB`S~HD71MSLfb#~j#y`WMV&tW`d zquy;?8l#dU^wrsdn?tEG+YG&xVIw9Hi0DpHrWK&1h6zL4NF@Zu%KaSt4>XJ#;!bs6jHPW zp-&uu27&}(HzRpvVonZ9*M@0O5MiLCbMgQlRd-Vlp$UviVu-*EO)X9$P&ycsR4{bZ zl@Wvc&JbIyt{ZEzI-z0?d4v7)!(N^x-(wD13_+6QU;ewtu#tnk^gq6ms}Hg)^*U za4{|nB>>j|lyae5*M&<4!LX}YxVboPf`u|U(c;M|=?rrGmaa2yc(GUbDLF$>BxDHzhgAtfYUzeoXq~ObN*9n{~ ztbADuMhqt{M#o^-e}htKfKozQkT)Rvgq~L6# zDMri%#F7#f&?S&V1C|DCvXCsPmQ6NUNR|XPVIg4?U`bdKc!y2c1m0QD#Vid=!vdfY z(klfV#Po5{!f@;aIHOh%myBnn~ z=Me{63=sT@avp;1%&Y0&%Xs;-u}mZzOfxq%$pl6*ggO5{@N_Cu!M%d9d_}N;kwJ|& zB^?I3Oz|2Me+>8G3;~i#dNLJReDwwuLz<0bv3k;_ukpeqZ+QLkH@?-X|Eh3Lb{q>I z&l%QUQU~S(-_c`n;qf;63W-@QyM4}4hCc%T3^Ec-p@{13c5{#fQ0A5;p$q^nN)tvl zOg5?SZ^ru4IW+Rb&!XPPs;7$m03VqDIf>QiHeSIc0?1KJThJCX01HH}echy4LUuy7 zOl=Vr>j^%Ya)Ec>Hk46zinQQJrptnj37RNj2TCOwygz7;v;ey@cTHS7(u?}Gq6aCA zGR?dNFb+QCWXx~V$lHCGHr{riZE1}gig@NNNTS9@#0ql>sYQSzjn+i93CkrS%{dT4 z_AFvzqX0wn)Gx*XL8Fm_au-j=`YPkbQt{F5od`cogAsI|d)zlX_ZVrm zE-}zAwyD}x&?)*t> zLc4t8%YBS|7`pE?b}W$yT~e1M$%w0{f=Wi*2owNSssQ4)2p0s+-^8^@;RjFfc-H3& zU+0Y5lIHLX{|z#v^zs8TEl^zYxbI8lVLsxhR2P!5FJ8DnmwCO+-bZ z%L&lvq~~C||5PutFE9MWklio?;wxMAk)J4a!e{8zMyq<9;f}Gue>aK+(j>7-roCWK ztFXPBdGJLapR4)Bx$m0G4*54uo+2S-ksJEvL4tJ)n#n&V?Mc4Rg4?2q<83fCvG#E` zhh!f#@6XY>=Z0aX7&Bf+ z$!Jy7Fe)YryFt_Z!&NZFczm8EY|9D_;GELHSoh)N(On~!Wd5myEwgjv!k@HrWKJGA zWJ%@ox?4UrAJnqJt3Kno7p&*KxE%tO2Yu+Fe?Uoa2$=ZRJXKUD%gf3DtfZhK$p}gn zMq-h<#Xt4eOAzvagB-@L6YjX9QdFR+e*WzuMtu3ft&p(ZrRvw5+u|Z%=Q&iRR(9=F zeVj6shbdu~G!d;o0T1A`beTDIob{hpI8v#K^70CbYCAc*men#Y^V_+58`cA>Ei)pX zo1!pJQu^z?)6A14XIY_LZ=AHnbk-M4&2R9Q6k_l&ho)C=+!X1(> zlmdZC*8mjU(gZm`BBAHJB1=U@?I*+OF2gd@E^HUe+lu75X8pFy51JEAN3ihaO1zk5 zhnkk8e1bV1qCjBApHA1`QolYqWl|Gkr?%x>!7g5IDvy8bDYv8bnmQk2U^&8RiyY^g zBsp~|C)BUJ_zM^_{=9AruHo1bD=8m_H zVXr;h%b&Jg@}w|jRa9?IkW)@x3~uJCf*{lNJhz=LJ-d-%X4VrNvF=?%)OVvej}sts 
zlcR!WeU%4wo*UIZUwrh-B7B2p7hmOWxsFedbood7J?mL8I~y@M&s|$)ZH~Y_A@>Rl zMKX*LcI7uaz6lnlRRckQ2uZ}C6rfO=Fp@(errr1(6r?w35w#D(LAt0pI|2vJ@`PX; z1ljU3UAh{hb*i2L`vqs)2ifmi*jYlWZA_DiQ4$xqJ}HrDHkS>!QWH{BG_mJC&OyOD zB1V*CXpBVH%Acymemwc15!j!zpMPj~Xb(#4Vz>>4+qKL~&y1Er;deQczdBdEoOShe z%X09HPTDT7!w6@nAal^4&@1`sj78>5iVj6R^O@@*xUs4ID4i8yLdC4^&Zl|KKeSVQ z>fxRjz2s|sk5#@+g06#45AlhAIMyfrnD>;#NDPoLLAj`yWdL!+z#L#+k`WIu%CV~e zB0-=m&tZTlLn*`niOA4E%oT>8-&Y_UgFjopn-Z+$2e>kW45pSv5D|&(OGSaK1z9x^ zQvzn-n~j?y-5l*&Mxco*Y%_;t8vF#FL(FoZMS#bM<j7ZF7A_QKSRWc|?V zx$sw3n6R~h3|jL$jMh#)sP>Ws+91ewljFuQ2%?ceX(ady=eGC5gfPca`XCa^BqV|n zt=L6_Zrh61_-gCy2V&@}Y~Hcw#P#0J`}9#?|3@vm?|iuZ@+Eo#30ZOy54IfuPzoza z0EK`6BFP*8F({NujN}BxnOQRZ0q)l{n3Km`>*F!ddimgdea>GF*k8fwlR{F!ldom7 z95oCHdmKTPj_r6cEv-HsWP(3b03o8^*hr8*!_5^Iu4QDB;=0$8aSCnY>i-(rRIY0? zCQ-ayi+bak_t>yLtl0E=Hq*OwHI8s0@M>*qd z&cJBTalBaHcridlMMaT16j20~5(NQNKoL}`R8&x;o^UK21Hv(cV+hAgbJ9m}w+Ih! zw%`2z6C{a2Ayk0KsJF-}80d#`2)#;UwFYS%WmilKM}OmH^q7FsFMJ(?!MbTnR>jaT zX?-~y*4D}hCF%hiI^k`>N#O)%3U>-lGlk$nqN8a?!e*2skGA}oxXi!4G}F=tvejwv zZSSpLJn@fzulw&v8a?tZSyyMix6sNlN&XTtd}Z1iA{^g6DCt zLEqHdd^kojh`Gpkt)ZXo_mj6+xR2+)c3))o^H%z_#j4LP@yxxa`hR~s&nfSIm#^`K zfe-?VOMhz%UEWBx&{ta!liN5^D6TF{uGI)o0V} z=+_(8gq1a&QiaK5b5H4-*m^n=BFd1%IcyUyH0A}7!&EWiYi&*BP&{Nw&n<(B-AuE= zsu;(hjTr0hrjufgWzx83&=}%o2@ptZgh>b{AlOSd2_=NK4yMJ_ekg{V$m=o;i6*0Y zofo=RFaD$JxqEwyg}<=u*6$qlia*@nWq(BrVKTPnuvB_)TWeC-?zYY7vjCJ^wo(#E z0t70QQcR)5#`=xam|cZKItF;Leo<+FN`04`(>Ld8nNgUboirNnWvkj4yk>HgJ5>V> zh-EQdree@8#94aR$-!uY_=mKta;Ku4sG`=iD56fCtznI^vyf(< z%jKYH(7`-pPTMSP6ve$-w)RWha(AlFemcW~PTAgRPag4gUp)5b4zvj$6~DpXpcM@b8U&2C0m}(lHW0JB zL!$w_gb!ABPi47h+yA#NWc%lTo~>Q5Pv!q_P}gmFpzZ~t#9 z0LCTf@evafOMwiGF+>Xx14E$8;9GF|=&m_E5?}PCZ%pzZmMqpz#6*Xv;J8+$qelI! zk8_ zOE4Tj9$Gqm;Htu*=l<81fKY0|TBS1O#9~-EuSZse_n)0EBqoqWsB$a?dc@pTI9C)b zwNurQ`|I-ZaJT{CW*cyv`mBTLo)?cxV_w={=F?T2+02nnQcvf<2g>7 z(@agRJ$``RKQo@b2>7mrE_s~tSca=nO+<+QO~=JjNW@6>sAXUvhCp>7LICim6F@f& zQ__^QrELu$9CvB}RKkOVe3Z4@Y8_^TX`VC@R12qm2vY;VP{#o@91l*+tF@KmpcaN_ zLDMh&dCd9C)@5#)oBvI2+tu0@GL4FL$syj&p-Xn(SGvU1R_Kvy7g|PrWK1WAE#RWZ z!5gxt2^kp!3^4@|E$*6t!KjSI`6`u_5v+_>M$WH{YN;EojMU*}WrTtQ4><9xcsE*M z;5D*<8i}0$Q#ALq{Ep;y>}l+E{~b|HD{fjrqFpgL1Qz_gzAxu4of9DM2rM> z)hIb4=1Sy9Rfr+N@G$-)}986j%k52_i(Oa+LuFz|peZhDbzgS*JsH55eW)apRQm z8W{7!8i=nGUIz^TJa3nZ=_4T_iP}Bc3z14u3+86Lmgn!g#ax&1zuWS^lZG&JOie1K z-H-LVi}V>@@qN>|ylJp>%~R>qNM=h8np+K2RQ5#3$QT2W05Js0V;IE|rE$av7%>7y zwP3^uh$BYC5D66$B7_k@9C79dAchf)C>=2ZMsWnV5pbs4XJHBJGi1+t#mbmNP!_1Q z1^^L2djMw8=4vanDTa_p7rlrc+%KZPy z$48IeC7J0zKt-yQ5k>Ua)sx~}cU@EiM#jh-D2;>&0f<1XB8CVdh8SWXhFBECfLHshypEuwUxJ- zH=g$N-0dDVfATL57RsqN*W>hVu>X=9_K&3=C?Zi|4b}Cv^%x13F=Cbm%0r~?5KHP0 zG&&F><$>8+S3kzKRXT0=gKdML5K+G3RA*95Vz$DWwx18M`EIts)2thAv&Lzo-gv{T zF;iy@=@0t-Do_>BwH)F^K0H$h^BGA_uQLcEod)c_a$HA;C~=qJ<|NB#p+`&6s%}J6c4jx=el3A4|RUDXS#b_aL2_yr}p0K?=5lj-yZnt zfBDY#a`0#tZ1e_|%7A_%E2#*GoCFgUVZubGh*^d#4&QSaEQJz1yFLkQkq-Au_T3qb zn$1$eIOS~6L*2L*TFtWxqiAH`xS>-F)O8B zkAA+;NKmD{yQU-^hofvI z%ot^YPG7+>f+dCk7e}0Um!J9R)^7RUQu-2u+Ea6!;L^*3qS~Q&Y0B2&{zHSoweP_+ z$Vy#SS0}7Vq%#4Hx~DB6KmY=PP!g&_jX;1<6$n765r9Cb3IrgO1RwwbLRBaU5P(n? 
zAP@);AON8x%!0nnD5#3Vyy61=r~}D7RC0d!U2M>4pYF3Q7;W-cd^`?2cY3~;E{DNB zjbWAf0j9%XFyIkPOWvU0pKiL4pa+Oikh#>-j7>ERjArc}4 zVu%D{S%?r~h#>}IhyVd%hyh|CLWqPIA_O9VNFV|sh8Tz;hDeAZLWlr?2%!!?Ger;( z1E7Jh4J5*9i*&dj#(h8S%c4ErExK&UIhWJNZSn2#@ZICNkMq0;1LR)9#JO;+cj(~< z9Gy}I1G``P6A>nB6Oak2Eq`QavPyDn$HtKZ1RwyRgg~G=KmZ5?2mpZqgfapU2m}a( zDj*Pm009UP2mk>HKmZ5?2m}HI0uZVoPar+s9#j%wPLrXotH^GZk%)CCqPW@map3HOuoK*JlT7lZ~9VdWN94|86eWcSc&e z@;5&;I7M31DJkq+M?j%uO2$w~S(aj&7_+Zy>+0`X^8^i67rNzxt_uSzI!^>x^Q;)M+ z?=(0zc@UPZMib@OKNv?iBBqa=`oyk#FI%#ZCbjS=+@IcR92=85^lKhacjHuG$Y7d6 zZAqiR@EM9c3e$?^fHO?!7=LYZt+!Fp;s*xRak7`iG4pB*3^kD1VKH)P=}g0}2^Gkb zi(^LN+>+RXhLuYG5H81{mj*`H*!3n{>SCz9Z`T7+p6akFs@<@fp7lVa5Yd_QucLv=fU zbz#mfD%}D#+z0)aWCB+NppU-qBzEd8w z$N|qW&XZj+4+gXb$<2u_B!ZwYVtNLsBFE9#0a-^i2T`U#V=fS>ph%|1$C%ij2&;{V zOdLg?k{6yV1s4`1v-i&;=VyoW^*Mhz;PZ%yu23)f(nB+`UwZRrW?FZ7cW#KSY}j@i z;_cQCAZS~kl)$_=;B!btoJs+EAcWsS(%fnb;wi=0_S5-z{AGkAi2ge7C$B#O39^8& z$gx9@;ro9&JqyRhN8dTx*WX^GBToC=XD8VFe704j5R2~fe)0z=KlAyKFq1(vAF_%_ zMBxFzKqUu+fObYviGYYe44uQO6*2_~Hnlz5WWopdiTRE{tH`kCWNzN#dM4*1Wa)iw zRv6^KJ5Z^^JMS7(>RpZ6-j%2{!n-Q(YWN~u8QvCqg?Y)v+5Q}k4Wp7{YMs}ErH7KY z5#$;=!H6)MX`spqi-mWV9;3ch#>IJ$GiG=*v%SCOP{wg>q2_6@RKh`F=EQqlw!C@r zO(J?Zn8lLHgJ1YUw{yO@{H}4jeV@*~f|PRUojUpw9`DrJD`?`l>OT&8>iSJu6AS@F zh0{RYwA5Lj0uTWuuoWsIMilRv`o2}M7BK@RLQ#fqH4sUizI$lh>le!20o$Lu@2piN zvaQ#Zrl~7UZ#a!Ut#>8PSVvIV41xf&161_x5+#8^-i$6&j_)`KiF)Oh!$`Z35FsRm zl}8!u)VnzGS?*U{1?})tnzo+n`j@W!>?`f_))S(DqJ{el4Sv4Z_%=(w zi`wqGS5I&po_1~S0cphg7m-9QLlQ(T3mC46q<})F)46UAJ1c-d5CP#0z_&+jhH+kxO za4L05-@S1KfL#DdG_MeyBVTh^5SGAyS#(aG1r;djMyV3$<2+8F2^>GAAJt=W zTkdSn)OWG?&dNWucVDyctT99`mY00?-Ua^ijr{euU9Jw63xg{ck+$ zh51+mngtXn8RQ7k?S(Z$G@$`&*wP)TUONR&fVECkP@-m_ImlmqA@Drl{eP32ho=Ou z1_YTJpJl}-+%uQ~=XG7Mi@w?$FPJA)oE8SoFzHeBL^rdwB6F&a`@EoqWGybiLZvms>WD1Lc+s%NR(KI` z`|wO4qMVu5P^r~6lQSQasTqNh1Zw@3ru>RpXL+R#U9Ey&Gg(T4l@c#@_i+1zi&stY z@3x#|n|tfYaoWu?TS)csCQt$etnE+>cnMQ{{Hyb72@^$LJtsBFsXvpnrY1mdx5Np3 z%|Q0x@`uvo5wB12c+o17YSY(yo0F-PLgFY!cG`t=yaoqH&x4~eL0IKgI8_Mo58w+K zvug>IZp{EfP}LIw5KsmEtJmRXfCH1S@B-nJzB<5LS`IS<4D*IJGccb@^y5v9_HrjO}mD6IHUl}vMlKMc&+2s`jsDv5`;#5d^`!_+Olw$-#PwT7PbrBcVH|LMpwu~e9ZclNR4hRw229;K973jD$N@q@(5;fQ$PG3_8U|OOA z2Sn(sA&L<047eq@48RkG%L1Mkq04XJ=-YbHLM8O0hb+34m$Gi=n!;|nUc)`?X&dr!w@RnfAQH9JjG~?Piaj zGjqLT3B~HUPv*l6s~L_VZdp#qortnqPy0cL0-~kaB4@3nXbd6>ZhyFtp*P%5{;(I! 
zWcPiGpIU*o{qMQ(hpUbgixfF6!p+r-&kuruV35`zsF&+Wv6Q{iQFa&~tj`^Ott#+voMmS2s6bc%65CeELs*_~dy1R1TlD ze`2sD`g5OT@4-_BKt0J}=Ci9&o4v<;)cN_yhhh7B3h3F+9EoQ;H(%Gsz2`7q@UvT9 zY+WWlJL2f8c^z+E;i;DmwZFJtvU2zK-}krAc`klp*M6=u^HF>0i>EK0q+wW=$mDXI zVHI^v0^}Uz6cI>s0U{c;vra@sI@;lOfK#Buf4s)CgcktZCh+TjPw-}G!58ux{4!3Q zfL8;c2B+jGo{OJ`oDLC@5P}$}MCGxVHQsuIPom$?;0ZB}f}BwS z7i5pGVelvNfo%UqbXj7Q2^V=8*!d>8F5tEN@Ymwm*uym$`r7qzRzHsk zY4{wxG*~rIOJO|l@<&0wkY0ZPm<%EqR|shk+%`Xkj=O(3r{cHrCpjs42F?A;Vf&v* z{AoN3&&G()MLeI(8!+8A{kXq4QsgJ)m`N~AE;+~QvC$s;IlA1$jmKZ#^?UOV z511F*Dp{3b6q}RT&8ZwF>`XaJ8Po3{ywXEg(jl|ITPnIuG&y`eKObp#(DQ*4tpSQz z%z=C*ygRs$pd;PL|675_QX1@WEL*p1mWzke-;?B^^81{It#80{!+U_Zj^qT`wLqR_HVJACQp?-(6B@8393Vj6j;VCYme-QyA6olfT*R4<(g8 zMj#L<{Ma=(&nfwO&XaCw{ zklmY%^y!(7+W7ew^}hZKf{lw|0tivA=S6vMK95%GGURlSZy0&0n3Vr!cnXFOhhIt) zlLroIm>4OHq#+YR=&=ryk?$m^qsiq$tiEn z66;P%*PC_SvaBP(t8yQ52{cad`0Qz7j%q;kVsvcv>g|?Xcct?Q874zQxutQy}JqV6#kHA zbP;u%0mwux>X2)iy%5F|&e$)@Rc)WM<*uim@ z$x)#(#uehT*u`bG*^5fY7h*dQ1OZYI;&S43-jPoQx~{I4A3zKN#Jvl1v@aaK`O{O- zSqyzLn#V*Ny(tV|`N~lKwmbB zH0&Z6ANH)g)()qFbe;QxPv7wWFTQ>~^Y60AxRTjlNp|hc6jTl{-dfTEIp85JOJ;v(~%g!wRBcERRHou`ePkta%Jf;b9 zOu#uv>>X%7#t2&+$O)?Pm3XG><7GT$riPgM%XhsdSW|u$?QTq2j38bF@#~|r{x)77 zoQNlkuhl54SdQ-HBnvZ9MlsK_IV`<8p%S5rc~+ke7+pa3Q3QteTz~WP6=7r*P{{NT zSqEU3>A(DFd^9;IhSXDI9j4g z6O|xxmTB}6xZ7yuzvrc|b6(()-*fG=TJ-7V@xT3()2?lQv?wjXb#s6c;hKT4Mkk;Z zq^j}M^g%3-)IH;ZmHm2tGPiG7*7hpp=RTCIgb55<0k5__Im|yG3PLcdSue`kO+?EE zSu`#WReJ!>z)_r>Rn|#r89}U)*WtAA^{`-LhD-<{_$~Ke>2s;X7*Ie|v^sPzEMR(e zA^KV<9HNMchUmn(e-4Q-`z5dD@Pt4HOickb4N8STNhEfN>h@J3>6zT@LkEq9Ij?y> z=MA&>=zi>veY)&STkOE~!+hv=?v7Wklvo7lmJKu_Q-Dl3Fk)?Im|!jzlhhP%33P#Z z_DMR1N01vlCe_ZHV$%;8I9F#xyx`)B-$?7aqgpa(ByHMIFpxs3%IMd9#-B>Ri4Mhl z*1&Vhtm8G{l{8IpL5NYPPza1uh#_L)o5ysW0Px?{ zM`9aGrQ|gFL)`XOjIK+?^n3+HH@``O@(~A5|2V;{Z%#CzYOGX#?Kvnz3#Xfq!uuBv8?gvVdS$V&YWj(miN<)^`k)aDcK9i$rn2C}xQ>iLS)Ffz< zDz|AwNIFK;Y_V%z9G7|g_G}=Y|ARg*%pT<#3A zw|kY*1MP;*sl^ibft_I$EYN z-xi+z-*}?;M5`l@OKpt3o!cJjvNIK{v}vRmy6=!@?{xvrlw2qo5`@nBNg(ULZ72L) zjRFW$ybo%q#~BD_re4J9xV9c8t*d?}08hy$GvEDpyXIwSLKBn*oTL~w*T8-EJNRmC z7O2>Dwev4IE6E@K9kyokPvXQKv&>$c8=a2_je6*%-jE?>O)pRGrCZFpOQUN5ZJL0~ zpPzO2`O^D$^NYFXYO60)Sf_34ty{k&ra&MgruG<@RY%0Ik|+c8c&LJQWKED+wQFyS z?r!I}J&yHq&U#98+|3!Y{^;MkBvSBc8abwtuz%QlkM|JBMCuwNihSyIbn^2A8J$6G z7JM~OjWAIcBcMXx*|y`^S(yUWKrAD;5(5|}jTp|v-%{&rv(Te%z2k2#B6cc#eP+Jj zF_pI6U1z)TT3b8RegR95S~fW$-?`sC2HyWg!FTvT{UZAW_IXwkg`Y|VAnbndYqR&%bmaygY{#mz7w!=?w1NkNiSY!gXo2fg_-2zGET+GS(0=f;`;*&h1Bw%5yI znPKFN%qPA7f?S2$jx9Q)jE6KWrpL%-U%hW%B;dM4RCb2APK1K&rO{@w$2)2cn0T*v zz4G1Pkt>kuPvO_Rr^$;jN573*aO^K+S8h$6g$`W$rUN3L=|lE;6V_erJNLalmTY_F z2i5Zm6fcTTM1yYnzPR?WA;()O;!Q*3qe#7YJ!7pvAVi7%F%!(Uo@drbt-oU8)y4Y& z$v6?5iIjHJampm)Mrbn(1|t*M9!#t|a8i+q8gssf>k9YjfScMw9Jh2G=HgFpFZkq( z)DS5QM6eydt}>1=Q5U%qxoTkmH1ZkNgT{%#B#4Gap-r|5li@+hGr>f?S4~O%Q`jL< zdE~MtfMmj3>^1mdomNBmeizku#<+PE?(=cn>NyH;rcRVNnEG+Vo&pevES5+uxXShb z&Vq@lMQRdzMS@Vxs%t#klZ>1pO+v5zIuy z0;Xmnl-962dy|Y5wqp&^S(_Aq4Ua(<01w1agNch7yxa-QJa$)DeoBj?*ScOX+ZNf9 zFLc8TgSA;UU+&GwSXIRXnafO8DIW|F=s*EcCMck2Xp|}V7RMgJMG-MkK(y3QJRl~b zV8Mt2K>-s{XMy5gWv&~l=2e_%eZ&Ma^&$w~2h01eIYqsQSHn!bDQ1EiMy8sY*9;Vh z+N(bfUHi628RR^dLLfYX(=_OntRo;-3 zR}gcC{b)P)ekWG%b$yAQ`Obg!km5$Pm5ZQcwG-DN;H@nJ-vUr!P1l0%6rs!P)H`$L zFG9xJI(rk+5Dp?z@DOIM3x&vV9p}GanKWFmDbS=^4nFI5#}SWz=DYTxZ)IEhSv>X^ zE~p8rk|Ib|9T5udibJW8nOIPzdi-}MF?M8UM}p3djL?!{MVZ1KGda99fgpBvD!B|P zX%cO*%%mx+(~Ml8Xve!rRIQ_Z;HJmM_43#K<70mKSNFYmeq>qNhiGrIGrrX;xL){S(lqxn zemLvSLr4DNX+H6TmJ~EL1AE#|vQ691Y=YJRuyihy0Vt{+(R*3n;;aWcx~E-?K-gNt zvaoxKgc6dd;S_3&8SU#F1?=;~C%f)UTVmev_3DFj(Yg8jUp~q>{+vaKJhJKb3uOc) 
zkf?5m1{j;1tA!z<72q`J?2PhlB1o9vXcrIF+mp5Hj3zLZ8a3TSv;ENa{tCN8XKvJK zZWaAE=G`9)Z)%IQ@&s>jPh2}P?Kzb&5soauO*%D}Sb*~4CbUYBK)9EV%DqGwPXa=_ zxI-RVq8N!Bwq>|Mit#legUZ>SL(3sfKU9}_rrO@2_Mt7hMGLq5;#Vy_;kPxdd^ubW zF!1JNAm-SOvI#pqaA+RtLqzy_Oaos*aYPZ?$DPo6LrBYlaByb0eltd<$So%=gF4Ap zbU%=@Z&>qVEc$eH?yGaY=>GSY(99G@=5Yx{iIEf$wy=OiG{pq{Oj2di`EP6MKd&RS zomUZ=h}| zoQQ|wI*gUtygNblM5h zjsU?nI8s7VwEFv5of~ClW=VEu2A&7&>265CfTuzsK)Rb*fK={upHsRDA;MOekVA}%injhq6iL0{7`1`oD3>5f(zcXfR?KHO z+IKeD)EJv)W>Z6KYMf1A(~NAgsXm)#VACwHCO4WMj$yf>q#{y4;MZCPKG{~9MK^y_ z>*khj^B*p2ZJDr`rOfn}^pU?iEp_*zkP;lHr)N}(iD_CzWCB73qDt$ZEm1!|9hYXC zfst^7?Feqnw>wKTr?0lZb>dSd82T16LwY;2ip5Kw;4zWbjo$4Ow>7-Y+xIthbvt2Y zGY{o^&WHY2@Kb2CI;a>_yThX?7fFoawB+Q;3C=`vr~rb3Ahi5D0hCOU+Rk)G?kQ9K z6XSU0i64a_O31U6%#aw(k93dEH+ATe7TcyBB!Jr^I6}nK%0?&Mv&|&b)t&fe;Q4sTzQWk)q0h&d!Jm zC@R_>psVlpxCd=#9g9m3(N}}1U}&qYGjVrjT#wkcHE{(V3n>l%CxgQzw#e%6htD46 z2x6D$>5OEx6*}XDSP}av>rW;QBuS!#{Bxa?q(vMZhgAIFbUP# zwu2S;OpB3j9!&OU%y$&R1W_VoCb3a2$UHm=5;Z7<#seSscHs>}N7jW%4oZ6=6Adkk zNf@nRxLykg$fDL+VP{K(QVb=L+nQL$schEhFfKC{da0c1eG}P&a`^lI<~6}-pgU2b z&~6xFh+#gcayw0ocpP>Ge6V9Xec|v8a=wQ1HJopV^UZO-%K5sSzuc>kT4O4ub1Vs* z>*#Hdn!aD$>ruZt)}ARm=u?6J_~8-NK)azdGFWeHS#cmPLD#KIRS?m#`@$ybw5gt1 zYf~LI&A=wGsmi9hY^u+uS~dYTHO11!th(%1PvzWJgznD=)0Akd$1&Z`d8?u&QNM_+ z>z};a?|3T!+KFM@ZeZ6i^03w8(ux%8zym>*mUf7U733PlwLPR!a&e|%Lwl>J2=~go zb-T<|V5~WW4zjM|wB3MlRgkNmQA!l!8OquM0az&t@UViOph&F@9>f)?$sE(Bh&QNA zd!S#lM5z!eKtw=M)iwi*h@m<-+eYf07@*@=fgkf?)pip`5W*(Prb~c9O{&EsL;;9` z3T=i&LOlQw1yBr#t+2x|0CB4s7>@=n3V?t>j4b&7d*}ZJ|1bD|!T$^XU-18e{}=qf V;Qs~xFZh4K{|o+K@c)9l0RYS1?cM+Y literal 0 HcmV?d00001 diff --git a/pyproject.toml b/pyproject.toml index 84d3d5b3..e8fb901f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,12 +31,9 @@ classifiers = [ "Topic :: Scientific/Engineering :: Artificial Intelligence", ] dependencies = [ + "einops>=0.7.0", "timm>=0.9.12", "torch>=1.13", - "torchvision>=0.14", - "einops>=0.3", - "pretrainedmodels==0.7.4", - "efficientnet-pytorch==0.7.1", ] dynamic = ["version"] @@ -50,7 +47,6 @@ style = [ tests = [ "pytest>=7.3", "pytest-cov>=4", - "six>=1.16.0" ] all = [ "torchseg[style,tests]", @@ -71,7 +67,7 @@ exclude_lines = [ [tool.isort] profile = "black" -known_first_party = ["docs", "tests", "torchseg", "train"] +known_first_party = ["tests", "torchseg", "train"] skip_gitignore = true color_output = true diff --git a/requirements/required.txt b/requirements/required.txt index 6d227436..4a875efb 100644 --- a/requirements/required.txt +++ b/requirements/required.txt @@ -3,8 +3,5 @@ setuptools==69.0.0 # install einops==0.7.0 -timm==0.9.2 -torch==2.1.2 -torchvision==0.16.2 -pretrainedmodels==0.7.4 -efficientnet-pytorch==0.7.1 \ No newline at end of file +timm==0.9.12 +torch==2.1.2 \ No newline at end of file diff --git a/requirements/tests.txt b/requirements/tests.txt index 5d8a41e1..1a9c5bc1 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,5 +1,3 @@ # tests pytest==7.4.4 -pytest-cov==4.1.0 -mock==5.1.0 -six==1.16.0 \ No newline at end of file +pytest-cov==4.1.0 \ No newline at end of file diff --git a/scripts/list_compatible_encoders.py b/scripts/list_compatible_encoders.py new file mode 100644 index 00000000..696cb07a --- /dev/null +++ b/scripts/list_compatible_encoders.py @@ -0,0 +1,40 @@ +import json + +import timm +from tqdm import tqdm + +if __name__ == "__main__": + # Check for models that support `features_only=True`` + works, fails = {}, [] + for model in tqdm(timm.list_models()): + try: + m = timm.create_model(model, pretrained=False, features_only=True) + works[model] 
= dict( + indices=m.feature_info.out_indices, + channels=m.feature_info.channels(), + reduction=m.feature_info.reduction(), + module=m.feature_info.module_name(), + ) + except RuntimeError: + fails.append(model) + + with open("encoders_features_only_supported.json", "w") as f: + json.dump(works, f, indent=2) + + # Check for models that support `get_intermediate_layers`` + intermediate_layers_support = [] + unsupported = [] + + for model in tqdm(fails): + m = timm.create_model(model, pretrained=False) + if hasattr(m, "get_intermediate_layers"): + intermediate_layers_support.append(model) + else: + unsupported.append(model) + + with open("encoders_get_intermediate_layers_supported.json", "w") as f: + json.dump(intermediate_layers_support, f, indent=2) + + # Save unsupported timm models + with open("encoders_unsupported.json", "w") as f: + json.dump(unsupported, f, indent=2) diff --git a/tests/test_models.py b/tests/test_models.py index 906c06df..106ec4c1 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,126 +1,201 @@ -import sys -from unittest import mock - import pytest import torch -# mock detection module -sys.modules["torchvision._C"] = mock.Mock() -import torchseg # noqa - - -def get_encoders(): - exclude_encoders = [ - "senet154", - "resnext101_32x16d", - "resnext101_32x32d", - "resnext101_32x48d", - ] - encoders = torchseg.encoders.get_encoder_names() - encoders = [e for e in encoders if e not in exclude_encoders] - encoders.append("tu-resnet34") # for timm universal encoder - return encoders - - -ENCODERS = get_encoders() +import torchseg +from torchseg.encoders import mix_transformer_encoders + +ENCODERS = [ + "seresnet18", + "senet154", + "mobilenetv2_035", + "mobileone_s0", + "inception_v4", + "inception_resnet_v2", + "dpn68", + "densenet121", + "resnet18", + "resnet50", + "efficientnet_b0", + "mobilenetv3_small_075", + "resnext50_32x4d", +] +VIT_ENCODERS = [ + "vit_base_patch8_224", + "vit_small_patch16_224", + "vit_small_patch16_384", + "vit_small_patch32_224", + "vit_small_patch16_384", +] +DEEPLABV3_ENCODERS = [ + "seresnet18", + "senet154", + "mobilenetv2_035", + "mobileone_s0", + "resnet18", + "resnet50", + "efficientnet_b0", + "mobilenetv3_small_075", + "resnext50_32x4d", +] +MIT_ENCODERS = list(mix_transformer_encoders.keys()) DEFAULT_ENCODER = "resnet18" +SCALE_FACTORS = {8: (4, 2, 1, 0.5, 0.25), 16: (8, 4, 2, 1, 0.5), 32: (16, 8, 4, 2, 1)} def get_sample(model_class): - if model_class in [ + if model_class in ( torchseg.Unet, torchseg.Linknet, torchseg.FPN, torchseg.PSPNet, torchseg.UnetPlusPlus, torchseg.MAnet, - ]: + ): sample = torch.ones([1, 3, 64, 64]) elif model_class == torchseg.PAN: sample = torch.ones([2, 3, 256, 256]) - elif model_class == torchseg.DeepLabV3: + elif model_class in (torchseg.DeepLabV3, torchseg.DeepLabV3Plus): sample = torch.ones([2, 3, 128, 128]) else: raise ValueError(f"Not supported model class {model_class}") return sample -def _test_forward(model, sample, test_shape=False): - with torch.no_grad(): - out = model(sample) - if test_shape: - assert out.shape[2:] == sample.shape[2:] - - -def _test_forward_backward(model, sample, test_shape=False): - out = model(sample) - out.mean().backward() - if test_shape: - assert out.shape[2:] == sample.shape[2:] - - +@torch.inference_mode() @pytest.mark.parametrize("encoder_name", ENCODERS) @pytest.mark.parametrize("encoder_depth", [3, 5]) @pytest.mark.parametrize( "model_class", [ + torchseg.Unet, torchseg.FPN, torchseg.PSPNet, torchseg.Linknet, - torchseg.Unet, torchseg.UnetPlusPlus, + 
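As a rough illustration of what the script above records: any encoder that ends up in `encoders_features_only_supported.json` can be built directly through timm's `features_only` API. The sketch below is not output from the script; the printed values are indicative for `resnet50`.

```python
import timm

# Hedged sketch: resnet50 supports features_only, so it yields a
# multi-scale feature pyramid suitable as a segmentation encoder.
encoder = timm.create_model("resnet50", pretrained=False, features_only=True)
print(encoder.feature_info.channels())   # e.g. [64, 256, 512, 1024, 2048]
print(encoder.feature_info.reduction())  # e.g. [2, 4, 8, 16, 32]
```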
diff --git a/tests/test_models.py b/tests/test_models.py
index 906c06df..106ec4c1 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,126 +1,201 @@
-import sys
-from unittest import mock
-
 import pytest
 import torch

-# mock detection module
-sys.modules["torchvision._C"] = mock.Mock()
-import torchseg  # noqa
-
-
-def get_encoders():
-    exclude_encoders = [
-        "senet154",
-        "resnext101_32x16d",
-        "resnext101_32x32d",
-        "resnext101_32x48d",
-    ]
-    encoders = torchseg.encoders.get_encoder_names()
-    encoders = [e for e in encoders if e not in exclude_encoders]
-    encoders.append("tu-resnet34")  # for timm universal encoder
-    return encoders
-
-
-ENCODERS = get_encoders()
+import torchseg
+from torchseg.encoders import mix_transformer_encoders
+
+ENCODERS = [
+    "seresnet18",
+    "senet154",
+    "mobilenetv2_035",
+    "mobileone_s0",
+    "inception_v4",
+    "inception_resnet_v2",
+    "dpn68",
+    "densenet121",
+    "resnet18",
+    "resnet50",
+    "efficientnet_b0",
+    "mobilenetv3_small_075",
+    "resnext50_32x4d",
+]
+VIT_ENCODERS = [
+    "vit_base_patch8_224",
+    "vit_small_patch16_224",
+    "vit_small_patch16_384",
+    "vit_small_patch32_224",
+    "vit_small_patch16_384",
+]
+DEEPLABV3_ENCODERS = [
+    "seresnet18",
+    "senet154",
+    "mobilenetv2_035",
+    "mobileone_s0",
+    "resnet18",
+    "resnet50",
+    "efficientnet_b0",
+    "mobilenetv3_small_075",
+    "resnext50_32x4d",
+]
+MIT_ENCODERS = list(mix_transformer_encoders.keys())
 DEFAULT_ENCODER = "resnet18"
+SCALE_FACTORS = {8: (4, 2, 1, 0.5, 0.25), 16: (8, 4, 2, 1, 0.5), 32: (16, 8, 4, 2, 1)}


 def get_sample(model_class):
-    if model_class in [
+    if model_class in (
         torchseg.Unet,
         torchseg.Linknet,
         torchseg.FPN,
         torchseg.PSPNet,
         torchseg.UnetPlusPlus,
         torchseg.MAnet,
-    ]:
+    ):
         sample = torch.ones([1, 3, 64, 64])
     elif model_class == torchseg.PAN:
         sample = torch.ones([2, 3, 256, 256])
-    elif model_class == torchseg.DeepLabV3:
+    elif model_class in (torchseg.DeepLabV3, torchseg.DeepLabV3Plus):
         sample = torch.ones([2, 3, 128, 128])
     else:
         raise ValueError(f"Not supported model class {model_class}")
     return sample


-def _test_forward(model, sample, test_shape=False):
-    with torch.no_grad():
-        out = model(sample)
-    if test_shape:
-        assert out.shape[2:] == sample.shape[2:]
-
-
-def _test_forward_backward(model, sample, test_shape=False):
-    out = model(sample)
-    out.mean().backward()
-    if test_shape:
-        assert out.shape[2:] == sample.shape[2:]
-
-
+@torch.inference_mode()
 @pytest.mark.parametrize("encoder_name", ENCODERS)
 @pytest.mark.parametrize("encoder_depth", [3, 5])
 @pytest.mark.parametrize(
     "model_class",
     [
+        torchseg.Unet,
         torchseg.FPN,
         torchseg.PSPNet,
         torchseg.Linknet,
-        torchseg.Unet,
         torchseg.UnetPlusPlus,
+        torchseg.MAnet,
     ],
 )
-def test_forward(model_class, encoder_name, encoder_depth, **kwargs):
+def test_timm_models(model_class, encoder_name, encoder_depth, **kwargs):
     if (
         model_class is torchseg.Unet
         or model_class is torchseg.UnetPlusPlus
         or model_class is torchseg.MAnet
     ):
         kwargs["decoder_channels"] = (16, 16, 16, 16, 16)[-encoder_depth:]
-    if model_class in [
-        torchseg.UnetPlusPlus,
-        torchseg.Linknet,
-    ] and encoder_name.startswith("mit_b"):
-        return  # skip mit_b*
-    if (
-        model_class is torchseg.FPN
-        and encoder_name.startswith("mit_b")
-        and encoder_depth != 5
-    ):
-        return  # skip mit_b*
+
     model = model_class(
         encoder_name, encoder_depth=encoder_depth, encoder_weights=None, **kwargs
     )
-    sample = get_sample(model_class)
     model.eval()
+    sample = get_sample(model_class)
     if encoder_depth == 5 and model_class != torchseg.PSPNet:
         test_shape = True
     else:
         test_shape = False
-    _test_forward(model, sample, test_shape)
+    out = model(sample)
+
+    if test_shape:
+        assert out.shape[2:] == sample.shape[2:]


+@torch.inference_mode()
+@pytest.mark.parametrize("encoder_name", VIT_ENCODERS)
 @pytest.mark.parametrize(
     "model_class",
     [
-        torchseg.PAN,
+        torchseg.Unet,
         torchseg.FPN,
         torchseg.PSPNet,
         torchseg.Linknet,
-        torchseg.Unet,
         torchseg.UnetPlusPlus,
         torchseg.MAnet,
-        torchseg.DeepLabV3,
     ],
 )
-def test_forward_backward(model_class):
+def test_timm_vit_models(model_class, encoder_name, **kwargs):
+    if (
+        model_class is torchseg.Unet
+        or model_class is torchseg.UnetPlusPlus
+        or model_class is torchseg.MAnet
+    ):
+        kwargs["decoder_channels"] = (16, 16, 16, 16, 16)[-5:]
+
+    image_size = int(encoder_name.split("_")[-1])
+    patch_size = int(encoder_name.split("patch")[1].split("_")[0])
+    scales = SCALE_FACTORS[patch_size]
+    kwargs["encoder_params"] = {"scale_factors": scales, "img_size": image_size}
+    model = model_class(encoder_name, encoder_depth=5, encoder_weights=None, **kwargs)
+    model.eval()
+    sample = torch.ones([2, 3, image_size, image_size])
+    if model_class != torchseg.PSPNet:
+        test_shape = True
+    else:
+        test_shape = False
+
+    out = model(sample)
+
+    if test_shape:
+        assert out.shape[2:] == sample.shape[2:]
+
+
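For reference, the ViT test above is the pattern for using a plain ViT as an encoder: the patch size selects an entry from `SCALE_FACTORS`, which is forwarded to the encoder via `encoder_params`. A minimal sketch of the same call outside the test suite, assuming a patch-16 ViT at 224 px input:

```python
import torchseg

# Sketch mirroring test_timm_vit_models: a patch-16 ViT has a single
# stride of 16, so SCALE_FACTORS[16] resamples the intermediate
# transformer features into a five-level pyramid for the decoder.
model = torchseg.Unet(
    "vit_small_patch16_224",
    encoder_weights=None,
    encoder_depth=5,
    decoder_channels=(16, 16, 16, 16, 16),
    encoder_params={"scale_factors": (8, 4, 2, 1, 0.5), "img_size": 224},
)
```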
+@torch.inference_mode()
+@pytest.mark.parametrize("encoder_name", DEEPLABV3_ENCODERS)
+@pytest.mark.parametrize("model_class", [torchseg.DeepLabV3, torchseg.DeepLabV3Plus])
+def test_deeplabv3(model_class, encoder_name, **kwargs):
+    """
+    DeepLabV3 requires output_stride=8, but some timm models only support
+    an output stride of 32, so those encoders are skipped here.
+    """
+    model = model_class(encoder_name, encoder_depth=5, encoder_weights=None, **kwargs)
+    model.eval()
     sample = get_sample(model_class)
-    model = model_class(DEFAULT_ENCODER, encoder_weights=None)
-    _test_forward_backward(model, sample)
+    out = model(sample)
+    assert out.shape[2:] == sample.shape[2:]


+@torch.inference_mode()
+@pytest.mark.parametrize("encoder_name", MIT_ENCODERS)
+@pytest.mark.parametrize("encoder_depth", [3, 5])
+@pytest.mark.parametrize(
+    "model_class",
+    [
+        torchseg.Unet,
+        torchseg.FPN,
+        torchseg.PSPNet,
+        torchseg.Linknet,
+        torchseg.UnetPlusPlus,
+    ],
+)
+def test_mix_transformer(model_class, encoder_name, encoder_depth, **kwargs):
+    if model_class in [torchseg.UnetPlusPlus, torchseg.Linknet]:
+        return  # skip mit_b*
+    if model_class is torchseg.FPN and encoder_depth != 5:
+        return  # skip mit_b*
+
+    if (
+        model_class is torchseg.Unet
+        or model_class is torchseg.UnetPlusPlus
+        or model_class is torchseg.MAnet
+    ):
+        kwargs["decoder_channels"] = (16, 16, 16, 16, 16)[-encoder_depth:]
+    model = model_class(
+        encoder_name, encoder_depth=encoder_depth, encoder_weights=None, **kwargs
+    )
+    model.eval()
+    sample = get_sample(model_class)
+
+    if encoder_depth == 5 and model_class != torchseg.PSPNet:
+        test_shape = True
+    else:
+        test_shape = False
+
+    out = model(sample)
+
+    if test_shape:
+        assert out.shape[2:] == sample.shape[2:]
+
+
+@torch.inference_mode()
 @pytest.mark.parametrize(
     "model_class",
     [
@@ -139,10 +214,11 @@ def test_aux_output(model_class):
     )
     sample = get_sample(model_class)
     label_size = (sample.shape[0], 2)
-    mask, label = model(sample)
+    label = model(sample)[1]
     assert label.size() == label_size


+@torch.inference_mode()
 @pytest.mark.parametrize("upsampling", [2, 4, 8])
 @pytest.mark.parametrize("model_class", [torchseg.FPN, torchseg.PSPNet])
 def test_upsample(model_class, upsampling):
@@ -151,41 +227,3 @@ def test_upsample(model_class, upsampling):
     sample = get_sample(model_class)
     mask = model(sample)
     assert mask.size()[-1] / 64 == upsampling / default_upsampling
-
-
-@pytest.mark.parametrize("model_class", [torchseg.FPN])
-@pytest.mark.parametrize("in_channels", [1, 2, 4])
-def test_in_channels(model_class, in_channels):
-    sample = torch.ones([1, in_channels, 64, 64])
-    model = model_class(DEFAULT_ENCODER, encoder_weights=None, in_channels=in_channels)
-    model.eval()
-    with torch.no_grad():
-        model(sample)
-
-    assert model.encoder._in_channels == in_channels
-
-
-@pytest.mark.parametrize("encoder_name", ENCODERS)
-def test_dilation(encoder_name):
-    if (
-        encoder_name in ["inceptionresnetv2", "xception", "inceptionv4"]
-        or encoder_name.startswith("vgg")
-        or encoder_name.startswith("densenet")
-        or encoder_name.startswith("timm-res")
-        or encoder_name.startswith("mit_b")
-    ):
-        return
-
-    encoder = torchseg.encoders.get_encoder(encoder_name, output_stride=16)
-
-    encoder.eval()
-    with torch.no_grad():
-        sample = torch.ones([1, 3, 64, 64])
-        output = encoder(sample)
-
-    shapes = [out.shape[-1] for out in output]
-    assert shapes == [64, 32, 16, 8, 4, 4]  # last downsampling replaced with dilation
-
-
-if __name__ == "__main__":
-    pytest.main([__file__])
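As `test_aux_output` above exercises, a model built with `aux_params` returns a `(mask, label)` pair. A minimal sketch, assuming the usual smp-style `aux_params` keys that the `ClassificationHead` below accepts:

```python
import torch
import torchseg

# Sketch: aux_params attaches a ClassificationHead to the encoder output,
# so the forward pass yields both a mask and an image-level label tensor.
model = torchseg.Unet(
    "resnet18",
    encoder_weights=None,
    aux_params={"classes": 2, "pooling": "avg", "dropout": 0.2},
)
model.eval()
mask, label = model(torch.ones([1, 3, 64, 64]))
print(mask.shape, label.shape)  # e.g. (1, 1, 64, 64) and (1, 2)
```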
"resnet34", + encoder_indices: Optional[tuple[int]] = None, + encoder_depth: Optional[int] = 5, encoder_weights: Optional[str] = "imagenet", in_channels: int = 3, classes: int = 1, @@ -52,6 +55,8 @@ def create_model( return model_class( encoder_name=encoder_name, + encoder_indices=encoder_indices, + encoder_depth=encoder_depth, encoder_weights=encoder_weights, in_channels=in_channels, classes=classes, diff --git a/torchseg/base/heads.py b/torchseg/base/heads.py index 3f63d5bc..3f908b79 100644 --- a/torchseg/base/heads.py +++ b/torchseg/base/heads.py @@ -1,11 +1,14 @@ import torch.nn as nn -from .modules import Activation - class SegmentationHead(nn.Sequential): def __init__( - self, in_channels, out_channels, kernel_size=3, activation=None, upsampling=1 + self, + in_channels, + out_channels, + kernel_size=3, + activation=nn.Identity(), + upsampling=1, ): conv2d = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2 @@ -15,13 +18,12 @@ def __init__( if upsampling > 1 else nn.Identity() ) - activation = Activation(activation) super().__init__(conv2d, upsampling, activation) class ClassificationHead(nn.Sequential): def __init__( - self, in_channels, classes, pooling="avg", dropout=0.2, activation=None + self, in_channels, classes, pooling="avg", dropout=0.2, activation=nn.Identity() ): if pooling not in ("max", "avg"): raise ValueError(f"Pooling should be one of ('max', 'avg'), got {pooling}.") @@ -29,5 +31,4 @@ def __init__( flatten = nn.Flatten() dropout = nn.Dropout(p=dropout, inplace=True) if dropout else nn.Identity() linear = nn.Linear(in_channels, classes, bias=True) - activation = Activation(activation) super().__init__(pool, flatten, dropout, linear, activation) diff --git a/torchseg/base/modules.py b/torchseg/base/modules.py index 9cf1e874..5106a706 100644 --- a/torchseg/base/modules.py +++ b/torchseg/base/modules.py @@ -1,4 +1,3 @@ -import torch import torch.nn as nn @@ -46,58 +45,6 @@ def forward(self, x): return x * self.cSE(x) + x * self.sSE(x) -class ArgMax(nn.Module): - def __init__(self, dim=None): - super().__init__() - self.dim = dim - - def forward(self, x): - return torch.argmax(x, dim=self.dim) - - -class Clamp(nn.Module): - def __init__(self, min=0, max=1): - super().__init__() - self.min, self.max = min, max - - def forward(self, x): - return torch.clamp(x, self.min, self.max) - - -class Activation(nn.Module): - def __init__(self, name, **params): - super().__init__() - - if name is None or name == "identity": - self.activation = nn.Identity(**params) - elif name == "sigmoid": - self.activation = nn.Sigmoid() - elif name == "softmax2d": - self.activation = nn.Softmax(dim=1, **params) - elif name == "softmax": - self.activation = nn.Softmax(**params) - elif name == "logsoftmax": - self.activation = nn.LogSoftmax(**params) - elif name == "tanh": - self.activation = nn.Tanh() - elif name == "argmax": - self.activation = ArgMax(**params) - elif name == "argmax2d": - self.activation = ArgMax(dim=1, **params) - elif name == "clamp": - self.activation = Clamp(**params) - elif callable(name): - self.activation = name(**params) - else: - raise ValueError( - f"Activation should be callable/sigmoid/softmax/logsoftmax/tanh/" - f"argmax/argmax2d/clamp/None; got {name}" - ) - - def forward(self, x): - return self.activation(x) - - class Attention(nn.Module): def __init__(self, name, **params): super().__init__() diff --git a/torchseg/decoders/deeplabv3/model.py b/torchseg/decoders/deeplabv3/model.py index d5b9dfac..6f236c55 100644 --- 
a/torchseg/decoders/deeplabv3/model.py +++ b/torchseg/decoders/deeplabv3/model.py @@ -1,4 +1,6 @@ -from typing import Optional +from typing import Callable, Optional + +import torch.nn as nn from ...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -47,13 +49,16 @@ class DeepLabV3(SegmentationModel): def __init__( self, encoder_name: str = "resnet34", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 5, + encoder_output_stride: int = 8, encoder_weights: Optional[str] = "imagenet", decoder_channels: int = 256, in_channels: int = 3, classes: int = 1, - activation: Optional[str] = None, + activation: Callable = nn.Identity(), upsampling: int = 8, + encoder_params: Optional[dict] = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -61,9 +66,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, weights=encoder_weights, - output_stride=8, + output_stride=encoder_output_stride, + **encoder_params, ) self.decoder = DeepLabV3Decoder( @@ -133,6 +140,7 @@ class DeepLabV3Plus(SegmentationModel): def __init__( self, encoder_name: str = "resnet34", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 5, encoder_weights: Optional[str] = "imagenet", encoder_output_stride: int = 16, @@ -140,8 +148,9 @@ def __init__( decoder_atrous_rates: tuple = (12, 24, 36), in_channels: int = 3, classes: int = 1, - activation: Optional[str] = None, + activation: Callable = nn.Identity(), upsampling: int = 4, + encoder_params: Optional[dict] = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -155,9 +164,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, weights=encoder_weights, output_stride=encoder_output_stride, + **encoder_params, ) self.decoder = DeepLabV3PlusDecoder( diff --git a/torchseg/decoders/fpn/model.py b/torchseg/decoders/fpn/model.py index 4c6b446b..d870b65a 100644 --- a/torchseg/decoders/fpn/model.py +++ b/torchseg/decoders/fpn/model.py @@ -1,4 +1,6 @@ -from typing import Optional +from typing import Callable, Optional + +import torch.nn as nn from ...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -48,7 +50,9 @@ class FPN(SegmentationModel): def __init__( self, encoder_name: str = "resnet34", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 5, + encoder_output_stride: int = 32, encoder_weights: Optional[str] = "imagenet", decoder_pyramid_channels: int = 256, decoder_segmentation_channels: int = 128, @@ -56,8 +60,9 @@ def __init__( decoder_dropout: float = 0.2, in_channels: int = 3, classes: int = 1, - activation: Optional[str] = None, + activation: Callable = nn.Identity(), upsampling: int = 4, + encoder_params: dict = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -69,8 +74,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, + output_stride=encoder_output_stride, weights=encoder_weights, + **encoder_params, ) self.decoder = FPNDecoder( diff --git a/torchseg/decoders/linknet/model.py b/torchseg/decoders/linknet/model.py index 62a0f839..4f3e6917 100644 --- a/torchseg/decoders/linknet/model.py +++ b/torchseg/decoders/linknet/model.py @@ -1,4 +1,6 @@ -from typing import Optional, Union +from typing import Callable, Optional + +import torch.nn as nn from 
...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -50,12 +52,15 @@ class Linknet(SegmentationModel): def __init__( self, encoder_name: str = "resnet34", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 5, + encoder_output_stride: int = 32, encoder_weights: Optional[str] = "imagenet", decoder_use_batchnorm: bool = True, in_channels: int = 3, classes: int = 1, - activation: Optional[Union[str, callable]] = None, + activation: Callable = nn.Identity(), + encoder_params: dict = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -66,8 +71,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, + output_stride=encoder_output_stride, weights=encoder_weights, + **encoder_params, ) self.decoder = LinknetDecoder( diff --git a/torchseg/decoders/manet/model.py b/torchseg/decoders/manet/model.py index e54cf924..50022fac 100644 --- a/torchseg/decoders/manet/model.py +++ b/torchseg/decoders/manet/model.py @@ -1,4 +1,6 @@ -from typing import Optional, Union +from typing import Callable, Optional + +import torch.nn as nn from ...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -56,14 +58,17 @@ class MAnet(SegmentationModel): def __init__( self, encoder_name: str = "resnet34", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 5, + encoder_output_stride: int = 32, encoder_weights: Optional[str] = "imagenet", decoder_use_batchnorm: bool = True, decoder_channels: list[int] = (256, 128, 64, 32, 16), decoder_pab_channels: int = 64, in_channels: int = 3, classes: int = 1, - activation: Optional[Union[str, callable]] = None, + activation: Callable = nn.Identity(), + encoder_params: dict = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -71,8 +76,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, + output_stride=encoder_output_stride, weights=encoder_weights, + **encoder_params, ) self.decoder = MAnetDecoder( diff --git a/torchseg/decoders/pan/model.py b/torchseg/decoders/pan/model.py index e71c3609..9f3d54c5 100644 --- a/torchseg/decoders/pan/model.py +++ b/torchseg/decoders/pan/model.py @@ -1,4 +1,6 @@ -from typing import Optional, Union +from typing import Callable, Optional + +import torch.nn as nn from ...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -44,12 +46,15 @@ def __init__( self, encoder_name: str = "resnet34", encoder_weights: Optional[str] = "imagenet", + encoder_indices: Optional[tuple[int]] = None, + encoder_depth: int = 5, encoder_output_stride: int = 16, decoder_channels: int = 32, in_channels: int = 3, classes: int = 1, - activation: Optional[Union[str, callable]] = None, + activation: Callable = nn.Identity(), upsampling: int = 4, + encoder_params: dict = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -61,9 +66,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, - depth=5, + indices=encoder_indices, + depth=encoder_depth, weights=encoder_weights, output_stride=encoder_output_stride, + **encoder_params, ) self.decoder = PANDecoder( diff --git a/torchseg/decoders/pspnet/model.py b/torchseg/decoders/pspnet/model.py index 612ef4f0..e5eaea4f 100644 --- a/torchseg/decoders/pspnet/model.py +++ b/torchseg/decoders/pspnet/model.py @@ -1,4 +1,6 @@ 
-from typing import Optional, Union +from typing import Callable, Optional + +import torch.nn as nn from ...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -53,14 +55,17 @@ def __init__( self, encoder_name: str = "resnet34", encoder_weights: Optional[str] = "imagenet", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 3, + encoder_output_stride: int = 32, psp_out_channels: int = 512, psp_use_batchnorm: bool = True, psp_dropout: float = 0.2, in_channels: int = 3, classes: int = 1, - activation: Optional[Union[str, callable]] = None, + activation: Callable = nn.Identity(), upsampling: int = 8, + encoder_params: dict = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -68,8 +73,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, + output_stride=encoder_output_stride, weights=encoder_weights, + **encoder_params, ) self.decoder = PSPDecoder( diff --git a/torchseg/decoders/unet/model.py b/torchseg/decoders/unet/model.py index ffc5a606..8053d77f 100644 --- a/torchseg/decoders/unet/model.py +++ b/torchseg/decoders/unet/model.py @@ -1,4 +1,6 @@ -from typing import Optional, Union +from typing import Callable, Optional + +import torch.nn as nn from ...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -46,6 +48,9 @@ class Unet(SegmentationModel): - dropout (float): Dropout factor in [0, 1) - activation (str): An activation function to apply "sigmoid"/"softmax" (could be **None** to return logits) + head_upsampling: Factor to upsample input to segmentation head. Defaults to 1. + This allows for use of U-Net decoder with models that need additional + upsampling to be at the original input image resolution. .. 
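    (Illustrative sketch, not from the patch: if a chosen encoder/decoder pair
    yields its final feature map at, say, half the input resolution, an extra
    bilinear 2x upsample in the segmentation head restores it:

        model = torchseg.Unet(
            "resnet34",          # placeholder encoder; pick one matching the scenario
            encoder_weights=None,
            head_upsampling=2,   # forwarded to SegmentationHead(upsampling=...)
        )

    )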
_Unet: @@ -56,23 +61,30 @@ class Unet(SegmentationModel): def __init__( self, encoder_name: str = "resnet34", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 5, + encoder_output_stride: Optional[int] = None, encoder_weights: Optional[str] = "imagenet", decoder_use_batchnorm: bool = True, decoder_channels: list[int] = (256, 128, 64, 32, 16), decoder_attention_type: Optional[str] = None, in_channels: int = 3, classes: int = 1, - activation: Optional[Union[str, callable]] = None, + activation: Callable = nn.Identity(), + encoder_params: dict = {}, aux_params: Optional[dict] = None, + head_upsampling: int = 1, ): super().__init__() self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, + output_stride=encoder_output_stride, weights=encoder_weights, + **encoder_params, ) self.decoder = UnetDecoder( @@ -89,6 +101,7 @@ def __init__( out_channels=classes, activation=activation, kernel_size=3, + upsampling=head_upsampling, ) if aux_params is not None: diff --git a/torchseg/decoders/unetplusplus/model.py b/torchseg/decoders/unetplusplus/model.py index 4ddffcb5..72aa5db9 100644 --- a/torchseg/decoders/unetplusplus/model.py +++ b/torchseg/decoders/unetplusplus/model.py @@ -1,4 +1,6 @@ -from typing import Optional, Union +from typing import Callable, Optional + +import torch.nn as nn from ...base import ClassificationHead, SegmentationHead, SegmentationModel from ...encoders import get_encoder @@ -53,14 +55,17 @@ class UnetPlusPlus(SegmentationModel): def __init__( self, encoder_name: str = "resnet34", + encoder_indices: Optional[tuple[int]] = None, encoder_depth: int = 5, + encoder_output_stride: int = 32, encoder_weights: Optional[str] = "imagenet", decoder_use_batchnorm: bool = True, decoder_channels: list[int] = (256, 128, 64, 32, 16), decoder_attention_type: Optional[str] = None, in_channels: int = 3, classes: int = 1, - activation: Optional[Union[str, callable]] = None, + activation: Callable = nn.Identity(), + encoder_params: dict = {}, aux_params: Optional[dict] = None, ): super().__init__() @@ -71,8 +76,11 @@ def __init__( self.encoder = get_encoder( encoder_name, in_channels=in_channels, + indices=encoder_indices, depth=encoder_depth, + output_stride=encoder_output_stride, weights=encoder_weights, + **encoder_params, ) self.decoder = UnetPlusPlusDecoder( diff --git a/torchseg/encoders/__init__.py b/torchseg/encoders/__init__.py index f6b33884..ac99d22d 100644 --- a/torchseg/encoders/__init__.py +++ b/torchseg/encoders/__init__.py @@ -1,89 +1,96 @@ import torch.utils.model_zoo as model_zoo -from .densenet import densenet_encoders -from .dpn import dpn_encoders -from .efficientnet import efficient_net_encoders -from .inceptionresnetv2 import inceptionresnetv2_encoders -from .inceptionv4 import inceptionv4_encoders from .mix_transformer import mix_transformer_encoders -from .mobilenet import mobilenet_encoders -from .mobileone import mobileone_encoders -from .resnet import resnet_encoders -from .senet import senet_encoders -from .timm_efficientnet import timm_efficientnet_encoders -from .timm_gernet import timm_gernet_encoders -from .timm_mobilenetv3 import timm_mobilenetv3_encoders -from .timm_regnet import timm_regnet_encoders -from .timm_res2net import timm_res2net_encoders -from .timm_resnest import timm_resnest_encoders -from .timm_sknet import timm_sknet_encoders -from .timm_universal import TimmUniversalEncoder -from .vgg import vgg_encoders -from .xception import xception_encoders +from .supported import 
TIMM_ENCODERS, TIMM_VIT_ENCODERS, UNSUPPORTED_ENCODERS +from .timm import TimmEncoder, TimmViTEncoder -encoders = {} -encoders.update(resnet_encoders) -encoders.update(dpn_encoders) -encoders.update(vgg_encoders) -encoders.update(senet_encoders) -encoders.update(densenet_encoders) -encoders.update(inceptionresnetv2_encoders) -encoders.update(inceptionv4_encoders) -encoders.update(efficient_net_encoders) -encoders.update(mobilenet_encoders) -encoders.update(xception_encoders) -encoders.update(timm_efficientnet_encoders) -encoders.update(timm_resnest_encoders) -encoders.update(timm_res2net_encoders) -encoders.update(timm_regnet_encoders) -encoders.update(timm_sknet_encoders) -encoders.update(timm_mobilenetv3_encoders) -encoders.update(timm_gernet_encoders) -encoders.update(mix_transformer_encoders) -encoders.update(mobileone_encoders) +def list_unsupported_encoders(): + return UNSUPPORTED_ENCODERS -def get_encoder(name, in_channels=3, depth=5, weights=None, output_stride=32, **kwargs): - if name.startswith("tu-"): - name = name[3:] - encoder = TimmUniversalEncoder( - name=name, - in_channels=in_channels, - depth=depth, - output_stride=output_stride, - pretrained=weights is not None, - **kwargs, - ) - return encoder - try: - Encoder = encoders[name]["encoder"] - except KeyError: - err = f"Wrong encoder name `{name}`, supported encoders: {list(encoders.keys())}" # noqa: E501 - raise KeyError(err) +def list_encoders(): + return ( + list(TIMM_ENCODERS.keys()) + + TIMM_VIT_ENCODERS + + list(mix_transformer_encoders.keys()) + ) - params = encoders[name]["params"] - params.update(depth=depth) - encoder = Encoder(**params) - if weights is not None: +def get_encoder( + name, + in_channels=3, + depth=None, + indices=None, + weights=None, + output_stride=32, + scale_factors=None, + **kwargs, +): + assert ( + depth is not None or indices is not None + ), "Either `depth` or `indices` should be specified" + + # MixTransformer encoder + if name.startswith("mit_b"): + encoders = mix_transformer_encoders + params = encoders[name]["params"] + params.update(depth=depth) + try: - settings = encoders[name]["pretrained_settings"][weights] + Encoder = encoders[name]["encoder"] except KeyError: - err = f""" - Wrong pretrained weights `{weights}` for encoder `{name}`. - Available options are: {list(encoders[name]["pretrained_settings"].keys())} - """ + err = f"Wrong mit encoder name `{name}`, supported encoders: {list(encoders.keys())}" # noqa: E501 raise KeyError(err) - encoder.load_state_dict(model_zoo.load_url(settings["url"])) - - encoder.set_in_channels(in_channels, pretrained=weights is not None) - if output_stride != 32: - encoder.make_dilated(output_stride) + params = encoders[name]["params"] + params.update(depth=depth) + encoder = Encoder(**params) - return encoder + if weights is not None: + try: + settings = encoders[name]["pretrained_settings"][weights] + except KeyError: + err = f""" + Wrong pretrained weights `{weights}` for encoder `{name}`. 
+ Available options are: {list(encoders[name]["pretrained_settings"].keys())} # noqa: E501 + """ + raise KeyError(err) + encoder.load_state_dict(model_zoo.load_url(settings["url"])) -def get_encoder_names(): - return list(encoders.keys()) + # Timm Encoders + else: + if name.split(".")[0] in TIMM_ENCODERS: + encoder = TimmEncoder( + name=name, + in_channels=in_channels, + depth=depth, + indices=indices, + output_stride=output_stride, + pretrained=weights is not None, + **kwargs, + ) + elif name.split(".")[0] in TIMM_VIT_ENCODERS: + encoder = TimmViTEncoder( + name=name, + in_channels=in_channels, + depth=depth, + indices=indices, + pretrained=weights is not None, + scale_factors=scale_factors, + **kwargs, + ) + elif name.split(".")[0] in UNSUPPORTED_ENCODERS: + err = f""" + {name} is an unsupported timm encoder that does not support + `features_only=True` or does not have a `get_intermediate_layers` method. + """ + raise ValueError(err) + else: + err = f""" + {name} is an unknown encoder. Check available encoders using + `torchseg.list_encoders()` + """ + raise ValueError(err) + return encoder diff --git a/torchseg/encoders/_base.py b/torchseg/encoders/_base.py deleted file mode 100644 index ee17337f..00000000 --- a/torchseg/encoders/_base.py +++ /dev/null @@ -1,59 +0,0 @@ -import abc - -from . import _utils as utils - - -class EncoderMixin(abc.ABC): - """Add encoder functionality such as: - - output channels specification of feature tensors (produced by encoder) - - patching first convolution for arbitrary input channels - """ - - _output_stride = 32 - - @property - def out_channels(self): - """Return channels dimensions for each tensor of forward output of encoder""" - return self._out_channels[: self._depth + 1] - - @property - def output_stride(self): - return min(self._output_stride, 2**self._depth) - - def set_in_channels(self, in_channels, pretrained=True): - """Change first convolution channels""" - if in_channels == 3: - return - - self._in_channels = in_channels - if self._out_channels[0] == 3: - self._out_channels = tuple([in_channels] + list(self._out_channels)[1:]) - - utils.patch_first_conv( - model=self, new_in_channels=in_channels, pretrained=pretrained - ) - - def get_stages(self): - """Override it in your implementation""" - raise NotImplementedError - - def make_dilated(self, output_stride): - if output_stride == 16: - stage_list = [5] - dilation_list = [2] - - elif output_stride == 8: - stage_list = [4, 5] - dilation_list = [2, 4] - - else: - err = f"Output stride should be 16 or 8, got {output_stride}." - raise ValueError(err) - - self._output_stride = output_stride - - stages = self.get_stages() - for stage_indx, dilation_rate in zip(stage_list, dilation_list): - utils.replace_strides_with_dilation( - module=stages[stage_indx], dilation_rate=dilation_rate - ) diff --git a/torchseg/encoders/_utils.py b/torchseg/encoders/_utils.py deleted file mode 100644 index 86128099..00000000 --- a/torchseg/encoders/_utils.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch -import torch.nn as nn - - -def patch_first_conv(model, new_in_channels, default_in_channels=3, pretrained=True): - """Change first convolution layer input channels. 
- In case: - in_channels == 1 or in_channels == 2 -> reuse original weights - in_channels > 3 -> make random kaiming normal initialization - """ - - # get first conv - for module in model.modules(): - if isinstance(module, nn.Conv2d) and module.in_channels == default_in_channels: - break - - weight = module.weight.detach() - module.in_channels = new_in_channels - - if not pretrained: - module.weight = nn.parameter.Parameter( - torch.Tensor( - module.out_channels, - new_in_channels // module.groups, - *module.kernel_size, - ) - ) - module.reset_parameters() - - elif new_in_channels == 1: - new_weight = weight.sum(1, keepdim=True) - module.weight = nn.parameter.Parameter(new_weight) - - else: - new_weight = torch.Tensor( - module.out_channels, new_in_channels // module.groups, *module.kernel_size - ) - - for i in range(new_in_channels): - new_weight[:, i] = weight[:, i % default_in_channels] - - new_weight = new_weight * (default_in_channels / new_in_channels) - module.weight = nn.parameter.Parameter(new_weight) - - -def replace_strides_with_dilation(module, dilation_rate): - """Patch Conv2d modules replacing strides with dilation""" - for mod in module.modules(): - if isinstance(mod, nn.Conv2d): - mod.stride = (1, 1) - mod.dilation = (dilation_rate, dilation_rate) - kh, kw = mod.kernel_size - mod.padding = ((kh // 2) * dilation_rate, (kh // 2) * dilation_rate) - - # Kostyl for EfficientNet - if hasattr(mod, "static_padding"): - mod.static_padding = nn.Identity() diff --git a/torchseg/encoders/densenet.py b/torchseg/encoders/densenet.py deleted file mode 100644 index 6ec11128..00000000 --- a/torchseg/encoders/densenet.py +++ /dev/null @@ -1,130 +0,0 @@ -import re - -import torch.nn as nn -from pretrainedmodels.models.torchvision_models import pretrained_settings -from torchvision.models.densenet import DenseNet - -from ._base import EncoderMixin - - -class TransitionWithSkip(nn.Module): - def __init__(self, module): - super().__init__() - self.module = module - - def forward(self, x): - for module in self.module: - x = module(x) - if isinstance(module, nn.ReLU): - skip = x - return x, skip - - -class DenseNetEncoder(DenseNet, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - del self.classifier - - def make_dilated(self, *args, **kwargs): - raise ValueError( - "DenseNet encoders do not support dilated mode " - "due to pooling operation for downsampling!" 
- ) - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential( - self.features.conv0, self.features.norm0, self.features.relu0 - ), - nn.Sequential( - self.features.pool0, - self.features.denseblock1, - TransitionWithSkip(self.features.transition1), - ), - nn.Sequential( - self.features.denseblock2, TransitionWithSkip(self.features.transition2) - ), - nn.Sequential( - self.features.denseblock3, TransitionWithSkip(self.features.transition3) - ), - nn.Sequential(self.features.denseblock4, self.features.norm5), - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - if isinstance(x, (list, tuple)): - x, skip = x - features.append(skip) - else: - features.append(x) - - return features - - def load_state_dict(self, state_dict): - pattern = re.compile( - r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" # noqa: E501 - ) - for key in list(state_dict.keys()): - res = pattern.match(key) - if res: - new_key = res.group(1) + res.group(2) - state_dict[new_key] = state_dict[key] - del state_dict[key] - - # remove linear - state_dict.pop("classifier.bias", None) - state_dict.pop("classifier.weight", None) - - super().load_state_dict(state_dict) - - -densenet_encoders = { - "densenet121": { - "encoder": DenseNetEncoder, - "pretrained_settings": pretrained_settings["densenet121"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 1024), - "num_init_features": 64, - "growth_rate": 32, - "block_config": (6, 12, 24, 16), - }, - }, - "densenet169": { - "encoder": DenseNetEncoder, - "pretrained_settings": pretrained_settings["densenet169"], - "params": { - "out_channels": (3, 64, 256, 512, 1280, 1664), - "num_init_features": 64, - "growth_rate": 32, - "block_config": (6, 12, 32, 32), - }, - }, - "densenet201": { - "encoder": DenseNetEncoder, - "pretrained_settings": pretrained_settings["densenet201"], - "params": { - "out_channels": (3, 64, 256, 512, 1792, 1920), - "num_init_features": 64, - "growth_rate": 32, - "block_config": (6, 12, 48, 32), - }, - }, - "densenet161": { - "encoder": DenseNetEncoder, - "pretrained_settings": pretrained_settings["densenet161"], - "params": { - "out_channels": (3, 96, 384, 768, 2112, 2208), - "num_init_features": 96, - "growth_rate": 48, - "block_config": (6, 12, 36, 24), - }, - }, -} diff --git a/torchseg/encoders/dpn.py b/torchseg/encoders/dpn.py deleted file mode 100644 index 42436aed..00000000 --- a/torchseg/encoders/dpn.py +++ /dev/null @@ -1,146 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from pretrainedmodels.models.dpn import DPN, pretrained_settings - -from ._base import EncoderMixin - - -class DPNEncoder(DPN, EncoderMixin): - def __init__(self, stage_idxs, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._stage_idxs = stage_idxs - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - - del self.last_linear - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential( - self.features[0].conv, self.features[0].bn, self.features[0].act - ), - nn.Sequential( - self.features[0].pool, self.features[1 : self._stage_idxs[0]] - ), - self.features[self._stage_idxs[0] : self._stage_idxs[1]], - self.features[self._stage_idxs[1] : self._stage_idxs[2]], - self.features[self._stage_idxs[2] : self._stage_idxs[3]], - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - if 
isinstance(x, (list, tuple)): - features.append(F.relu(torch.cat(x, dim=1), inplace=True)) - else: - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("last_linear.bias", None) - state_dict.pop("last_linear.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -dpn_encoders = { - "dpn68": { - "encoder": DPNEncoder, - "pretrained_settings": pretrained_settings["dpn68"], - "params": { - "stage_idxs": (4, 8, 20, 24), - "out_channels": (3, 10, 144, 320, 704, 832), - "groups": 32, - "inc_sec": (16, 32, 32, 64), - "k_r": 128, - "k_sec": (3, 4, 12, 3), - "num_classes": 1000, - "num_init_features": 10, - "small": True, - "test_time_pool": True, - }, - }, - "dpn68b": { - "encoder": DPNEncoder, - "pretrained_settings": pretrained_settings["dpn68b"], - "params": { - "stage_idxs": (4, 8, 20, 24), - "out_channels": (3, 10, 144, 320, 704, 832), - "b": True, - "groups": 32, - "inc_sec": (16, 32, 32, 64), - "k_r": 128, - "k_sec": (3, 4, 12, 3), - "num_classes": 1000, - "num_init_features": 10, - "small": True, - "test_time_pool": True, - }, - }, - "dpn92": { - "encoder": DPNEncoder, - "pretrained_settings": pretrained_settings["dpn92"], - "params": { - "stage_idxs": (4, 8, 28, 32), - "out_channels": (3, 64, 336, 704, 1552, 2688), - "groups": 32, - "inc_sec": (16, 32, 24, 128), - "k_r": 96, - "k_sec": (3, 4, 20, 3), - "num_classes": 1000, - "num_init_features": 64, - "test_time_pool": True, - }, - }, - "dpn98": { - "encoder": DPNEncoder, - "pretrained_settings": pretrained_settings["dpn98"], - "params": { - "stage_idxs": (4, 10, 30, 34), - "out_channels": (3, 96, 336, 768, 1728, 2688), - "groups": 40, - "inc_sec": (16, 32, 32, 128), - "k_r": 160, - "k_sec": (3, 6, 20, 3), - "num_classes": 1000, - "num_init_features": 96, - "test_time_pool": True, - }, - }, - "dpn107": { - "encoder": DPNEncoder, - "pretrained_settings": pretrained_settings["dpn107"], - "params": { - "stage_idxs": (5, 13, 33, 37), - "out_channels": (3, 128, 376, 1152, 2432, 2688), - "groups": 50, - "inc_sec": (20, 64, 64, 128), - "k_r": 200, - "k_sec": (4, 8, 20, 3), - "num_classes": 1000, - "num_init_features": 128, - "test_time_pool": True, - }, - }, - "dpn131": { - "encoder": DPNEncoder, - "pretrained_settings": pretrained_settings["dpn131"], - "params": { - "stage_idxs": (5, 13, 41, 45), - "out_channels": (3, 128, 352, 832, 1984, 2688), - "groups": 40, - "inc_sec": (16, 32, 32, 128), - "k_r": 160, - "k_sec": (4, 8, 28, 3), - "num_classes": 1000, - "num_init_features": 128, - "test_time_pool": True, - }, - }, -} diff --git a/torchseg/encoders/efficientnet.py b/torchseg/encoders/efficientnet.py deleted file mode 100644 index 3d5635a2..00000000 --- a/torchseg/encoders/efficientnet.py +++ /dev/null @@ -1,152 +0,0 @@ -import torch.nn as nn -from efficientnet_pytorch import EfficientNet -from efficientnet_pytorch.utils import get_model_params, url_map, url_map_advprop - -from ._base import EncoderMixin - - -class EfficientNetEncoder(EfficientNet, EncoderMixin): - def __init__(self, stage_idxs, out_channels, model_name, depth=5): - blocks_args, global_params = get_model_params(model_name, override_params=None) - super().__init__(blocks_args, global_params) - - self._stage_idxs = stage_idxs - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - del self._fc - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential(self._conv_stem, self._bn0, self._swish), - self._blocks[: self._stage_idxs[0]], - self._blocks[self._stage_idxs[0] : 
self._stage_idxs[1]], - self._blocks[self._stage_idxs[1] : self._stage_idxs[2]], - self._blocks[self._stage_idxs[2] :], - ] - - def forward(self, x): - stages = self.get_stages() - - block_number = 0.0 - drop_connect_rate = self._global_params.drop_connect_rate - - features = [] - for i in range(self._depth + 1): - # Identity and Sequential stages - if i < 2: - x = stages[i](x) - - # Block stages need drop_connect rate - else: - for module in stages[i]: - drop_connect = drop_connect_rate * block_number / len(self._blocks) - block_number += 1.0 - x = module(x, drop_connect) - - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("_fc.bias", None) - state_dict.pop("_fc.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -def _get_pretrained_settings(encoder): - pretrained_settings = { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": url_map[encoder], - "input_space": "RGB", - "input_range": [0, 1], - }, - "advprop": { - "mean": [0.5, 0.5, 0.5], - "std": [0.5, 0.5, 0.5], - "url": url_map_advprop[encoder], - "input_space": "RGB", - "input_range": [0, 1], - }, - } - return pretrained_settings - - -efficient_net_encoders = { - "efficientnet-b0": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b0"), - "params": { - "out_channels": (3, 32, 24, 40, 112, 320), - "stage_idxs": (3, 5, 9, 16), - "model_name": "efficientnet-b0", - }, - }, - "efficientnet-b1": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b1"), - "params": { - "out_channels": (3, 32, 24, 40, 112, 320), - "stage_idxs": (5, 8, 16, 23), - "model_name": "efficientnet-b1", - }, - }, - "efficientnet-b2": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b2"), - "params": { - "out_channels": (3, 32, 24, 48, 120, 352), - "stage_idxs": (5, 8, 16, 23), - "model_name": "efficientnet-b2", - }, - }, - "efficientnet-b3": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b3"), - "params": { - "out_channels": (3, 40, 32, 48, 136, 384), - "stage_idxs": (5, 8, 18, 26), - "model_name": "efficientnet-b3", - }, - }, - "efficientnet-b4": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b4"), - "params": { - "out_channels": (3, 48, 32, 56, 160, 448), - "stage_idxs": (6, 10, 22, 32), - "model_name": "efficientnet-b4", - }, - }, - "efficientnet-b5": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b5"), - "params": { - "out_channels": (3, 48, 40, 64, 176, 512), - "stage_idxs": (8, 13, 27, 39), - "model_name": "efficientnet-b5", - }, - }, - "efficientnet-b6": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b6"), - "params": { - "out_channels": (3, 56, 40, 72, 200, 576), - "stage_idxs": (9, 15, 31, 45), - "model_name": "efficientnet-b6", - }, - }, - "efficientnet-b7": { - "encoder": EfficientNetEncoder, - "pretrained_settings": _get_pretrained_settings("efficientnet-b7"), - "params": { - "out_channels": (3, 64, 48, 80, 224, 640), - "stage_idxs": (11, 18, 38, 55), - "model_name": "efficientnet-b7", - }, - }, -} diff --git a/torchseg/encoders/inceptionresnetv2.py b/torchseg/encoders/inceptionresnetv2.py deleted file mode 100644 index ca43d131..00000000 --- a/torchseg/encoders/inceptionresnetv2.py +++ 
/dev/null @@ -1,68 +0,0 @@ -import torch.nn as nn -from pretrainedmodels.models.inceptionresnetv2 import ( - InceptionResNetV2, - pretrained_settings, -) - -from ._base import EncoderMixin - - -class InceptionResNetV2Encoder(InceptionResNetV2, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - # correct paddings - for m in self.modules(): - if isinstance(m, nn.Conv2d): - if m.kernel_size == (3, 3): - m.padding = (1, 1) - if isinstance(m, nn.MaxPool2d): - m.padding = (1, 1) - - # remove linear layers - del self.avgpool_1a - del self.last_linear - - def make_dilated(self, *args, **kwargs): - raise ValueError( - "InceptionResnetV2 encoder does not support dilated mode " - "due to pooling operation for downsampling!" - ) - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential(self.conv2d_1a, self.conv2d_2a, self.conv2d_2b), - nn.Sequential(self.maxpool_3a, self.conv2d_3b, self.conv2d_4a), - nn.Sequential(self.maxpool_5a, self.mixed_5b, self.repeat), - nn.Sequential(self.mixed_6a, self.repeat_1), - nn.Sequential(self.mixed_7a, self.repeat_2, self.block8, self.conv2d_7b), - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("last_linear.bias", None) - state_dict.pop("last_linear.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -inceptionresnetv2_encoders = { - "inceptionresnetv2": { - "encoder": InceptionResNetV2Encoder, - "pretrained_settings": pretrained_settings["inceptionresnetv2"], - "params": {"out_channels": (3, 64, 192, 320, 1088, 1536), "num_classes": 1000}, - } -} diff --git a/torchseg/encoders/inceptionv4.py b/torchseg/encoders/inceptionv4.py deleted file mode 100644 index da60d72f..00000000 --- a/torchseg/encoders/inceptionv4.py +++ /dev/null @@ -1,68 +0,0 @@ -import torch.nn as nn -from pretrainedmodels.models.inceptionv4 import InceptionV4, pretrained_settings - -from ._base import EncoderMixin - - -class InceptionV4Encoder(InceptionV4, EncoderMixin): - def __init__(self, stage_idxs, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._stage_idxs = stage_idxs - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - # correct paddings - for m in self.modules(): - if isinstance(m, nn.Conv2d): - if m.kernel_size == (3, 3): - m.padding = (1, 1) - if isinstance(m, nn.MaxPool2d): - m.padding = (1, 1) - - # remove linear layers - del self.last_linear - - def make_dilated(self, stage_list, dilation_list): - raise ValueError( - "InceptionV4 encoder does not support dilated mode " - "due to pooling operation for downsampling!" 
- ) - - def get_stages(self): - return [ - nn.Identity(), - self.features[: self._stage_idxs[0]], - self.features[self._stage_idxs[0] : self._stage_idxs[1]], - self.features[self._stage_idxs[1] : self._stage_idxs[2]], - self.features[self._stage_idxs[2] : self._stage_idxs[3]], - self.features[self._stage_idxs[3] :], - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("last_linear.bias", None) - state_dict.pop("last_linear.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -inceptionv4_encoders = { - "inceptionv4": { - "encoder": InceptionV4Encoder, - "pretrained_settings": pretrained_settings["inceptionv4"], - "params": { - "stage_idxs": (3, 5, 9, 15), - "out_channels": (3, 64, 192, 384, 1024, 1536), - "num_classes": 1001, - }, - } -} diff --git a/torchseg/encoders/mix_transformer.py b/torchseg/encoders/mix_transformer.py index a1a2d5c8..93a26641 100644 --- a/torchseg/encoders/mix_transformer.py +++ b/torchseg/encoders/mix_transformer.py @@ -514,34 +514,23 @@ def forward(self, x, H, W): # End of NVIDIA code # --------------------------------------------------------------- -from ._base import EncoderMixin # noqa E402 +class MixVisionTransformerEncoder(MixVisionTransformer): + output_stride = 32 -class MixVisionTransformerEncoder(MixVisionTransformer, EncoderMixin): def __init__(self, out_channels, depth=5, **kwargs): super().__init__(**kwargs) - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - def make_dilated(self, *args, **kwargs): - raise ValueError("MixVisionTransformer encoder does not support dilated mode") - - def set_in_channels(self, in_channels, *args, **kwargs): - if in_channels != 3: - raise ValueError( - """ - MixVisionTransformer encoder does not support - in_channels setting other than 3 - """ - ) + self.depth = depth + self.in_channels = 3 + self.out_channels = out_channels[: self.depth + 1] + self.output_stride = min(self.output_stride, 2**self.depth) def forward(self, x): # create dummy output for the first block B, C, H, W = x.shape dummy = torch.empty([B, 0, H // 2, W // 2], dtype=x.dtype, device=x.device) - return [x, dummy] + self.forward_features(x)[: self._depth - 1] + return [x, dummy] + self.forward_features(x)[: self.depth - 1] def load_state_dict(self, state_dict): state_dict.pop("head.weight", None) diff --git a/torchseg/encoders/mobilenet.py b/torchseg/encoders/mobilenet.py deleted file mode 100644 index b8ba700f..00000000 --- a/torchseg/encoders/mobilenet.py +++ /dev/null @@ -1,55 +0,0 @@ -import torch.nn as nn -import torchvision - -from ._base import EncoderMixin - - -class MobileNetV2Encoder(torchvision.models.MobileNetV2, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - del self.classifier - - def get_stages(self): - return [ - nn.Identity(), - self.features[:2], - self.features[2:4], - self.features[4:7], - self.features[7:14], - self.features[14:], - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("classifier.1.bias", None) - state_dict.pop("classifier.1.weight", None) - 
super().load_state_dict(state_dict, **kwargs) - - -mobilenet_encoders = { - "mobilenet_v2": { - "encoder": MobileNetV2Encoder, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://download.pytorch.org/models/mobilenet_v2-b0353104.pth", - "input_space": "RGB", - "input_range": [0, 1], - } - }, - "params": {"out_channels": (3, 16, 24, 32, 96, 1280)}, - } -} diff --git a/torchseg/encoders/mobileone.py b/torchseg/encoders/mobileone.py deleted file mode 100644 index 408b35dd..00000000 --- a/torchseg/encoders/mobileone.py +++ /dev/null @@ -1,558 +0,0 @@ -# -# For licensing see accompanying LICENSE file. -# Copyright (C) 2022 Apple Inc. All Rights Reserved. -# -import copy -from collections.abc import Sequence -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from . import _utils as utils -from ._base import EncoderMixin - - -class SEBlock(nn.Module): - """Squeeze and Excite module. - - Pytorch implementation of `Squeeze-and-Excitation Networks` - - https://arxiv.org/pdf/1709.01507.pdf - """ - - def __init__(self, in_channels: int, rd_ratio: float = 0.0625) -> None: - """Construct a Squeeze and Excite Module. - - :param in_channels: Number of input channels. - :param rd_ratio: Input channel reduction ratio. - """ - super().__init__() - self.reduce = nn.Conv2d( - in_channels=in_channels, - out_channels=int(in_channels * rd_ratio), - kernel_size=1, - stride=1, - bias=True, - ) - self.expand = nn.Conv2d( - in_channels=int(in_channels * rd_ratio), - out_channels=in_channels, - kernel_size=1, - stride=1, - bias=True, - ) - - def forward(self, inputs: torch.Tensor) -> torch.Tensor: - """Apply forward pass.""" - b, c, h, w = inputs.size() - x = F.avg_pool2d(inputs, kernel_size=[h, w]) - x = self.reduce(x) - x = F.relu(x) - x = self.expand(x) - x = torch.sigmoid(x) - x = x.view(-1, c, 1, 1) - return inputs * x - - -class MobileOneBlock(nn.Module): - """MobileOne building block. - - This block has a multi-branched architecture at train-time - and plain-CNN style architecture at inference time - For more details, please refer to our paper: - `An Improved One millisecond Mobile Backbone` - - https://arxiv.org/pdf/2206.04040.pdf - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - kernel_size: int, - stride: int = 1, - padding: int = 0, - dilation: int = 1, - groups: int = 1, - inference_mode: bool = False, - use_se: bool = False, - num_conv_branches: int = 1, - ) -> None: - """Construct a MobileOneBlock module. - - :param in_channels: Number of channels in the input. - :param out_channels: Number of channels produced by the block. - :param kernel_size: Size of the convolution kernel. - :param stride: Stride size. - :param padding: Zero-padding size. - :param dilation: Kernel dilation factor. - :param groups: Group number. - :param inference_mode: If True, instantiates model in inference mode. - :param use_se: Whether to use SE-ReLU activations. - :param num_conv_branches: Number of linear conv branches. 
- """ - super().__init__() - self.inference_mode = inference_mode - self.groups = groups - self.stride = stride - self.kernel_size = kernel_size - self.in_channels = in_channels - self.out_channels = out_channels - self.num_conv_branches = num_conv_branches - - # Check if SE-ReLU is requested - if use_se: - self.se = SEBlock(out_channels) - else: - self.se = nn.Identity() - self.activation = nn.ReLU() - - if inference_mode: - self.reparam_conv = nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=True, - ) - else: - # Re-parameterizable skip connection - self.rbr_skip = ( - nn.BatchNorm2d(num_features=in_channels) - if out_channels == in_channels and stride == 1 - else None - ) - - # Re-parameterizable conv branches - rbr_conv = list() - for _ in range(self.num_conv_branches): - rbr_conv.append(self._conv_bn(kernel_size=kernel_size, padding=padding)) - self.rbr_conv = nn.ModuleList(rbr_conv) - - # Re-parameterizable scale branch - self.rbr_scale = None - if kernel_size > 1: - self.rbr_scale = self._conv_bn(kernel_size=1, padding=0) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """Apply forward pass.""" - # Inference mode forward pass. - if self.inference_mode: - return self.activation(self.se(self.reparam_conv(x))) - - # Multi-branched train-time forward pass. - # Skip branch output - identity_out = 0 - if self.rbr_skip is not None: - identity_out = self.rbr_skip(x) - - # Scale branch output - scale_out = 0 - if self.rbr_scale is not None: - scale_out = self.rbr_scale(x) - - # Other branches - out = scale_out + identity_out - for ix in range(self.num_conv_branches): - out += self.rbr_conv[ix](x) - - return self.activation(self.se(out)) - - def reparameterize(self): - """Following works like `RepVGG: Making VGG-style ConvNets Great Again` - - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched - architecture used at training time to obtain a plain CNN-like structure - for inference. - """ - if self.inference_mode: - return - kernel, bias = self._get_kernel_bias() - self.reparam_conv = nn.Conv2d( - in_channels=self.rbr_conv[0].conv.in_channels, - out_channels=self.rbr_conv[0].conv.out_channels, - kernel_size=self.rbr_conv[0].conv.kernel_size, - stride=self.rbr_conv[0].conv.stride, - padding=self.rbr_conv[0].conv.padding, - dilation=self.rbr_conv[0].conv.dilation, - groups=self.rbr_conv[0].conv.groups, - bias=True, - ) - self.reparam_conv.weight.data = kernel - self.reparam_conv.bias.data = bias - - # Delete un-used branches - for para in self.parameters(): - para.detach_() - self.__delattr__("rbr_conv") - self.__delattr__("rbr_scale") - if hasattr(self, "rbr_skip"): - self.__delattr__("rbr_skip") - - self.inference_mode = True - - def _get_kernel_bias(self) -> tuple[torch.Tensor, torch.Tensor]: - """Obtain the re-parameterized kernel and bias. - Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 - - :return: Tuple of (kernel, bias) after fusing branches. - """ - # get weights and bias of scale branch - kernel_scale = 0 - bias_scale = 0 - if self.rbr_scale is not None: - kernel_scale, bias_scale = self._fuse_bn_tensor(self.rbr_scale) - # Pad scale branch kernel to match conv branch kernel size. 
- pad = self.kernel_size // 2 - kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) - - # get weights and bias of skip branch - kernel_identity = 0 - bias_identity = 0 - if self.rbr_skip is not None: - kernel_identity, bias_identity = self._fuse_bn_tensor(self.rbr_skip) - - # get weights and bias of conv branches - kernel_conv = 0 - bias_conv = 0 - for ix in range(self.num_conv_branches): - _kernel, _bias = self._fuse_bn_tensor(self.rbr_conv[ix]) - kernel_conv += _kernel - bias_conv += _bias - - kernel_final = kernel_conv + kernel_scale + kernel_identity - bias_final = bias_conv + bias_scale + bias_identity - return kernel_final, bias_final - - def _fuse_bn_tensor(self, branch) -> tuple[torch.Tensor, torch.Tensor]: - """Fuse batchnorm layer with preceeding conv layer. - Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 - - :param branch: - :return: Tuple of (kernel, bias) after fusing batchnorm. - """ - if isinstance(branch, nn.Sequential): - kernel = branch.conv.weight - running_mean = branch.bn.running_mean - running_var = branch.bn.running_var - gamma = branch.bn.weight - beta = branch.bn.bias - eps = branch.bn.eps - else: - assert isinstance(branch, nn.BatchNorm2d) - if not hasattr(self, "id_tensor"): - input_dim = self.in_channels // self.groups - kernel_value = torch.zeros( - (self.in_channels, input_dim, self.kernel_size, self.kernel_size), - dtype=branch.weight.dtype, - device=branch.weight.device, - ) - for i in range(self.in_channels): - kernel_value[ - i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2 - ] = 1 - self.id_tensor = kernel_value - kernel = self.id_tensor - running_mean = branch.running_mean - running_var = branch.running_var - gamma = branch.weight - beta = branch.bias - eps = branch.eps - std = (running_var + eps).sqrt() - t = (gamma / std).reshape(-1, 1, 1, 1) - return kernel * t, beta - running_mean * gamma / std - - def _conv_bn(self, kernel_size: int, padding: int) -> nn.Sequential: - """Construct conv-batchnorm layers. - - :param kernel_size: Size of the convolution kernel. - :param padding: Zero-padding size. - :return: Conv-BN module. - """ - mod_list = nn.Sequential() - mod_list.add_module( - "conv", - nn.Conv2d( - in_channels=self.in_channels, - out_channels=self.out_channels, - kernel_size=kernel_size, - stride=self.stride, - padding=padding, - groups=self.groups, - bias=False, - ), - ) - mod_list.add_module("bn", nn.BatchNorm2d(num_features=self.out_channels)) - return mod_list - - -class MobileOne(nn.Module, EncoderMixin): - """MobileOne Model - - Pytorch implementation of `An Improved One millisecond Mobile Backbone` - - https://arxiv.org/pdf/2206.04040.pdf - """ - - def __init__( - self, - out_channels, - num_blocks_per_stage: Sequence[int] = (2, 8, 10, 1), - width_multipliers: Optional[list[float]] = None, - inference_mode: bool = False, - use_se: bool = False, - depth=5, - in_channels=3, - num_conv_branches: int = 1, - ) -> None: - """Construct MobileOne model. - - :param num_blocks_per_stage: List of number of blocks per stage. - :param num_classes: Number of classes in the dataset. - :param width_multipliers: List of width multiplier for blocks in a stage. - :param inference_mode: If True, instantiates model in inference mode. - :param use_se: Whether to use SE-ReLU activations. - :param num_conv_branches: Number of linear conv branches. 
- """ - super().__init__() - - assert len(width_multipliers) == 4 - self.inference_mode = inference_mode - self._out_channels = out_channels - self.in_planes = min(64, int(64 * width_multipliers[0])) - self.use_se = use_se - self.num_conv_branches = num_conv_branches - self._depth = depth - self._in_channels = in_channels - self.set_in_channels(self._in_channels) - - # Build stages - self.stage0 = MobileOneBlock( - in_channels=self._in_channels, - out_channels=self.in_planes, - kernel_size=3, - stride=2, - padding=1, - inference_mode=self.inference_mode, - ) - self.cur_layer_idx = 1 - self.stage1 = self._make_stage( - int(64 * width_multipliers[0]), num_blocks_per_stage[0], num_se_blocks=0 - ) - self.stage2 = self._make_stage( - int(128 * width_multipliers[1]), num_blocks_per_stage[1], num_se_blocks=0 - ) - self.stage3 = self._make_stage( - int(256 * width_multipliers[2]), - num_blocks_per_stage[2], - num_se_blocks=int(num_blocks_per_stage[2] // 2) if use_se else 0, - ) - self.stage4 = self._make_stage( - int(512 * width_multipliers[3]), - num_blocks_per_stage[3], - num_se_blocks=num_blocks_per_stage[3] if use_se else 0, - ) - - def get_stages(self): - return [ - nn.Identity(), - self.stage0, - self.stage1, - self.stage2, - self.stage3, - self.stage4, - ] - - def _make_stage( - self, planes: int, num_blocks: int, num_se_blocks: int - ) -> nn.Sequential: - """Build a stage of MobileOne model. - - :param planes: Number of output channels. - :param num_blocks: Number of blocks in this stage. - :param num_se_blocks: Number of SE blocks in this stage. - :return: A stage of MobileOne model. - """ - # Get strides for all layers - strides = [2] + [1] * (num_blocks - 1) - blocks = [] - for ix, stride in enumerate(strides): - use_se = False - if num_se_blocks > num_blocks: - raise ValueError( - "Number of SE blocks cannot " "exceed number of layers." 
- ) - if ix >= (num_blocks - num_se_blocks): - use_se = True - - # Depthwise conv - blocks.append( - MobileOneBlock( - in_channels=self.in_planes, - out_channels=self.in_planes, - kernel_size=3, - stride=stride, - padding=1, - groups=self.in_planes, - inference_mode=self.inference_mode, - use_se=use_se, - num_conv_branches=self.num_conv_branches, - ) - ) - # Pointwise conv - blocks.append( - MobileOneBlock( - in_channels=self.in_planes, - out_channels=planes, - kernel_size=1, - stride=1, - padding=0, - groups=1, - inference_mode=self.inference_mode, - use_se=use_se, - num_conv_branches=self.num_conv_branches, - ) - ) - self.in_planes = planes - self.cur_layer_idx += 1 - return nn.Sequential(*blocks) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """Apply forward pass.""" - stages = self.get_stages() - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("linear.weight", None) - state_dict.pop("linear.bias", None) - super().load_state_dict(state_dict, **kwargs) - - def set_in_channels(self, in_channels, pretrained=True): - """Change first convolution channels""" - if in_channels == 3: - return - - self._in_channels = in_channels - self._out_channels = tuple([in_channels] + list(self._out_channels)[1:]) - utils.patch_first_conv( - model=self.stage0.rbr_conv, - new_in_channels=in_channels, - pretrained=pretrained, - ) - utils.patch_first_conv( - model=self.stage0.rbr_scale, - new_in_channels=in_channels, - pretrained=pretrained, - ) - - -def reparameterize_model(model: torch.nn.Module) -> nn.Module: - """Return a model where a multi-branched structure - used in training is re-parameterized into a single branch - for inference. - - :param model: MobileOne model in train mode. - :return: MobileOne model in inference mode. 
- """ - # Avoid editing original graph - model = copy.deepcopy(model) - for module in model.modules(): - if hasattr(module, "reparameterize"): - module.reparameterize() - return model - - -mobileone_encoders = { - "mobileone_s0": { - "encoder": MobileOne, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://docs-assets.developer.apple.com/ml-research/datasets/mobileone/mobileone_s0_unfused.pth.tar", # noqa - "input_space": "RGB", - "input_range": [0, 1], - } - }, - "params": { - "out_channels": (3, 48, 48, 128, 256, 1024), - "width_multipliers": (0.75, 1.0, 1.0, 2.0), - "num_conv_branches": 4, - "inference_mode": False, - }, - }, - "mobileone_s1": { - "encoder": MobileOne, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://docs-assets.developer.apple.com/ml-research/datasets/mobileone/mobileone_s1_unfused.pth.tar", # noqa - "input_space": "RGB", - "input_range": [0, 1], - } - }, - "params": { - "out_channels": (3, 64, 96, 192, 512, 1280), - "width_multipliers": (1.5, 1.5, 2.0, 2.5), - "inference_mode": False, - }, - }, - "mobileone_s2": { - "encoder": MobileOne, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://docs-assets.developer.apple.com/ml-research/datasets/mobileone/mobileone_s2_unfused.pth.tar", # noqa - "input_space": "RGB", - "input_range": [0, 1], - } - }, - "params": { - "out_channels": (3, 64, 96, 256, 640, 2048), - "width_multipliers": (1.5, 2.0, 2.5, 4.0), - "inference_mode": False, - }, - }, - "mobileone_s3": { - "encoder": MobileOne, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://docs-assets.developer.apple.com/ml-research/datasets/mobileone/mobileone_s3_unfused.pth.tar", # noqa - "input_space": "RGB", - "input_range": [0, 1], - } - }, - "params": { - "out_channels": (3, 64, 128, 320, 768, 2048), - "width_multipliers": (2.0, 2.5, 3.0, 4.0), - "inference_mode": False, - }, - }, - "mobileone_s4": { - "encoder": MobileOne, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://docs-assets.developer.apple.com/ml-research/datasets/mobileone/mobileone_s4_unfused.pth.tar", # noqa - "input_space": "RGB", - "input_range": [0, 1], - } - }, - "params": { - "out_channels": (3, 64, 192, 448, 896, 2048), - "width_multipliers": (3.0, 3.5, 3.5, 4.0), - "use_se": True, - "inference_mode": False, - }, - }, -} diff --git a/torchseg/encoders/resnet.py b/torchseg/encoders/resnet.py deleted file mode 100644 index 9767e9ee..00000000 --- a/torchseg/encoders/resnet.py +++ /dev/null @@ -1,211 +0,0 @@ -from copy import deepcopy - -import torch.nn as nn -from pretrainedmodels.models.torchvision_models import pretrained_settings -from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet - -from ._base import EncoderMixin - - -class ResNetEncoder(ResNet, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - - del self.fc - del self.avgpool - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential(self.conv1, self.bn1, self.relu), - nn.Sequential(self.maxpool, self.layer1), - self.layer2, - self.layer3, - self.layer4, - ] - - def forward(self, x): - stages = self.get_stages() - - 
features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("fc.bias", None) - state_dict.pop("fc.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -new_settings = { - "resnet18": { - "ssl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth", # noqa: E501 - "swsl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth", # noqa: E501 - }, - "resnet50": { - "ssl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth", # noqa: E501 - "swsl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth", # noqa: E501 - }, - "resnext50_32x4d": { - "imagenet": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth", - "ssl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth", # noqa: E501 - "swsl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth", # noqa: E501 - }, - "resnext101_32x4d": { - "ssl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth", # noqa: E501 - "swsl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth", # noqa: E501 - }, - "resnext101_32x8d": { - "imagenet": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth", - "instagram": "https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth", # noqa: E501 - "ssl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth", # noqa: E501 - "swsl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth", # noqa: E501 - }, - "resnext101_32x16d": { - "instagram": "https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth", # noqa: E501 - "ssl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth", # noqa: E501 - "swsl": "https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth", # noqa: E501 - }, - "resnext101_32x32d": { - "instagram": "https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth" # noqa: E501 - }, - "resnext101_32x48d": { - "instagram": "https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth" # noqa: E501 - }, -} - -pretrained_settings = deepcopy(pretrained_settings) -for model_name, sources in new_settings.items(): - if model_name not in pretrained_settings: - pretrained_settings[model_name] = {} - - for source_name, source_url in sources.items(): - pretrained_settings[model_name][source_name] = { - "url": source_url, - "input_size": [3, 224, 224], - "input_range": [0, 1], - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "num_classes": 1000, - } - - -resnet_encoders = { - "resnet18": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnet18"], - "params": { - "out_channels": (3, 64, 64, 128, 256, 512), - "block": BasicBlock, - "layers": [2, 2, 2, 2], - }, - }, - "resnet34": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnet34"], - "params": { - "out_channels": (3, 64, 64, 
128, 256, 512), - "block": BasicBlock, - "layers": [3, 4, 6, 3], - }, - }, - "resnet50": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnet50"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 6, 3], - }, - }, - "resnet101": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnet101"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 23, 3], - }, - }, - "resnet152": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnet152"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 8, 36, 3], - }, - }, - "resnext50_32x4d": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnext50_32x4d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 6, 3], - "groups": 32, - "width_per_group": 4, - }, - }, - "resnext101_32x4d": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnext101_32x4d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 23, 3], - "groups": 32, - "width_per_group": 4, - }, - }, - "resnext101_32x8d": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnext101_32x8d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 23, 3], - "groups": 32, - "width_per_group": 8, - }, - }, - "resnext101_32x16d": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnext101_32x16d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 23, 3], - "groups": 32, - "width_per_group": 16, - }, - }, - "resnext101_32x32d": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnext101_32x32d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 23, 3], - "groups": 32, - "width_per_group": 32, - }, - }, - "resnext101_32x48d": { - "encoder": ResNetEncoder, - "pretrained_settings": pretrained_settings["resnext101_32x48d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottleneck, - "layers": [3, 4, 23, 3], - "groups": 32, - "width_per_group": 48, - }, - }, -} diff --git a/torchseg/encoders/senet.py b/torchseg/encoders/senet.py deleted file mode 100644 index 87110abf..00000000 --- a/torchseg/encoders/senet.py +++ /dev/null @@ -1,149 +0,0 @@ -import torch.nn as nn -from pretrainedmodels.models.senet import ( - SEBottleneck, - SENet, - SEResNetBottleneck, - SEResNeXtBottleneck, - pretrained_settings, -) - -from ._base import EncoderMixin - - -class SENetEncoder(SENet, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - del self.last_linear - del self.avg_pool - - def get_stages(self): - return [ - nn.Identity(), - self.layer0[:-1], - nn.Sequential(self.layer0[-1], self.layer1), - self.layer2, - self.layer3, - self.layer4, - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("last_linear.bias", None) - 
state_dict.pop("last_linear.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -senet_encoders = { - "senet154": { - "encoder": SENetEncoder, - "pretrained_settings": pretrained_settings["senet154"], - "params": { - "out_channels": (3, 128, 256, 512, 1024, 2048), - "block": SEBottleneck, - "dropout_p": 0.2, - "groups": 64, - "layers": [3, 8, 36, 3], - "num_classes": 1000, - "reduction": 16, - }, - }, - "se_resnet50": { - "encoder": SENetEncoder, - "pretrained_settings": pretrained_settings["se_resnet50"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": SEResNetBottleneck, - "layers": [3, 4, 6, 3], - "downsample_kernel_size": 1, - "downsample_padding": 0, - "dropout_p": None, - "groups": 1, - "inplanes": 64, - "input_3x3": False, - "num_classes": 1000, - "reduction": 16, - }, - }, - "se_resnet101": { - "encoder": SENetEncoder, - "pretrained_settings": pretrained_settings["se_resnet101"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": SEResNetBottleneck, - "layers": [3, 4, 23, 3], - "downsample_kernel_size": 1, - "downsample_padding": 0, - "dropout_p": None, - "groups": 1, - "inplanes": 64, - "input_3x3": False, - "num_classes": 1000, - "reduction": 16, - }, - }, - "se_resnet152": { - "encoder": SENetEncoder, - "pretrained_settings": pretrained_settings["se_resnet152"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": SEResNetBottleneck, - "layers": [3, 8, 36, 3], - "downsample_kernel_size": 1, - "downsample_padding": 0, - "dropout_p": None, - "groups": 1, - "inplanes": 64, - "input_3x3": False, - "num_classes": 1000, - "reduction": 16, - }, - }, - "se_resnext50_32x4d": { - "encoder": SENetEncoder, - "pretrained_settings": pretrained_settings["se_resnext50_32x4d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": SEResNeXtBottleneck, - "layers": [3, 4, 6, 3], - "downsample_kernel_size": 1, - "downsample_padding": 0, - "dropout_p": None, - "groups": 32, - "inplanes": 64, - "input_3x3": False, - "num_classes": 1000, - "reduction": 16, - }, - }, - "se_resnext101_32x4d": { - "encoder": SENetEncoder, - "pretrained_settings": pretrained_settings["se_resnext101_32x4d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": SEResNeXtBottleneck, - "layers": [3, 4, 23, 3], - "downsample_kernel_size": 1, - "downsample_padding": 0, - "dropout_p": None, - "groups": 32, - "inplanes": 64, - "input_3x3": False, - "num_classes": 1000, - "reduction": 16, - }, - }, -} diff --git a/torchseg/encoders/supported.py b/torchseg/encoders/supported.py new file mode 100644 index 00000000..decdb6bc --- /dev/null +++ b/torchseg/encoders/supported.py @@ -0,0 +1,5025 @@ +# 743/1017 support features_only=True +TIMM_ENCODERS = { + "bat_resnext26ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "botnet26t_256": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "botnet50ts_256": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "caformer_b36": { + "channels": [128, 256, 512, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", 
"stages.3"], + "reduction": [2, 2, 2, 2], + }, + "caformer_m36": { + "channels": [96, 192, 384, 576], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "caformer_s18": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "caformer_s36": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "coatnet_0_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_0_rw_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_1_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_1_rw_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_2_224": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_2_rw_224": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_3_224": { + "channels": [192, 192, 384, 768, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_3_rw_224": { + "channels": [192, 192, 384, 768, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_4_224": { + "channels": [192, 192, 384, 768, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_5_224": { + "channels": [192, 256, 512, 1280, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_bn_0_rw_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_nano_cc_224": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_nano_rw_224": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_pico_rw_224": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_rmlp_0_rw_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", 
"stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_rmlp_1_rw2_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_rmlp_1_rw_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_rmlp_2_rw_224": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_rmlp_2_rw_384": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_rmlp_3_rw_224": { + "channels": [192, 192, 384, 768, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnet_rmlp_nano_rw_224": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "coatnext_nano_rw_224": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "convformer_b36": { + "channels": [128, 256, 512, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "convformer_m36": { + "channels": [96, 192, 384, 576], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "convformer_s18": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "convformer_s36": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "convnext_atto": { + "channels": [40, 80, 160, 320], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_atto_ols": { + "channels": [40, 80, 160, 320], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_base": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_femto": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_femto_ols": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_large": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_large_mlp": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + 
"convnext_nano": { + "channels": [80, 160, 320, 640], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_nano_ols": { + "channels": [80, 160, 320, 640], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_pico": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_pico_ols": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_small": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_tiny": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_tiny_hnf": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_xlarge": { + "channels": [256, 512, 1024, 2048], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnext_xxlarge": { + "channels": [384, 768, 1536, 3072], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_atto": { + "channels": [40, 80, 160, 320], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_base": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_femto": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_huge": { + "channels": [352, 704, 1408, 2816], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_large": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_nano": { + "channels": [80, 160, 320, 640], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_pico": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_small": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "convnextv2_tiny": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "cs3darknet_focus_l": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3darknet_focus_m": { + 
"channels": [48, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3darknet_focus_s": { + "channels": [32, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3darknet_focus_x": { + "channels": [80, 160, 320, 640, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3darknet_l": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3darknet_m": { + "channels": [48, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3darknet_s": { + "channels": [32, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3darknet_x": { + "channels": [80, 160, 320, 640, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3edgenet_x": { + "channels": [80, 160, 320, 640, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3se_edgenet_x": { + "channels": [80, 160, 320, 640, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3sedarknet_l": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3sedarknet_x": { + "channels": [80, 160, 320, 640, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cs3sedarknet_xdw": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cspdarknet53": { + "channels": [32, 64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "stem.conv1", + "stages.0", + "stages.1", + "stages.2", + "stages.3", + "stages.4", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "cspresnet50": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cspresnet50d": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cspresnet50w": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "cspresnext50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "darknet17": { + 
"channels": [32, 64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "stem.conv1", + "stages.0", + "stages.1", + "stages.2", + "stages.3", + "stages.4", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "darknet21": { + "channels": [32, 64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "stem.conv1", + "stages.0", + "stages.1", + "stages.2", + "stages.3", + "stages.4", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "darknet53": { + "channels": [32, 64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "stem.conv1", + "stages.0", + "stages.1", + "stages.2", + "stages.3", + "stages.4", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "darknetaa53": { + "channels": [32, 64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "stem.conv1", + "stages.0", + "stages.1", + "stages.2", + "stages.3", + "stages.4", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "davit_base": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "davit_giant": { + "channels": [384, 768, 1536, 3072], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "davit_huge": { + "channels": [256, 512, 1024, 2048], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "davit_large": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "davit_small": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "davit_tiny": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "densenet121": { + "channels": [64, 256, 512, 1024, 1024], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.norm0", + "features.denseblock1", + "features.denseblock2", + "features.denseblock3", + "features.norm5", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "densenet161": { + "channels": [96, 384, 768, 2112, 2208], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.norm0", + "features.denseblock1", + "features.denseblock2", + "features.denseblock3", + "features.norm5", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "densenet169": { + "channels": [64, 256, 512, 1280, 1664], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.norm0", + "features.denseblock1", + "features.denseblock2", + "features.denseblock3", + "features.norm5", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "densenet201": { + "channels": [64, 256, 512, 1792, 1920], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.norm0", + "features.denseblock1", + "features.denseblock2", + "features.denseblock3", + "features.norm5", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "densenet264d": { + "channels": [96, 384, 768, 3456, 4032], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.norm2", + "features.denseblock1", + "features.denseblock2", + "features.denseblock3", + "features.norm5", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "densenetblur121d": { + "channels": [64, 256, 512, 1024, 1024], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.norm2", + "features.denseblock1", + "features.denseblock2", + 
"features.denseblock3", + "features.norm5", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "dla102": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla102x": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla102x2": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla169": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla34": { + "channels": [32, 64, 128, 256, 512], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla46_c": { + "channels": [32, 64, 64, 128, 256], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla46x_c": { + "channels": [32, 64, 64, 128, 256], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla60": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla60_res2net": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla60_res2next": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla60x": { + "channels": [32, 128, 256, 512, 1024], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dla60x_c": { + "channels": [32, 64, 64, 128, 256], + "indices": (1, 2, 3, 4, 5), + "module": ["level1", "level2", "level3", "level4", "level5"], + "reduction": [2, 4, 8, 16, 32], + }, + "dm_nfnet_f0": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "dm_nfnet_f1": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "dm_nfnet_f2": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "dm_nfnet_f3": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "dm_nfnet_f4": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "dm_nfnet_f5": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", 
"final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "dm_nfnet_f6": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "dpn107": { + "channels": [128, 376, 1152, 2432, 2688], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.conv1_1", + "features.conv2_4", + "features.conv3_8", + "features.conv4_20", + "features.conv5_3", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "dpn131": { + "channels": [128, 352, 832, 1984, 2688], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.conv1_1", + "features.conv2_4", + "features.conv3_8", + "features.conv4_28", + "features.conv5_3", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "dpn48b": { + "channels": [10, 144, 320, 512, 832], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.conv1_1", + "features.conv2_3", + "features.conv3_4", + "features.conv4_6", + "features.conv5_3", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "dpn68": { + "channels": [10, 144, 320, 704, 832], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.conv1_1", + "features.conv2_3", + "features.conv3_4", + "features.conv4_12", + "features.conv5_3", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "dpn68b": { + "channels": [10, 144, 320, 704, 832], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.conv1_1", + "features.conv2_3", + "features.conv3_4", + "features.conv4_12", + "features.conv5_3", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "dpn92": { + "channels": [64, 336, 704, 1552, 2688], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.conv1_1", + "features.conv2_3", + "features.conv3_4", + "features.conv4_20", + "features.conv5_3", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "dpn98": { + "channels": [96, 336, 768, 1728, 2688], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.conv1_1", + "features.conv2_3", + "features.conv3_6", + "features.conv4_20", + "features.conv5_3", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_botnext26ts_256": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_halonext26ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_nfnet_l0": { + "channels": [64, 256, 512, 1536, 2304], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_nfnet_l1": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_nfnet_l2": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_nfnet_l3": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_resnet33ts": { + "channels": [32, 256, 512, 1536, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_resnext26ts": { + "channels": 
[64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "eca_vovnet39b": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet101d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet101d_pruned": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet200d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet269d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet26t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet50d_pruned": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnet50t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnetlight": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnext26t_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "ecaresnext50t_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "edgenext_base": { + "channels": [80, 160, 288, 584], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "edgenext_small": { + "channels": [48, 96, 160, 304], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "edgenext_small_rw": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "edgenext_x_small": { + "channels": [32, 64, 100, 192], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "edgenext_xx_small": { + "channels": [24, 48, 88, 168], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientformerv2_l": { + "channels": [40, 80, 192, 384], + "indices": (0, 1, 2, 3), + "module": 
["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientformerv2_s0": { + "channels": [32, 48, 96, 176], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientformerv2_s1": { + "channels": [32, 48, 120, 224], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientformerv2_s2": { + "channels": [32, 64, 144, 288], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientnet_b0": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b0_g16_evos": { + "channels": [16, 32, 48, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b0_g8_gn": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b0_gn": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b1": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b1_pruned": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b2": { + "channels": [16, 24, 48, 120, 352], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b2_pruned": { + "channels": [16, 24, 48, 120, 352], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b3": { + "channels": [24, 32, 48, 136, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b3_g8_gn": { + "channels": [32, 32, 48, 128, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b3_gn": { + "channels": [32, 32, 48, 128, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b3_pruned": { + "channels": [24, 32, 48, 136, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b4": { + "channels": [24, 32, 56, 160, 448], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b5": { + "channels": [24, 40, 64, 176, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + 
"efficientnet_b6": { + "channels": [32, 40, 72, 200, 576], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b7": { + "channels": [32, 48, 80, 224, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_b8": { + "channels": [32, 56, 88, 248, 704], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_cc_b0_4e": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_cc_b0_8e": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_cc_b1_8e": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_el": { + "channels": [32, 40, 56, 176, 232], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_el_pruned": { + "channels": [32, 40, 56, 176, 232], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_em": { + "channels": [24, 32, 48, 144, 192], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_es": { + "channels": [24, 32, 48, 144, 192], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_es_pruned": { + "channels": [24, 32, 48, 144, 192], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_l2": { + "channels": [72, 104, 176, 480, 1376], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_lite0": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_lite1": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_lite2": { + "channels": [16, 24, 48, 120, 352], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_lite3": { + "channels": [24, 32, 48, 136, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnet_lite4": { + "channels": [24, 32, 56, 160, 448], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnetv2_l": { + "channels": 
[32, 64, 96, 224, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnetv2_m": { + "channels": [24, 48, 80, 176, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnetv2_rw_m": { + "channels": [32, 56, 80, 192, 328], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnetv2_rw_s": { + "channels": [24, 48, 64, 160, 272], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnetv2_rw_t": { + "channels": [24, 40, 48, 128, 208], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnetv2_s": { + "channels": [24, 48, 64, 160, 256], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientnetv2_xl": { + "channels": [32, 64, 96, 256, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "efficientvit_b0": { + "channels": [16, 32, 64, 128], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientvit_b1": { + "channels": [32, 64, 128, 256], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientvit_b2": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientvit_b3": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientvit_l1": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientvit_l2": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientvit_l3": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "efficientvit_m0": { + "channels": [64, 128, 192], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "efficientvit_m1": { + "channels": [128, 144, 192], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "efficientvit_m2": { + "channels": [128, 192, 224], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "efficientvit_m3": { + "channels": [128, 240, 320], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "efficientvit_m4": { + "channels": [128, 256, 384], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "efficientvit_m5": { + "channels": [192, 288, 384], + 
"indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "ese_vovnet19b_dw": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "ese_vovnet19b_slim": { + "channels": [64, 112, 256, 384, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "ese_vovnet19b_slim_dw": { + "channels": [64, 112, 256, 384, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "ese_vovnet39b": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "ese_vovnet39b_evos": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "ese_vovnet57b": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "ese_vovnet99b": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "fastvit_ma36": { + "channels": [76, 152, 304, 608], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "fastvit_s12": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "fastvit_sa12": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "fastvit_sa24": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "fastvit_sa36": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "fastvit_t12": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "fastvit_t8": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "fbnetc_100": { + "channels": [16, 24, 32, 112, 352], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "fbnetv3_b": { + "channels": [16, 24, 40, 120, 1344], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "fbnetv3_d": { + "channels": [16, 24, 40, 128, 1440], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "fbnetv3_g": { + "channels": [24, 40, 56, 160, 1728], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", 
"blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "focalnet_base_lrf": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_base_srf": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_huge_fl3": { + "channels": [352, 704, 1408, 2816], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_huge_fl4": { + "channels": [352, 704, 1408, 2816], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_large_fl3": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_large_fl4": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_small_lrf": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_small_srf": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_tiny_lrf": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_tiny_srf": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_xlarge_fl3": { + "channels": [256, 512, 1024, 2048], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "focalnet_xlarge_fl4": { + "channels": [256, 512, 1024, 2048], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "gc_efficientnetv2_rw_t": { + "channels": [24, 40, 48, 128, 208], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "gcresnet33ts": { + "channels": [32, 256, 512, 1536, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "gcresnet50t": { + "channels": [32, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "gcresnext26ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "gcresnext50ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "gernet_l": { + "channels": [32, 128, 192, 640, 2560], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + 
"gernet_m": { + "channels": [32, 128, 192, 640, 2560], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "gernet_s": { + "channels": [13, 48, 48, 384, 1920], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "ghostnet_050": { + "channels": [8, 12, 20, 40, 80], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "ghostnet_100": { + "channels": [16, 24, 40, 80, 160], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "ghostnet_130": { + "channels": [20, 32, 52, 104, 208], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "ghostnetv2_100": { + "channels": [16, 24, 40, 80, 160], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "ghostnetv2_130": { + "channels": [20, 32, 52, 104, 208], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "ghostnetv2_160": { + "channels": [24, 40, 64, 128, 256], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "halo2botnet50ts_256": { + "channels": [32, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "halonet26t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "halonet50ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "halonet_h1": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "haloregnetz_b": { + "channels": [32, 48, 96, 192, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "hardcorenas_a": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "hardcorenas_b": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "hardcorenas_c": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "hardcorenas_d": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "hardcorenas_e": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": 
["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "hardcorenas_f": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w18": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w18_small": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w18_small_v2": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w18_ssld": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w30": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w32": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w40": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w44": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w48": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w48_ssld": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "hrnet_w64": { + "channels": [64, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stage1", "stage2", "stage3", "stage4"], + "reduction": [2, 4, 8, 16, 32], + }, + "inception_next_base": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "inception_next_small": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "inception_next_tiny": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "inception_resnet_v2": { + "channels": [64, 192, 320, 1088, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["conv2d_2b", "conv2d_4a", "repeat", "repeat_1", "conv2d_7b"], + "reduction": [2, 4, 8, 16, 32], + }, + "inception_v3": { + "channels": [64, 192, 288, 768, 2048], + "indices": (0, 1, 2, 3, 4), + "module": [ + "Conv2d_2b_3x3", + "Conv2d_4a_3x3", + "Mixed_5d", + "Mixed_6e", + "Mixed_7c", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "inception_v4": { + "channels": [64, 160, 384, 1024, 1536], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.2", + "features.3", + 
"features.9", + "features.17", + "features.21", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "lambda_resnet26rpt_256": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "lambda_resnet26t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "lambda_resnet50ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "lamhalobotnet50ts_256": { + "channels": [32, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "lcnet_035": { + "channels": [16, 24, 48, 88, 176], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "lcnet_050": { + "channels": [16, 32, 64, 128, 256], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "lcnet_075": { + "channels": [24, 48, 96, 192, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "lcnet_100": { + "channels": [32, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "lcnet_150": { + "channels": [48, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_senet154": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_seresnet101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_seresnet152": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_seresnet18": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_seresnet34": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_seresnet50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_seresnext101_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_seresnext26_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + 
}, + "legacy_seresnext50_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["layer0", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "legacy_xception": { + "channels": [64, 128, 256, 728, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act2", "block2.rep.0", "block3.rep.0", "block12.rep.0", "act4"], + "reduction": [2, 4, 8, 16, 32], + }, + "levit_conv_128": { + "channels": [128, 256, 384], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "levit_conv_128s": { + "channels": [128, 256, 384], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "levit_conv_192": { + "channels": [192, 288, 384], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "levit_conv_256": { + "channels": [256, 384, 512], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "levit_conv_256d": { + "channels": [256, 384, 512], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "levit_conv_384": { + "channels": [384, 512, 768], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "levit_conv_384_s8": { + "channels": [384, 512, 768], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [8, 16, 32], + }, + "levit_conv_512": { + "channels": [512, 768, 1024], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "levit_conv_512_s8": { + "channels": [512, 640, 896], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [8, 16, 32], + }, + "levit_conv_512d": { + "channels": [512, 640, 768], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [16, 32, 64], + }, + "maxvit_base_tf_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_base_tf_384": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_base_tf_512": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_large_tf_224": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_large_tf_384": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_large_tf_512": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_nano_rw_256": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_pico_rw_256": { + "channels": [32, 32, 64, 128, 256], + 
"indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_rmlp_base_rw_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_rmlp_base_rw_384": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_rmlp_nano_rw_256": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_rmlp_pico_rw_256": { + "channels": [32, 32, 64, 128, 256], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_rmlp_small_rw_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_rmlp_small_rw_256": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_rmlp_tiny_rw_256": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_small_tf_224": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_small_tf_384": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_small_tf_512": { + "channels": [64, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_tiny_pm_256": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_tiny_rw_224": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_tiny_rw_256": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_tiny_tf_224": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_tiny_tf_384": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_tiny_tf_512": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_xlarge_tf_224": { + "channels": [192, 192, 384, 768, 1536], + "indices": (0, 1, 2, 3, 4), + "module": 
["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_xlarge_tf_384": { + "channels": [192, 192, 384, 768, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxvit_xlarge_tf_512": { + "channels": [192, 192, 384, 768, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxxvit_rmlp_nano_rw_256": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxxvit_rmlp_small_rw_256": { + "channels": [96, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxxvit_rmlp_tiny_rw_256": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxxvitv2_nano_rw_256": { + "channels": [96, 96, 192, 384, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxxvitv2_rmlp_base_rw_224": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxxvitv2_rmlp_base_rw_384": { + "channels": [128, 128, 256, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "maxxvitv2_rmlp_large_rw_224": { + "channels": [160, 160, 320, 640, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "mixnet_l": { + "channels": [32, 40, 56, 160, 264], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mixnet_m": { + "channels": [24, 32, 40, 120, 200], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mixnet_s": { + "channels": [16, 24, 40, 120, 200], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mixnet_xl": { + "channels": [40, 48, 64, 192, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mixnet_xxl": { + "channels": [56, 80, 96, 288, 480], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mnasnet_050": { + "channels": [8, 16, 24, 48, 160], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mnasnet_075": { + "channels": [16, 24, 32, 72, 240], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mnasnet_100": { + "channels": [16, 24, 40, 96, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + 
"reduction": [2, 4, 8, 16, 32], + }, + "mnasnet_140": { + "channels": [24, 32, 56, 136, 448], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mnasnet_small": { + "channels": [8, 16, 16, 32, 144], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv2_035": { + "channels": [8, 8, 16, 32, 112], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv2_050": { + "channels": [8, 16, 16, 48, 160], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv2_075": { + "channels": [16, 24, 24, 72, 240], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv2_100": { + "channels": [16, 24, 32, 96, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv2_110d": { + "channels": [16, 24, 32, 104, 352], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv2_120d": { + "channels": [24, 32, 40, 112, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv2_140": { + "channels": [24, 32, 48, 136, 448], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv3_large_075": { + "channels": [16, 24, 32, 88, 720], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv3_large_100": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv3_rw": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv3_small_050": { + "channels": [16, 8, 16, 24, 288], + "indices": (0, 1, 2, 3, 4), + "module": ["bn1", "blocks.0", "blocks.1", "blocks.3", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv3_small_075": { + "channels": [16, 16, 24, 40, 432], + "indices": (0, 1, 2, 3, 4), + "module": ["bn1", "blocks.0", "blocks.1", "blocks.3", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilenetv3_small_100": { + "channels": [16, 16, 24, 48, 576], + "indices": (0, 1, 2, 3, 4), + "module": ["bn1", "blocks.0", "blocks.1", "blocks.3", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobileone_s0": { + "channels": [48, 48, 128, 256, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobileone_s1": { + "channels": [64, 96, 192, 512, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobileone_s2": { + "channels": [64, 96, 256, 640, 2048], 
+ "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobileone_s3": { + "channels": [64, 128, 320, 768, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobileone_s4": { + "channels": [64, 192, 448, 896, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevit_s": { + "channels": [32, 64, 96, 128, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevit_xs": { + "channels": [32, 48, 64, 80, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevit_xxs": { + "channels": [16, 24, 48, 64, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevitv2_050": { + "channels": [32, 64, 128, 192, 256], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevitv2_075": { + "channels": [48, 96, 192, 288, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevitv2_100": { + "channels": [64, 128, 256, 384, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevitv2_125": { + "channels": [80, 160, 320, 480, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevitv2_150": { + "channels": [96, 192, 384, 576, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevitv2_175": { + "channels": [112, 224, 448, 672, 896], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "mobilevitv2_200": { + "channels": [128, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stages.0", "stages.1", "stages.2", "stages.3", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nasnetalarge": { + "channels": [96, 168, 1008, 2016, 4032], + "indices": (0, 1, 2, 3, 4), + "module": [ + "conv0", + "cell_stem_1.conv_1x1.act", + "reduction_cell_0.conv_1x1.act", + "reduction_cell_1.conv_1x1.act", + "act", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "nest_base": { + "channels": [128, 256, 512], + "indices": (0, 1, 2), + "module": ["levels.0", "levels.1", "levels.2"], + "reduction": [4, 8, 16], + }, + "nest_base_jx": { + "channels": [128, 256, 512], + "indices": (0, 1, 2), + "module": ["levels.0", "levels.1", "levels.2"], + "reduction": [4, 8, 16], + }, + "nest_small": { + "channels": [96, 192, 384], + "indices": (0, 1, 2), + "module": ["levels.0", "levels.1", "levels.2"], + "reduction": [4, 8, 16], + }, + "nest_small_jx": { + "channels": [96, 192, 384], + "indices": (0, 1, 2), + "module": ["levels.0", "levels.1", "levels.2"], + "reduction": [4, 8, 16], + }, + "nest_tiny": { + "channels": [96, 192, 384], + 
"indices": (0, 1, 2), + "module": ["levels.0", "levels.1", "levels.2"], + "reduction": [4, 8, 16], + }, + "nest_tiny_jx": { + "channels": [96, 192, 384], + "indices": (0, 1, 2), + "module": ["levels.0", "levels.1", "levels.2"], + "reduction": [4, 8, 16], + }, + "nf_ecaresnet101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_ecaresnet26": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_ecaresnet50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_regnet_b0": { + "channels": [40, 40, 80, 160, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_regnet_b1": { + "channels": [40, 40, 80, 160, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_regnet_b2": { + "channels": [40, 40, 88, 176, 1064], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_regnet_b3": { + "channels": [40, 40, 96, 184, 1152], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_regnet_b4": { + "channels": [48, 48, 112, 216, 1344], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_regnet_b5": { + "channels": [64, 64, 128, 256, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_resnet101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_resnet26": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_resnet50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_seresnet101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_seresnet26": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nf_seresnet50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_f0": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + 
}, + "nfnet_f1": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_f2": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_f3": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_f4": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_f5": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_f6": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_f7": { + "channels": [64, 256, 512, 1536, 3072], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "nfnet_l0": { + "channels": [64, 256, 512, 1536, 2304], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "pit_b_224": { + "channels": [256, 512, 1024], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [6, 12, 24], + }, + "pit_b_distilled_224": { + "channels": [256, 512, 1024], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [6, 12, 24], + }, + "pit_s_224": { + "channels": [144, 288, 576], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [7, 14, 28], + }, + "pit_s_distilled_224": { + "channels": [144, 288, 576], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [7, 14, 28], + }, + "pit_ti_224": { + "channels": [64, 128, 256], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [7, 14, 28], + }, + "pit_ti_distilled_224": { + "channels": [64, 128, 256], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [7, 14, 28], + }, + "pit_xs_224": { + "channels": [96, 192, 384], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [7, 14, 28], + }, + "pit_xs_distilled_224": { + "channels": [96, 192, 384], + "indices": (0, 1, 2), + "module": ["transformers.0", "transformers.1", "transformers.2"], + "reduction": [7, 14, 28], + }, + "pnasnet5large": { + "channels": [96, 270, 1080, 2160, 4320], + "indices": (0, 1, 2, 3, 4), + "module": [ + "conv_0", + "cell_stem_1.conv_1x1.act", + "cell_4.conv_1x1.act", + "cell_8.conv_1x1.act", + "act", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "poolformer_m36": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformer_m48": { + "channels": [96, 192, 384, 
768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformer_s12": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformer_s24": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformer_s36": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformerv2_m36": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformerv2_m48": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformerv2_s12": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformerv2_s24": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "poolformerv2_s36": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 2, 2, 2], + }, + "pvt_v2_b0": { + "channels": [32, 64, 160, 256], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "pvt_v2_b1": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "pvt_v2_b2": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "pvt_v2_b2_li": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "pvt_v2_b3": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "pvt_v2_b4": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "pvt_v2_b5": { + "channels": [64, 128, 320, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "regnetv_040": { + "channels": [32, 128, 192, 512, 1088], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetv_064": { + "channels": [32, 144, 288, 576, 1296], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_002": { + "channels": [32, 24, 56, 152, 368], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_004": { + "channels": [32, 32, 64, 160, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_004_tv": { + "channels": [32, 
32, 64, 160, 400], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_006": { + "channels": [32, 48, 96, 240, 528], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_008": { + "channels": [32, 64, 128, 288, 672], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_016": { + "channels": [32, 72, 168, 408, 912], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_032": { + "channels": [32, 96, 192, 432, 1008], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_040": { + "channels": [32, 80, 240, 560, 1360], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_064": { + "channels": [32, 168, 392, 784, 1624], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_080": { + "channels": [32, 80, 240, 720, 1920], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_120": { + "channels": [32, 224, 448, 896, 2240], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_160": { + "channels": [32, 256, 512, 896, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetx_320": { + "channels": [32, 336, 672, 1344, 2520], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_002": { + "channels": [32, 24, 56, 152, 368], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_004": { + "channels": [32, 48, 104, 208, 440], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_006": { + "channels": [32, 48, 112, 256, 608], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_008": { + "channels": [32, 64, 128, 320, 768], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_008_tv": { + "channels": [32, 64, 144, 320, 784], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_016": { + "channels": [32, 48, 120, 336, 888], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_032": { + "channels": [32, 72, 216, 576, 1512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_040": { + "channels": [32, 128, 192, 512, 1088], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_040_sgn": { + "channels": [32, 128, 192, 512, 1088], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_064": { + "channels": [32, 144, 288, 576, 1296], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", 
"s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_080": { + "channels": [32, 168, 448, 896, 2016], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_080_tv": { + "channels": [32, 224, 448, 896, 2016], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_120": { + "channels": [32, 224, 448, 896, 2240], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_1280": { + "channels": [32, 528, 1056, 2904, 7392], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_160": { + "channels": [32, 224, 448, 1232, 3024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_2560": { + "channels": [32, 746, 1492, 4103, 10444], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_320": { + "channels": [32, 232, 696, 1392, 3712], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnety_640": { + "channels": [32, 328, 984, 1968, 4920], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_005": { + "channels": [32, 16, 40, 104, 256], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_040": { + "channels": [32, 48, 104, 240, 528], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_040_h": { + "channels": [32, 48, 104, 240, 528], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "s1", "s2", "s3", "s4"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_b16": { + "channels": [32, 48, 96, 192, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_b16_evos": { + "channels": [32, 48, 96, 192, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_c16": { + "channels": [32, 48, 96, 192, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_c16_evos": { + "channels": [32, 48, 96, 192, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_d32": { + "channels": [32, 64, 128, 256, 1792], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_d8": { + "channels": [32, 64, 128, 256, 1792], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_d8_evos": { + "channels": [64, 64, 128, 256, 1792], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "regnetz_e8": { + "channels": [32, 96, 192, 384, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", 
"stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_050": { + "channels": [8, 12, 20, 40, 80], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_058": { + "channels": [12, 16, 24, 48, 92], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_080": { + "channels": [12, 20, 32, 64, 128], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_100": { + "channels": [16, 24, 40, 80, 160], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_111": { + "channels": [16, 28, 44, 88, 176], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_130": { + "channels": [20, 32, 52, 104, 208], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_150": { + "channels": [24, 36, 60, 120, 240], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repghostnet_200": { + "channels": [32, 48, 80, 160, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["conv_stem", "blocks.1", "blocks.3", "blocks.5", "blocks.7"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_a0": { + "channels": [48, 48, 96, 192, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_a1": { + "channels": [64, 64, 128, 256, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_a2": { + "channels": [64, 96, 192, 384, 1408], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_b0": { + "channels": [64, 64, 128, 256, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_b1": { + "channels": [64, 128, 256, 512, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_b1g4": { + "channels": [64, 128, 256, 512, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_b2": { + "channels": [64, 160, 320, 640, 2560], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_b2g4": { + "channels": [64, 160, 320, 640, 2560], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_b3": { + "channels": [64, 192, 384, 768, 2560], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_b3g4": { + "channels": [64, 192, 384, 768, 
2560], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvgg_d2se": { + "channels": [64, 160, 320, 640, 2560], + "indices": (0, 1, 2, 3, 4), + "module": ["stem", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "repvit_m0_9": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "repvit_m1": { + "channels": [48, 96, 192, 384], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "repvit_m1_0": { + "channels": [56, 112, 224, 448], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "repvit_m1_1": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "repvit_m1_5": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "repvit_m2": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "repvit_m2_3": { + "channels": [80, 160, 320, 640], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "repvit_m3": { + "channels": [64, 128, 256, 512], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "res2net101_26w_4s": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2net101d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2net50_14w_8s": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2net50_26w_4s": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2net50_26w_6s": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2net50_26w_8s": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2net50_48w_2s": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2net50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "res2next50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest101e": { + 
"channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest14d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest200e": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest269e": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest26d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest50d_1s4x24d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnest50d_4s2x40d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet101c": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet101d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet101s": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet10t": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet14t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet152": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet152c": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet152d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet152s": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet18": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", 
"layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet18d": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet200": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet200d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet26": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet26d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet26t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet32ts": { + "channels": [32, 256, 512, 1536, 1536], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet33ts": { + "channels": [32, 256, 512, 1536, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet34": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet34d": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet50_gn": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet50c": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet50s": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet50t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet51q": { + "channels": [64, 256, 512, 1536, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnet61q": { + "channels": [64, 256, 512, 1536, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + 
"resnetaa101d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetaa34d": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetaa50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetaa50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetblur101d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetblur18": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetblur50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetblur50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetrs101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetrs152": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetrs200": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetrs270": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetrs350": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetrs420": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetrs50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_101d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_101x1_bit": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_101x3_bit": { + "channels": [192, 768, 
1536, 3072, 6144], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_152": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_152d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_152x2_bit": { + "channels": [128, 512, 1024, 2048, 4096], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_152x4_bit": { + "channels": [256, 1024, 2048, 4096, 8192], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50d_evos": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50d_frn": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50d_gn": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50x1_bit": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnetv2_50x3_bit": { + "channels": [192, 768, 1536, 3072, 6144], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext101_32x16d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext101_32x32d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext101_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext101_32x8d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext101_64x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 
1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext26ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext50_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "resnext50d_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnet_100": { + "channels": [16, 38, 61, 128, 185], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnet_130": { + "channels": [21, 50, 79, 167, 240], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnet_150": { + "channels": [24, 58, 92, 193, 277], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnet_200": { + "channels": [32, 77, 122, 257, 370], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnet_300": { + "channels": [48, 116, 183, 386, 554], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnetr_100": { + "channels": [16, 40, 64, 128, 184], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnetr_130": { + "channels": [24, 48, 80, 168, 240], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnetr_150": { + "channels": [24, 56, 96, 192, 280], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnetr_200": { + "channels": [32, 80, 120, 256, 368], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "rexnetr_300": { + "channels": [48, 112, 176, 384, 560], + "indices": (0, 1, 2, 3, 4), + "module": [ + "features.0", + "features.2", + "features.4", + "features.10", + "features.15", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "sebotnet33ts_256": { + "channels": [32, 256, 512, 1024, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "sedarknet21": { + "channels": [32, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [1, 2, 4, 8, 16], + }, + "sehalonet33ts": { + "channels": [32, 256, 512, 1024, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", 
"stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "selecsls42": { + "channels": [32, 128, 288, 480, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.2", "features.1", "features.3", "features.5", "head.1"], + "reduction": [2, 4, 8, 16, 32], + }, + "selecsls42b": { + "channels": [32, 128, 288, 480, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.2", "features.1", "features.3", "features.5", "head.1"], + "reduction": [2, 4, 8, 16, 32], + }, + "selecsls60": { + "channels": [32, 128, 288, 416, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.2", "features.1", "features.4", "features.8", "head.1"], + "reduction": [2, 4, 8, 16, 32], + }, + "selecsls60b": { + "channels": [32, 128, 288, 416, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.2", "features.1", "features.4", "features.8", "head.1"], + "reduction": [2, 4, 8, 16, 32], + }, + "selecsls84": { + "channels": [32, 144, 304, 512, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.2", "features.1", "features.6", "features.12", "head.1"], + "reduction": [2, 4, 8, 16, 32], + }, + "semnasnet_050": { + "channels": [8, 16, 24, 56, 160], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "semnasnet_075": { + "channels": [16, 24, 32, 88, 240], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "semnasnet_100": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "semnasnet_140": { + "channels": [24, 32, 56, 160, 448], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "senet154": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "sequencer2d_l": { + "channels": [192, 384, 384], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [7, 14, 14], + }, + "sequencer2d_m": { + "channels": [192, 384, 384], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [7, 14, 14], + }, + "sequencer2d_s": { + "channels": [192, 384, 384], + "indices": (0, 1, 2), + "module": ["stages.0", "stages.1", "stages.2"], + "reduction": [7, 14, 14], + }, + "seresnet101": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet152": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet152d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet18": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet200d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet269d": { + "channels": 
[64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet33ts": { + "channels": [32, 256, 512, 1536, 1280], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv2", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet34": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet50": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnet50t": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnetaa50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext101_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext101_32x8d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext101_64x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext101d_32x8d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext26d_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext26t_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext26ts": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.conv3", "stages.0", "stages.1", "stages.2", "final_conv"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnext50_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnextaa101d_32x8d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "seresnextaa201d_32x8d": { + "channels": [128, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "skresnet18": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "skresnet34": { + "channels": [64, 64, 128, 256, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "skresnet50": { + "channels": [64, 
256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "skresnet50d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "skresnext50_32x4d": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "spnasnet_100": { + "channels": [16, 24, 40, 96, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "swin_base_patch4_window12_384": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_base_patch4_window7_224": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_large_patch4_window12_384": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_large_patch4_window7_224": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_s3_base_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_s3_small_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_s3_tiny_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_small_patch4_window7_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swin_tiny_patch4_window7_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_base_window12_192": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_base_window12to16_192to256": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_base_window12to24_192to384": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_base_window16_256": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_base_window8_256": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_base_224": { + "channels": [128, 256, 512, 1024], + "indices": (0, 
1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_base_384": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_base_ns_224": { + "channels": [128, 256, 512, 1024], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_giant_224": { + "channels": [512, 1024, 2048, 4096], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_giant_384": { + "channels": [512, 1024, 2048, 4096], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_huge_224": { + "channels": [352, 704, 1408, 2816], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_huge_384": { + "channels": [352, 704, 1408, 2816], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_large_224": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_large_384": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_small_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_small_384": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_small_ns_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_small_ns_256": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_tiny_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_tiny_384": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_cr_tiny_ns_224": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_large_window12_192": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_large_window12to16_192to256": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_large_window12to24_192to384": { + "channels": [192, 384, 768, 1536], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + 
"swinv2_small_window16_256": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_small_window8_256": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_tiny_window16_256": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "swinv2_tiny_window8_256": { + "channels": [96, 192, 384, 768], + "indices": (0, 1, 2, 3), + "module": ["layers.0", "layers.1", "layers.2", "layers.3"], + "reduction": [4, 8, 16, 32], + }, + "tf_efficientnet_b0": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b1": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b2": { + "channels": [16, 24, 48, 120, 352], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b3": { + "channels": [24, 32, 48, 136, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b4": { + "channels": [24, 32, 56, 160, 448], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b5": { + "channels": [24, 40, 64, 176, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b6": { + "channels": [32, 40, 72, 200, 576], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b7": { + "channels": [32, 48, 80, 224, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_b8": { + "channels": [32, 56, 88, 248, 704], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_cc_b0_4e": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_cc_b0_8e": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_cc_b1_8e": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_el": { + "channels": [32, 40, 56, 176, 232], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_em": { + "channels": [24, 32, 48, 144, 192], + "indices": 
(0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_es": { + "channels": [24, 32, 48, 144, 192], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_l2": { + "channels": [72, 104, 176, 480, 1376], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_lite0": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_lite1": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_lite2": { + "channels": [16, 24, 48, 120, 352], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_lite3": { + "channels": [24, 32, 48, 136, 384], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnet_lite4": { + "channels": [24, 32, 56, 160, 448], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_b0": { + "channels": [16, 32, 48, 112, 192], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_b1": { + "channels": [16, 32, 48, 112, 192], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_b2": { + "channels": [16, 32, 56, 120, 208], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_b3": { + "channels": [16, 40, 56, 136, 232], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_l": { + "channels": [32, 64, 96, 224, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_m": { + "channels": [24, 48, 80, 176, 512], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_s": { + "channels": [24, 48, 64, 160, 256], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_efficientnetv2_xl": { + "channels": [32, 64, 96, 256, 640], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mixnet_l": { + "channels": [32, 40, 56, 160, 264], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mixnet_m": { + "channels": [24, 32, 40, 120, 200], + "indices": (0, 1, 2, 3, 4), + 
"module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mixnet_s": { + "channels": [16, 24, 40, 120, 200], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mobilenetv3_large_075": { + "channels": [16, 24, 32, 88, 720], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mobilenetv3_large_100": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mobilenetv3_large_minimal_100": { + "channels": [16, 24, 40, 112, 960], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mobilenetv3_small_075": { + "channels": [16, 16, 24, 40, 432], + "indices": (0, 1, 2, 3, 4), + "module": ["bn1", "blocks.0", "blocks.1", "blocks.3", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mobilenetv3_small_100": { + "channels": [16, 16, 24, 48, 576], + "indices": (0, 1, 2, 3, 4), + "module": ["bn1", "blocks.0", "blocks.1", "blocks.3", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tf_mobilenetv3_small_minimal_100": { + "channels": [16, 16, 24, 48, 576], + "indices": (0, 1, 2, 3, 4), + "module": ["bn1", "blocks.0", "blocks.1", "blocks.3", "blocks.5"], + "reduction": [2, 4, 8, 16, 32], + }, + "tiny_vit_11m_224": { + "channels": [64, 128, 256, 448], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "tiny_vit_21m_224": { + "channels": [96, 192, 384, 576], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "tiny_vit_21m_384": { + "channels": [96, 192, 384, 576], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "tiny_vit_21m_512": { + "channels": [96, 192, 384, 576], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "tiny_vit_5m_224": { + "channels": [64, 128, 160, 320], + "indices": (0, 1, 2, 3), + "module": ["stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [4, 8, 16, 32], + }, + "tinynet_a": { + "channels": [16, 24, 40, 112, 320], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tinynet_b": { + "channels": [16, 24, 32, 88, 240], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tinynet_c": { + "channels": [8, 16, 24, 64, 176], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tinynet_d": { + "channels": [8, 16, 24, 64, 176], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tinynet_e": { + "channels": [8, 16, 24, 56, 160], + "indices": (0, 1, 2, 3, 4), + "module": ["blocks.0", "blocks.1", "blocks.2", "blocks.4", "blocks.6"], + "reduction": [2, 4, 8, 16, 32], + }, + "tresnet_l": { + "channels": [76, 152, 1216, 2432], + "indices": 
(1, 2, 3, 4), + "module": ["body.layer1", "body.layer2", "body.layer3", "body.layer4"], + "reduction": [4, 8, 16, 32], + }, + "tresnet_m": { + "channels": [64, 128, 1024, 2048], + "indices": (1, 2, 3, 4), + "module": ["body.layer1", "body.layer2", "body.layer3", "body.layer4"], + "reduction": [4, 8, 16, 32], + }, + "tresnet_v2_l": { + "channels": [256, 512, 1024, 2048], + "indices": (1, 2, 3, 4), + "module": ["body.layer1", "body.layer2", "body.layer3", "body.layer4"], + "reduction": [4, 8, 16, 32], + }, + "tresnet_xl": { + "channels": [83, 166, 1328, 2656], + "indices": (1, 2, 3, 4), + "module": ["body.layer1", "body.layer2", "body.layer3", "body.layer4"], + "reduction": [4, 8, 16, 32], + }, + "vgg11": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.1", + "features.4", + "features.9", + "features.14", + "features.19", + "features.20", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vgg11_bn": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.2", + "features.6", + "features.13", + "features.20", + "features.27", + "features.28", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vgg13": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.3", + "features.8", + "features.13", + "features.18", + "features.23", + "features.24", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vgg13_bn": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.5", + "features.12", + "features.19", + "features.26", + "features.33", + "features.34", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vgg16": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.3", + "features.8", + "features.15", + "features.22", + "features.29", + "features.30", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vgg16_bn": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.5", + "features.12", + "features.22", + "features.32", + "features.42", + "features.43", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vgg19": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.3", + "features.8", + "features.17", + "features.26", + "features.35", + "features.36", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vgg19_bn": { + "channels": [64, 128, 256, 512, 512, 512], + "indices": (0, 1, 2, 3, 4, 5), + "module": [ + "features.5", + "features.12", + "features.25", + "features.38", + "features.51", + "features.52", + ], + "reduction": [1, 2, 4, 8, 16, 32], + }, + "vovnet39a": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "vovnet57a": { + "channels": [64, 256, 512, 768, 1024], + "indices": (0, 1, 2, 3, 4), + "module": ["stem.1", "stages.0", "stages.1", "stages.2", "stages.3"], + "reduction": [2, 4, 8, 16, 32], + }, + "wide_resnet101_2": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "wide_resnet50_2": { + "channels": [64, 256, 512, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": ["act1", "layer1", "layer2", "layer3", "layer4"], + "reduction": [2, 4, 8, 16, 32], + }, + "xception41": { + "channels": 
[128, 256, 728, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": [ + "blocks.0.stack.act3", + "blocks.1.stack.act3", + "blocks.2.stack.act3", + "blocks.11.stack.act3", + "blocks.12", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "xception41p": { + "channels": [128, 256, 728, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": [ + "blocks.0.stack.conv2", + "blocks.1.stack.conv2", + "blocks.2.stack.conv2", + "blocks.11.stack.conv2", + "blocks.12", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "xception65": { + "channels": [128, 256, 728, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": [ + "blocks.0.stack.act3", + "blocks.1.stack.act3", + "blocks.2.stack.act3", + "blocks.19.stack.act3", + "blocks.20", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "xception65p": { + "channels": [128, 256, 728, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": [ + "blocks.0.stack.conv2", + "blocks.1.stack.conv2", + "blocks.2.stack.conv2", + "blocks.19.stack.conv2", + "blocks.20", + ], + "reduction": [2, 4, 8, 16, 32], + }, + "xception71": { + "channels": [128, 256, 728, 1024, 2048], + "indices": (0, 1, 2, 3, 4), + "module": [ + "blocks.0.stack.act3", + "blocks.2.stack.act3", + "blocks.4.stack.act3", + "blocks.21.stack.act3", + "blocks.22", + ], + "reduction": [2, 4, 8, 16, 32], + }, +} + +# 109/1017 support get_intermediate_layers +TIMM_VIT_ENCODERS = [ + "deit3_base_patch16_224", + "deit3_base_patch16_384", + "deit3_huge_patch14_224", + "deit3_large_patch16_224", + "deit3_large_patch16_384", + "deit3_medium_patch16_224", + "deit3_small_patch16_224", + "deit3_small_patch16_384", + "deit_base_distilled_patch16_224", + "deit_base_distilled_patch16_384", + "deit_base_patch16_224", + "deit_base_patch16_384", + "deit_small_distilled_patch16_224", + "deit_small_patch16_224", + "deit_tiny_distilled_patch16_224", + "deit_tiny_patch16_224", + "eva_large_patch14_196", + "eva_large_patch14_336", + "flexivit_base", + "flexivit_large", + "flexivit_small", + "vit_base_patch8_224", + "vit_base_patch14_dinov2", + "vit_base_patch14_reg4_dinov2", + "vit_base_patch16_18x2_224", + "vit_base_patch16_224", + "vit_base_patch16_224_miil", + "vit_base_patch16_384", + "vit_base_patch16_clip_224", + "vit_base_patch16_clip_384", + "vit_base_patch16_clip_quickgelu_224", + "vit_base_patch16_gap_224", + "vit_base_patch16_plus_240", + "vit_base_patch16_reg8_gap_256", + "vit_base_patch16_rpn_224", + "vit_base_patch16_siglip_224", + "vit_base_patch16_siglip_256", + "vit_base_patch16_siglip_384", + "vit_base_patch16_siglip_512", + "vit_base_patch16_xp_224", + "vit_base_patch32_224", + "vit_base_patch32_384", + "vit_base_patch32_clip_224", + "vit_base_patch32_clip_256", + "vit_base_patch32_clip_384", + "vit_base_patch32_clip_448", + "vit_base_patch32_clip_quickgelu_224", + "vit_base_patch32_plus_256", + "vit_base_r26_s32_224", + "vit_base_r50_s16_224", + "vit_base_r50_s16_384", + "vit_base_resnet26d_224", + "vit_base_resnet50d_224", + "vit_giant_patch14_224", + "vit_giant_patch14_clip_224", + "vit_giant_patch14_dinov2", + "vit_giant_patch14_reg4_dinov2", + "vit_giant_patch16_gap_224", + "vit_gigantic_patch14_224", + "vit_gigantic_patch14_clip_224", + "vit_huge_patch14_224", + "vit_huge_patch14_clip_224", + "vit_huge_patch14_clip_336", + "vit_huge_patch14_clip_378", + "vit_huge_patch14_clip_quickgelu_224", + "vit_huge_patch14_clip_quickgelu_378", + "vit_huge_patch14_gap_224", + "vit_huge_patch14_xp_224", + "vit_huge_patch16_gap_448", + "vit_large_patch14_224", + "vit_large_patch14_clip_224", + "vit_large_patch14_clip_336", + 
"vit_large_patch14_clip_quickgelu_224", + "vit_large_patch14_clip_quickgelu_336", + "vit_large_patch14_dinov2", + "vit_large_patch14_reg4_dinov2", + "vit_large_patch14_xp_224", + "vit_large_patch16_224", + "vit_large_patch16_384", + "vit_large_patch16_siglip_256", + "vit_large_patch16_siglip_384", + "vit_large_patch32_224", + "vit_large_patch32_384", + "vit_large_r50_s32_224", + "vit_large_r50_s32_384", + "vit_medium_patch16_gap_240", + "vit_medium_patch16_gap_256", + "vit_medium_patch16_gap_384", + "vit_medium_patch16_reg4_256", + "vit_medium_patch16_reg4_gap_256", + "vit_small_patch8_224", + "vit_small_patch14_dinov2", + "vit_small_patch14_reg4_dinov2", + "vit_small_patch16_18x2_224", + "vit_small_patch16_36x1_224", + "vit_small_patch16_224", + "vit_small_patch16_384", + "vit_small_patch32_224", + "vit_small_patch32_384", + "vit_small_r26_s32_224", + "vit_small_r26_s32_384", + "vit_small_resnet26d_224", + "vit_small_resnet50d_s16_224", + "vit_so400m_patch14_siglip_224", + "vit_so400m_patch14_siglip_384", + "vit_tiny_patch16_224", + "vit_tiny_patch16_384", + "vit_tiny_r_s16_p8_224", + "vit_tiny_r_s16_p8_384", +] + +UNSUPPORTED_ENCODERS = [ + "beit_base_patch16_224", + "beit_base_patch16_384", + "beit_large_patch16_224", + "beit_large_patch16_384", + "beit_large_patch16_512", + "beitv2_base_patch16_224", + "beitv2_large_patch16_224", + "cait_m36_384", + "cait_m48_448", + "cait_s24_224", + "cait_s24_384", + "cait_s36_384", + "cait_xs24_384", + "cait_xxs24_224", + "cait_xxs24_384", + "cait_xxs36_224", + "cait_xxs36_384", + "coat_lite_medium", + "coat_lite_medium_384", + "coat_lite_mini", + "coat_lite_small", + "coat_lite_tiny", + "coat_mini", + "coat_small", + "coat_tiny", + "convit_base", + "convit_small", + "convit_tiny", + "convmixer_768_32", + "convmixer_1024_20_ks9_p14", + "convmixer_1536_20", + "crossvit_9_240", + "crossvit_9_dagger_240", + "crossvit_15_240", + "crossvit_15_dagger_240", + "crossvit_15_dagger_408", + "crossvit_18_240", + "crossvit_18_dagger_240", + "crossvit_18_dagger_408", + "crossvit_base_240", + "crossvit_small_240", + "crossvit_tiny_240", + "efficientformer_l1", + "efficientformer_l3", + "efficientformer_l7", + "eva02_base_patch14_224", + "eva02_base_patch14_448", + "eva02_base_patch16_clip_224", + "eva02_enormous_patch14_clip_224", + "eva02_large_patch14_224", + "eva02_large_patch14_448", + "eva02_large_patch14_clip_224", + "eva02_large_patch14_clip_336", + "eva02_small_patch14_224", + "eva02_small_patch14_336", + "eva02_tiny_patch14_224", + "eva02_tiny_patch14_336", + "eva_giant_patch14_224", + "eva_giant_patch14_336", + "eva_giant_patch14_560", + "eva_giant_patch14_clip_224", + "gcvit_base", + "gcvit_small", + "gcvit_tiny", + "gcvit_xtiny", + "gcvit_xxtiny", + "gmixer_12_224", + "gmixer_24_224", + "gmlp_b16_224", + "gmlp_s16_224", + "gmlp_ti16_224", + "levit_128", + "levit_128s", + "levit_192", + "levit_256", + "levit_256d", + "levit_384", + "levit_384_s8", + "levit_512", + "levit_512_s8", + "levit_512d", + "mixer_b16_224", + "mixer_b32_224", + "mixer_l16_224", + "mixer_l32_224", + "mixer_s16_224", + "mixer_s32_224", + "mvitv2_base", + "mvitv2_base_cls", + "mvitv2_huge_cls", + "mvitv2_large", + "mvitv2_large_cls", + "mvitv2_small", + "mvitv2_small_cls", + "mvitv2_tiny", + "resmlp_12_224", + "resmlp_24_224", + "resmlp_36_224", + "resmlp_big_24_224", + "samvit_base_patch16", + "samvit_base_patch16_224", + "samvit_huge_patch16", + "samvit_large_patch16", + "tnt_b_patch16_224", + "tnt_s_patch16_224", + "twins_pcpvt_base", + "twins_pcpvt_large", + "twins_pcpvt_small", 
+ "twins_svt_base", + "twins_svt_large", + "twins_svt_small", + "visformer_small", + "visformer_tiny", + "vit_relpos_base_patch16_224", + "vit_relpos_base_patch16_cls_224", + "vit_relpos_base_patch16_clsgap_224", + "vit_relpos_base_patch16_plus_240", + "vit_relpos_base_patch16_rpn_224", + "vit_relpos_base_patch32_plus_rpn_256", + "vit_relpos_medium_patch16_224", + "vit_relpos_medium_patch16_cls_224", + "vit_relpos_medium_patch16_rpn_224", + "vit_relpos_small_patch16_224", + "vit_relpos_small_patch16_rpn_224", + "vit_srelpos_medium_patch16_224", + "vit_srelpos_small_patch16_224", + "volo_d1_224", + "volo_d1_384", + "volo_d2_224", + "volo_d2_384", + "volo_d3_224", + "volo_d3_448", + "volo_d4_224", + "volo_d4_448", + "volo_d5_224", + "volo_d5_448", + "volo_d5_512", + "xcit_large_24_p8_224", + "xcit_large_24_p8_384", + "xcit_large_24_p16_224", + "xcit_large_24_p16_384", + "xcit_medium_24_p8_224", + "xcit_medium_24_p8_384", + "xcit_medium_24_p16_224", + "xcit_medium_24_p16_384", + "xcit_nano_12_p8_224", + "xcit_nano_12_p8_384", + "xcit_nano_12_p16_224", + "xcit_nano_12_p16_384", + "xcit_small_12_p8_224", + "xcit_small_12_p8_384", + "xcit_small_12_p16_224", + "xcit_small_12_p16_384", + "xcit_small_24_p8_224", + "xcit_small_24_p8_384", + "xcit_small_24_p16_224", + "xcit_small_24_p16_384", + "xcit_tiny_12_p8_224", + "xcit_tiny_12_p8_384", + "xcit_tiny_12_p16_224", + "xcit_tiny_12_p16_384", + "xcit_tiny_24_p8_224", + "xcit_tiny_24_p8_384", + "xcit_tiny_24_p16_224", + "xcit_tiny_24_p16_384", +] diff --git a/torchseg/encoders/timm.py b/torchseg/encoders/timm.py new file mode 100644 index 00000000..1304554f --- /dev/null +++ b/torchseg/encoders/timm.py @@ -0,0 +1,147 @@ +import timm +import torch.nn as nn +from einops import rearrange + +from .supported import TIMM_ENCODERS + + +class TimmEncoder(nn.Module): + def __init__( + self, + name, + pretrained=True, + in_channels=3, + depth=None, + indices=None, + output_stride=None, + **kwargs, + ): + super().__init__() + + assert ( + depth is not None or indices is not None + ), "Either `depth` or `indices` should be specified" + + if indices is not None: + depth = len(indices) + else: + indices = tuple(range(depth)) + + total_depth = TIMM_ENCODERS[name.split(".")[0]]["indices"] + if len(total_depth) < depth: + err = f""" + The specified depth={depth}or indices={indices} is greater than + the maximum available depth={total_depth} for the {name} encoder. + """ + raise ValueError(err) + + params = dict( + in_chans=in_channels, + features_only=True, + pretrained=pretrained, + out_indices=indices, + ) + + if output_stride is not None: + params["output_stride"] = output_stride + + if "densenet" in name and "output_stride" in params: + params.pop("output_stride") + + params.update(kwargs) + + self.model = timm.create_model(name, **params) + self.in_channels = in_channels + self.indices = indices + self.depth = depth + self.output_stride = 32 if output_stride is None else output_stride + self.out_channels = [self.in_channels] + self.model.feature_info.channels() + self.reductions = [1] + self.model.feature_info.reduction() + self.fix_padding() + + def fix_padding(self): + """ + Some models like inceptionv4 or inceptionresnetv2 3x3 kernels with no padding + resulting in odd numbered feature height/width dimensions. 
+        no padding, giving odd feature height/width dims. Update padding to (1, 1).
+        """
+        for m in self.model.modules():
+            if isinstance(m, nn.Conv2d):
+                if m.kernel_size == (3, 3) and m.padding == (0, 0):
+                    m.padding = (1, 1)
+            if isinstance(m, nn.MaxPool2d) and m.padding == 0:
+                m.padding = 1
+
+    def forward(self, x):
+        features = self.model(x)
+        features = [x] + features
+
+        # Check for swin-like models returning features as channels last
+        for i in range(len(features)):
+            if (
+                not features[i].shape[1] == self.out_channels[i]
+                and features[i].shape[-1] == self.out_channels[i]
+            ):
+                features[i] = rearrange(features[i], "b h w c -> b c h w")
+
+        return features
+
+
+class TimmViTEncoder(nn.Module):
+    def __init__(
+        self,
+        name,
+        pretrained=True,
+        in_channels=3,
+        depth=None,
+        indices=None,
+        norm=True,
+        scale_factors=None,
+        **kwargs,
+    ):
+        super().__init__()
+
+        assert (
+            depth is not None or indices is not None
+        ), "Either `depth` or `indices` should be specified"
+
+        if indices is not None:
+            depth = len(indices)
+        else:
+            indices = tuple(range(depth))
+
+        params = dict(in_chans=in_channels, pretrained=pretrained)
+        params.update(kwargs)
+
+        self.model = timm.create_model(name, **params)
+        self.in_channels = in_channels
+        self.indices = indices
+        self.depth = depth
+        self.out_channels = [in_channels] + [self.model.num_features] * self.depth
+        self.patch_size = self.model.patch_embed.patch_size[0]
+        self.image_size = self.model.patch_embed.img_size[0]
+        self.num_tokens = (self.image_size // self.patch_size) ** 2
+        self.output_stride = self.patch_size
+        self.reductions = [1] + [self.patch_size] * self.depth
+        self.norm = norm
+
+        if scale_factors is not None:
+            err = f"`scale_factors` must be the same length as `depth`. Got {len(scale_factors)} != {self.depth}"  # noqa: E501
+            assert len(scale_factors) == self.depth, err
+            self.scale_factors = scale_factors
+        else:
+            self.scale_factors = [1] * self.depth
+
+        self.upsample = nn.ModuleList(
+            [
+                nn.UpsamplingBilinear2d(scale_factor=scale)
+                for scale in self.scale_factors
+            ]
+        )
+
+    def forward(self, x):
+        features = self.model.get_intermediate_layers(
+            x, n=self.indices, reshape=True, return_prefix_tokens=False, norm=self.norm
+        )
+        features = [up(feat) for up, feat in zip(self.upsample, features)]
+        features = [x] + features
+        return features
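For orientation before the legacy encoder modules are deleted below, here is a minimal sketch of how the two new wrappers are constructed and called. It assumes the package layout introduced in this patch; the encoder names, indices, and input shape are illustrative only:

```python
import torch

from torchseg.encoders.timm import TimmEncoder, TimmViTEncoder

# CNN-style backbone: forward() returns [input, stage1, ..., stageN] maps.
encoder = TimmEncoder("resnet50", pretrained=False, in_channels=3, depth=5)
features = encoder(torch.randn(1, 3, 224, 224))
assert len(features) == 6  # input tensor + 5 feature maps

# ViT backbone: intermediate transformer blocks, reshaped to 2D feature maps.
vit = TimmViTEncoder("vit_small_patch16_224", pretrained=False, indices=(2, 5, 8, 11))
maps = vit(torch.randn(1, 3, 224, 224))  # [input] + 4 maps at stride 16
```

diff --git a/torchseg/encoders/timm_efficientnet.py b/torchseg/encoders/timm_efficientnet.py
deleted file mode 100644
index 8516a7d3..00000000
--- a/torchseg/encoders/timm_efficientnet.py
+++ /dev/null
@@ -1,459 +0,0 @@
-from functools import partial
-
-import torch.nn as nn
-from timm.layers.activations import Swish
-from timm.models.efficientnet import (
-    EfficientNet,
-    decode_arch_def,
-    default_cfgs,
-    round_channels,
-)
-
-from ._base import EncoderMixin
-
-
-def get_efficientnet_kwargs(
-    channel_multiplier=1.0, depth_multiplier=1.0, drop_rate=0.2
-):
-    """Create EfficientNet model.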
- Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py # noqa: E501 - Paper: https://arxiv.org/abs/1905.11946 - EfficientNet params - name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) - 'efficientnet-b0': (1.0, 1.0, 224, 0.2), - 'efficientnet-b1': (1.0, 1.1, 240, 0.2), - 'efficientnet-b2': (1.1, 1.2, 260, 0.3), - 'efficientnet-b3': (1.2, 1.4, 300, 0.3), - 'efficientnet-b4': (1.4, 1.8, 380, 0.4), - 'efficientnet-b5': (1.6, 2.2, 456, 0.4), - 'efficientnet-b6': (1.8, 2.6, 528, 0.5), - 'efficientnet-b7': (2.0, 3.1, 600, 0.5), - 'efficientnet-b8': (2.2, 3.6, 672, 0.5), - 'efficientnet-l2': (4.3, 5.3, 800, 0.5), - Args: - channel_multiplier: multiplier to number of channels per layer - depth_multiplier: multiplier to number of repeats per stage - """ - arch_def = [ - ["ds_r1_k3_s1_e1_c16_se0.25"], - ["ir_r2_k3_s2_e6_c24_se0.25"], - ["ir_r2_k5_s2_e6_c40_se0.25"], - ["ir_r3_k3_s2_e6_c80_se0.25"], - ["ir_r3_k5_s1_e6_c112_se0.25"], - ["ir_r4_k5_s2_e6_c192_se0.25"], - ["ir_r1_k3_s1_e6_c320_se0.25"], - ] - model_kwargs = dict( - block_args=decode_arch_def(arch_def, depth_multiplier), - num_features=round_channels(1280, channel_multiplier, 8, None), - stem_size=32, - round_chs_fn=partial(round_channels, multiplier=channel_multiplier), - act_layer=Swish, - drop_rate=drop_rate, - drop_path_rate=0.2, - ) - return model_kwargs - - -def gen_efficientnet_lite_kwargs( - channel_multiplier=1.0, depth_multiplier=1.0, drop_rate=0.2 -): - """EfficientNet-Lite model. - - Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite # noqa: E501 - Paper: https://arxiv.org/abs/1905.11946 - - EfficientNet params - name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) - 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), - 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), - 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), - 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), - 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), - - Args: - channel_multiplier: multiplier to number of channels per layer - depth_multiplier: multiplier to number of repeats per stage - """ - arch_def = [ - ["ds_r1_k3_s1_e1_c16"], - ["ir_r2_k3_s2_e6_c24"], - ["ir_r2_k5_s2_e6_c40"], - ["ir_r3_k3_s2_e6_c80"], - ["ir_r3_k5_s1_e6_c112"], - ["ir_r4_k5_s2_e6_c192"], - ["ir_r1_k3_s1_e6_c320"], - ] - model_kwargs = dict( - block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), - num_features=1280, - stem_size=32, - fix_stem=True, - round_chs_fn=partial(round_channels, multiplier=channel_multiplier), - act_layer=nn.ReLU6, - drop_rate=drop_rate, - drop_path_rate=0.2, - ) - return model_kwargs - - -class EfficientNetBaseEncoder(EfficientNet, EncoderMixin): - def __init__(self, stage_idxs, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - - self._stage_idxs = stage_idxs - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - del self.classifier - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential(self.conv_stem, self.bn1), - self.blocks[: self._stage_idxs[0]], - self.blocks[self._stage_idxs[0] : self._stage_idxs[1]], - self.blocks[self._stage_idxs[1] : self._stage_idxs[2]], - self.blocks[self._stage_idxs[2] :], - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("classifier.bias", None) - 
state_dict.pop("classifier.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -class EfficientNetEncoder(EfficientNetBaseEncoder): - def __init__( - self, - stage_idxs, - out_channels, - depth=5, - channel_multiplier=1.0, - depth_multiplier=1.0, - drop_rate=0.2, - ): - kwargs = get_efficientnet_kwargs( - channel_multiplier, depth_multiplier, drop_rate - ) - super().__init__(stage_idxs, out_channels, depth, **kwargs) - - -class EfficientNetLiteEncoder(EfficientNetBaseEncoder): - def __init__( - self, - stage_idxs, - out_channels, - depth=5, - channel_multiplier=1.0, - depth_multiplier=1.0, - drop_rate=0.2, - ): - kwargs = gen_efficientnet_lite_kwargs( - channel_multiplier, depth_multiplier, drop_rate - ) - super().__init__(stage_idxs, out_channels, depth, **kwargs) - - -def prepare_settings(settings): - return { - "mean": settings.mean, - "std": settings.std, - "url": settings.url, - "input_range": (0, 1), - "input_space": "RGB", - } - - -timm_efficientnet_encoders = { - "timm-efficientnet-b0": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b0"].cfgs["in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b0"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b0"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - "out_channels": (3, 32, 24, 40, 112, 320), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.0, - "depth_multiplier": 1.0, - "drop_rate": 0.2, - }, - }, - "timm-efficientnet-b1": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b1"].cfgs["in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b1"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b1"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - "out_channels": (3, 32, 24, 40, 112, 320), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.0, - "depth_multiplier": 1.1, - "drop_rate": 0.2, - }, - }, - "timm-efficientnet-b2": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b2"].cfgs["in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b2"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b2"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - "out_channels": (3, 32, 24, 48, 120, 352), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.1, - "depth_multiplier": 1.2, - "drop_rate": 0.3, - }, - }, - "timm-efficientnet-b3": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b3"].cfgs["in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b3"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b3"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - "out_channels": (3, 40, 32, 48, 136, 384), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.2, - "depth_multiplier": 1.4, - "drop_rate": 0.3, - }, - }, - "timm-efficientnet-b4": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b4"].cfgs["in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b4"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b4"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - 
"out_channels": (3, 48, 32, 56, 160, 448), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.4, - "depth_multiplier": 1.8, - "drop_rate": 0.4, - }, - }, - "timm-efficientnet-b5": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b5"].cfgs["in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b5"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b5"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - "out_channels": (3, 48, 40, 64, 176, 512), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.6, - "depth_multiplier": 2.2, - "drop_rate": 0.4, - }, - }, - "timm-efficientnet-b6": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b6"].cfgs["aa_in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b6"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b6"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - "out_channels": (3, 56, 40, 72, 200, 576), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.8, - "depth_multiplier": 2.6, - "drop_rate": 0.5, - }, - }, - "timm-efficientnet-b7": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b7"].cfgs["aa_in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b7"].cfgs["ap_in1k"] - ), - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_b7"].cfgs["ns_jft_in1k"] - ), - }, - "params": { - "out_channels": (3, 64, 48, 80, 224, 640), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 2.0, - "depth_multiplier": 3.1, - "drop_rate": 0.5, - }, - }, - "timm-efficientnet-b8": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_b8"].cfgs["ra_in1k"] - ), - "advprop": prepare_settings( - default_cfgs["tf_efficientnet_b8"].cfgs["ap_in1k"] - ), - }, - "params": { - "out_channels": (3, 72, 56, 88, 248, 704), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 2.2, - "depth_multiplier": 3.6, - "drop_rate": 0.5, - }, - }, - "timm-efficientnet-l2": { - "encoder": EfficientNetEncoder, - "pretrained_settings": { - "noisy-student": prepare_settings( - default_cfgs["tf_efficientnet_l2"].cfgs["ns_jft_in1k"] - ), - "noisy-student-475": prepare_settings( - default_cfgs["tf_efficientnet_l2"].cfgs["ns_jft_in1k_475"] - ), - }, - "params": { - "out_channels": (3, 136, 104, 176, 480, 1376), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 4.3, - "depth_multiplier": 5.3, - "drop_rate": 0.5, - }, - }, - "timm-tf_efficientnet_lite0": { - "encoder": EfficientNetLiteEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_lite0"].cfgs["in1k"] - ) - }, - "params": { - "out_channels": (3, 32, 24, 40, 112, 320), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.0, - "depth_multiplier": 1.0, - "drop_rate": 0.2, - }, - }, - "timm-tf_efficientnet_lite1": { - "encoder": EfficientNetLiteEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_lite1"].cfgs["in1k"] - ) - }, - "params": { - "out_channels": (3, 32, 24, 40, 112, 320), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.0, - "depth_multiplier": 1.1, - "drop_rate": 0.2, - }, - }, - "timm-tf_efficientnet_lite2": { - "encoder": EfficientNetLiteEncoder, - "pretrained_settings": { - "imagenet": 
prepare_settings( - default_cfgs["tf_efficientnet_lite2"].cfgs["in1k"] - ) - }, - "params": { - "out_channels": (3, 32, 24, 48, 120, 352), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.1, - "depth_multiplier": 1.2, - "drop_rate": 0.3, - }, - }, - "timm-tf_efficientnet_lite3": { - "encoder": EfficientNetLiteEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_lite3"].cfgs["in1k"] - ) - }, - "params": { - "out_channels": (3, 32, 32, 48, 136, 384), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.2, - "depth_multiplier": 1.4, - "drop_rate": 0.3, - }, - }, - "timm-tf_efficientnet_lite4": { - "encoder": EfficientNetLiteEncoder, - "pretrained_settings": { - "imagenet": prepare_settings( - default_cfgs["tf_efficientnet_lite4"].cfgs["in1k"] - ) - }, - "params": { - "out_channels": (3, 32, 32, 56, 160, 448), - "stage_idxs": (2, 3, 5), - "channel_multiplier": 1.4, - "depth_multiplier": 1.8, - "drop_rate": 0.4, - }, - }, -} diff --git a/torchseg/encoders/timm_gernet.py b/torchseg/encoders/timm_gernet.py deleted file mode 100644 index 345f72dc..00000000 --- a/torchseg/encoders/timm_gernet.py +++ /dev/null @@ -1,124 +0,0 @@ -import torch.nn as nn -from timm.models import ByoBlockCfg, ByobNet, ByoModelCfg - -from ._base import EncoderMixin - - -class GERNetEncoder(ByobNet, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - - del self.head - - def get_stages(self): - return [ - nn.Identity(), - self.stem, - self.stages[0], - self.stages[1], - self.stages[2], - nn.Sequential(self.stages[3], self.stages[4], self.final_conv), - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("head.fc.weight", None) - state_dict.pop("head.fc.bias", None) - super().load_state_dict(state_dict, **kwargs) - - -regnet_weights = { - "timm-gernet_s": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth" # noqa - }, - "timm-gernet_m": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth" # noqa - }, - "timm-gernet_l": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth" # noqa - }, -} - -pretrained_settings = {} -for model_name, sources in regnet_weights.items(): - pretrained_settings[model_name] = {} - for source_name, source_url in sources.items(): - pretrained_settings[model_name][source_name] = { - "url": source_url, - "input_range": [0, 1], - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "num_classes": 1000, - } - -timm_gernet_encoders = { - "timm-gernet_s": { - "encoder": GERNetEncoder, - "pretrained_settings": pretrained_settings["timm-gernet_s"], - "params": { - "out_channels": (3, 13, 48, 48, 384, 1920), - "cfg": ByoModelCfg( - blocks=( - ByoBlockCfg(type="basic", d=1, c=48, s=2, gs=0, br=1.0), - ByoBlockCfg(type="basic", d=3, c=48, s=2, gs=0, br=1.0), - ByoBlockCfg(type="bottle", d=7, c=384, s=2, gs=0, br=1 / 4), - ByoBlockCfg(type="bottle", d=2, c=560, s=2, gs=1, br=3.0), - ByoBlockCfg(type="bottle", d=1, c=256, s=1, gs=1, br=3.0), - ), - stem_chs=13, - stem_pool=None, - num_features=1920, - ), - }, - }, - 
"timm-gernet_m": { - "encoder": GERNetEncoder, - "pretrained_settings": pretrained_settings["timm-gernet_m"], - "params": { - "out_channels": (3, 32, 128, 192, 640, 2560), - "cfg": ByoModelCfg( - blocks=( - ByoBlockCfg(type="basic", d=1, c=128, s=2, gs=0, br=1.0), - ByoBlockCfg(type="basic", d=2, c=192, s=2, gs=0, br=1.0), - ByoBlockCfg(type="bottle", d=6, c=640, s=2, gs=0, br=1 / 4), - ByoBlockCfg(type="bottle", d=4, c=640, s=2, gs=1, br=3.0), - ByoBlockCfg(type="bottle", d=1, c=640, s=1, gs=1, br=3.0), - ), - stem_chs=32, - stem_pool=None, - num_features=2560, - ), - }, - }, - "timm-gernet_l": { - "encoder": GERNetEncoder, - "pretrained_settings": pretrained_settings["timm-gernet_l"], - "params": { - "out_channels": (3, 32, 128, 192, 640, 2560), - "cfg": ByoModelCfg( - blocks=( - ByoBlockCfg(type="basic", d=1, c=128, s=2, gs=0, br=1.0), - ByoBlockCfg(type="basic", d=2, c=192, s=2, gs=0, br=1.0), - ByoBlockCfg(type="bottle", d=6, c=640, s=2, gs=0, br=1 / 4), - ByoBlockCfg(type="bottle", d=5, c=640, s=2, gs=1, br=3.0), - ByoBlockCfg(type="bottle", d=4, c=640, s=1, gs=1, br=3.0), - ), - stem_chs=32, - stem_pool=None, - num_features=2560, - ), - }, - }, -} diff --git a/torchseg/encoders/timm_mobilenetv3.py b/torchseg/encoders/timm_mobilenetv3.py deleted file mode 100644 index 61c5ba3d..00000000 --- a/torchseg/encoders/timm_mobilenetv3.py +++ /dev/null @@ -1,150 +0,0 @@ -import math - -import timm -import torch.nn as nn - -from ._base import EncoderMixin - - -def _make_divisible(x, divisible_by=8): - return int(math.ceil(x * 1.0 / divisible_by) * divisible_by) - - -class MobileNetV3Encoder(nn.Module, EncoderMixin): - def __init__(self, model_name, width_mult, depth=5, **kwargs): - super().__init__() - if "large" not in model_name and "small" not in model_name: - raise ValueError(f"MobileNetV3 wrong model name {model_name}") - - self._mode = "small" if "small" in model_name else "large" - self._depth = depth - self._out_channels = self._get_channels(self._mode, width_mult) - self._in_channels = 3 - - # minimal models replace hardswish with relu - self.model = timm.create_model( - model_name=model_name, - scriptable=True, # torch.jit scriptable - exportable=True, # onnx export - features_only=True, - ) - - def _get_channels(self, mode, width_mult): - if mode == "small": - channels = [16, 16, 24, 48, 576] - else: - channels = [16, 24, 40, 112, 960] - channels = [3] + [_make_divisible(x * width_mult) for x in channels] - return tuple(channels) - - def get_stages(self): - if self._mode == "small": - return [ - nn.Identity(), - nn.Sequential(self.model.conv_stem, self.model.bn1, self.model.act1), - self.model.blocks[0], - self.model.blocks[1], - self.model.blocks[2:4], - self.model.blocks[4:], - ] - elif self._mode == "large": - return [ - nn.Identity(), - nn.Sequential( - self.model.conv_stem, - self.model.bn1, - self.model.act1, - self.model.blocks[0], - ), - self.model.blocks[1], - self.model.blocks[2], - self.model.blocks[3:5], - self.model.blocks[5:], - ] - else: - ValueError(f"MobileNetV3 mode should be small or large, got {self._mode}") - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("conv_head.weight", None) - state_dict.pop("conv_head.bias", None) - state_dict.pop("classifier.weight", None) - state_dict.pop("classifier.bias", None) - self.model.load_state_dict(state_dict, **kwargs) - - 
-mobilenetv3_weights = {
-    "tf_mobilenetv3_large_075": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth"  # noqa
-    },
-    "tf_mobilenetv3_large_100": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth"  # noqa
-    },
-    "tf_mobilenetv3_large_minimal_100": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth"  # noqa
-    },
-    "tf_mobilenetv3_small_075": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth"  # noqa
-    },
-    "tf_mobilenetv3_small_100": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth"  # noqa
-    },
-    "tf_mobilenetv3_small_minimal_100": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth"  # noqa
-    },
-}
-
-pretrained_settings = {}
-for model_name, sources in mobilenetv3_weights.items():
-    pretrained_settings[model_name] = {}
-    for source_name, source_url in sources.items():
-        pretrained_settings[model_name][source_name] = {
-            "url": source_url,
-            "input_range": [0, 1],
-            "mean": [0.485, 0.456, 0.406],
-            "std": [0.229, 0.224, 0.225],
-            "input_space": "RGB",
-        }
-
-
-timm_mobilenetv3_encoders = {
-    "timm-mobilenetv3_large_075": {
-        "encoder": MobileNetV3Encoder,
-        "pretrained_settings": pretrained_settings["tf_mobilenetv3_large_075"],
-        "params": {"model_name": "tf_mobilenetv3_large_075", "width_mult": 0.75},
-    },
-    "timm-mobilenetv3_large_100": {
-        "encoder": MobileNetV3Encoder,
-        "pretrained_settings": pretrained_settings["tf_mobilenetv3_large_100"],
-        "params": {"model_name": "tf_mobilenetv3_large_100", "width_mult": 1.0},
-    },
-    "timm-mobilenetv3_large_minimal_100": {
-        "encoder": MobileNetV3Encoder,
-        "pretrained_settings": pretrained_settings["tf_mobilenetv3_large_minimal_100"],
-        "params": {"model_name": "tf_mobilenetv3_large_minimal_100", "width_mult": 1.0},
-    },
-    "timm-mobilenetv3_small_075": {
-        "encoder": MobileNetV3Encoder,
-        "pretrained_settings": pretrained_settings["tf_mobilenetv3_small_075"],
-        "params": {"model_name": "tf_mobilenetv3_small_075", "width_mult": 0.75},
-    },
-    "timm-mobilenetv3_small_100": {
-        "encoder": MobileNetV3Encoder,
-        "pretrained_settings": pretrained_settings["tf_mobilenetv3_small_100"],
-        "params": {"model_name": "tf_mobilenetv3_small_100", "width_mult": 1.0},
-    },
-    "timm-mobilenetv3_small_minimal_100": {
-        "encoder": MobileNetV3Encoder,
-        "pretrained_settings": pretrained_settings["tf_mobilenetv3_small_minimal_100"],
-        "params": {"model_name": "tf_mobilenetv3_small_minimal_100", "width_mult": 1.0},
-    },
-}
diff --git a/torchseg/encoders/timm_regnet.py b/torchseg/encoders/timm_regnet.py
deleted file mode 100644
index ef3ee016..00000000
--- a/torchseg/encoders/timm_regnet.py
+++ /dev/null
@@ -1,349 +0,0 @@
-import torch.nn as nn
-from timm.models.regnet import RegNet, RegNetCfg
-
-from ._base import EncoderMixin
-
-
-class RegNetEncoder(RegNet, EncoderMixin):
-    def __init__(self, out_channels, depth=5, **kwargs):
-        kwargs["cfg"] = RegNetCfg(**kwargs["cfg"])
-        super().__init__(**kwargs)
-        self._depth = depth
-        self._out_channels = out_channels
-        self._in_channels = 3
-
-        del self.head
-
-    def get_stages(self):
-        return [nn.Identity(), self.stem, self.s1, self.s2, self.s3, self.s4]
-
-    def forward(self, x):
-        stages = self.get_stages()
-
-        features = []
-        for i in range(self._depth + 1):
-            x = stages[i](x)
-            features.append(x)
-
-        return features
-
-    def load_state_dict(self, state_dict, **kwargs):
-        state_dict.pop("head.fc.weight", None)
-        state_dict.pop("head.fc.bias", None)
-        super().load_state_dict(state_dict, **kwargs)
-
-
-regnet_weights = {
-    "timm-regnetx_002": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth"  # noqa
-    },
-    "timm-regnetx_004": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth"  # noqa
-    },
-    "timm-regnetx_006": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth"  # noqa
-    },
-    "timm-regnetx_008": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth"  # noqa
-    },
-    "timm-regnetx_016": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth"  # noqa
-    },
-    "timm-regnetx_032": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth"  # noqa
-    },
-    "timm-regnetx_040": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth"  # noqa
-    },
-    "timm-regnetx_064": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth"  # noqa
-    },
-    "timm-regnetx_080": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth"  # noqa
-    },
-    "timm-regnetx_120": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth"  # noqa
-    },
-    "timm-regnetx_160": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth"  # noqa
-    },
-    "timm-regnetx_320": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth"  # noqa
-    },
-    "timm-regnety_002": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth"  # noqa
-    },
-    "timm-regnety_004": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth"  # noqa
-    },
-    "timm-regnety_006": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth"  # noqa
-    },
-    "timm-regnety_008": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth"  # noqa
-    },
-    "timm-regnety_016": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth"  # noqa
-    },
-    "timm-regnety_032": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth"  # noqa
-    },
-    "timm-regnety_040": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth"  # noqa
-    },
-    "timm-regnety_064": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth"  # noqa
-    },
-    "timm-regnety_080": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth"  # noqa
-    },
-    "timm-regnety_120": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth"  # noqa
-    },
-    "timm-regnety_160": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_160-d64013cd.pth"  # noqa
-    },
-    "timm-regnety_320": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth"  # noqa
-    },
-}
-
-pretrained_settings = {}
-for model_name, sources in regnet_weights.items():
-    pretrained_settings[model_name] = {}
-    for source_name, source_url in sources.items():
-        pretrained_settings[model_name][source_name] = {
-            "url": source_url,
-            "input_size": [3, 224, 224],
-            "input_range": [0, 1],
-            "mean": [0.485, 0.456, 0.406],
-            "std": [0.229, 0.224, 0.225],
-            "num_classes": 1000,
-        }
-
-
-def _mcfg(**kwargs):
-    cfg = dict(se_ratio=0.0, bottle_ratio=1.0, stem_width=32)
-    cfg.update(**kwargs)
-    return cfg
-
-
-timm_regnet_encoders = {
-    "timm-regnetx_002": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_002"],
-        "params": {
-            "out_channels": (3, 32, 24, 56, 152, 368),
-            "cfg": _mcfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13),
-        },
-    },
-    "timm-regnetx_004": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_004"],
-        "params": {
-            "out_channels": (3, 32, 32, 64, 160, 384),
-            "cfg": _mcfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22),
-        },
-    },
-    "timm-regnetx_006": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_006"],
-        "params": {
-            "out_channels": (3, 32, 48, 96, 240, 528),
-            "cfg": _mcfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16),
-        },
-    },
-    "timm-regnetx_008": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_008"],
-        "params": {
-            "out_channels": (3, 32, 64, 128, 288, 672),
-            "cfg": _mcfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16),
-        },
-    },
-    "timm-regnetx_016": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_016"],
-        "params": {
-            "out_channels": (3, 32, 72, 168, 408, 912),
-            "cfg": _mcfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18),
-        },
-    },
-    "timm-regnetx_032": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_032"],
-        "params": {
-            "out_channels": (3, 32, 96, 192, 432, 1008),
-            "cfg": _mcfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25),
-        },
-    },
-    "timm-regnetx_040": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_040"],
-        "params": {
-            "out_channels": (3, 32, 80, 240, 560, 1360),
-            "cfg": _mcfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23),
-        },
-    },
-    "timm-regnetx_064": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_064"],
-        "params": {
-            "out_channels": (3, 32, 168, 392, 784, 1624),
-            "cfg": _mcfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17),
-        },
-    },
-    "timm-regnetx_080": {
-        "encoder": RegNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-regnetx_080"],
-        "params": {
-            "out_channels": (3, 32, 80, 240, 720, 1920),
-            "cfg": _mcfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23),
-        },
-    },
-    "timm-regnetx_120": {
-        "encoder": RegNetEncoder,
"pretrained_settings": pretrained_settings["timm-regnetx_120"], - "params": { - "out_channels": (3, 32, 224, 448, 896, 2240), - "cfg": _mcfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19), - }, - }, - "timm-regnetx_160": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnetx_160"], - "params": { - "out_channels": (3, 32, 256, 512, 896, 2048), - "cfg": _mcfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22), - }, - }, - "timm-regnetx_320": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnetx_320"], - "params": { - "out_channels": (3, 32, 336, 672, 1344, 2520), - "cfg": _mcfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23), - }, - }, - # regnety - "timm-regnety_002": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_002"], - "params": { - "out_channels": (3, 32, 24, 56, 152, 368), - "cfg": _mcfg( - w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25 - ), - }, - }, - "timm-regnety_004": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_004"], - "params": { - "out_channels": (3, 32, 48, 104, 208, 440), - "cfg": _mcfg( - w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25 - ), - }, - }, - "timm-regnety_006": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_006"], - "params": { - "out_channels": (3, 32, 48, 112, 256, 608), - "cfg": _mcfg( - w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25 - ), - }, - }, - "timm-regnety_008": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_008"], - "params": { - "out_channels": (3, 32, 64, 128, 320, 768), - "cfg": _mcfg( - w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25 - ), - }, - }, - "timm-regnety_016": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_016"], - "params": { - "out_channels": (3, 32, 48, 120, 336, 888), - "cfg": _mcfg( - w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25 - ), - }, - }, - "timm-regnety_032": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_032"], - "params": { - "out_channels": (3, 32, 72, 216, 576, 1512), - "cfg": _mcfg( - w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25 - ), - }, - }, - "timm-regnety_040": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_040"], - "params": { - "out_channels": (3, 32, 128, 192, 512, 1088), - "cfg": _mcfg( - w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25 - ), - }, - }, - "timm-regnety_064": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_064"], - "params": { - "out_channels": (3, 32, 144, 288, 576, 1296), - "cfg": _mcfg( - w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25 - ), - }, - }, - "timm-regnety_080": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_080"], - "params": { - "out_channels": (3, 32, 168, 448, 896, 2016), - "cfg": _mcfg( - w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25 - ), - }, - }, - "timm-regnety_120": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_120"], - "params": { - "out_channels": (3, 32, 224, 448, 896, 2240), - "cfg": _mcfg( - w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25 - ), - }, - }, - "timm-regnety_160": { - "encoder": RegNetEncoder, 
- "pretrained_settings": pretrained_settings["timm-regnety_160"], - "params": { - "out_channels": (3, 32, 224, 448, 1232, 3024), - "cfg": _mcfg( - w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25 - ), - }, - }, - "timm-regnety_320": { - "encoder": RegNetEncoder, - "pretrained_settings": pretrained_settings["timm-regnety_320"], - "params": { - "out_channels": (3, 32, 232, 696, 1392, 3712), - "cfg": _mcfg( - w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25 - ), - }, - }, -} diff --git a/torchseg/encoders/timm_res2net.py b/torchseg/encoders/timm_res2net.py deleted file mode 100644 index 24cc7254..00000000 --- a/torchseg/encoders/timm_res2net.py +++ /dev/null @@ -1,164 +0,0 @@ -import torch.nn as nn -from timm.models.res2net import Bottle2neck -from timm.models.resnet import ResNet - -from ._base import EncoderMixin - - -class Res2NetEncoder(ResNet, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - - del self.fc - del self.global_pool - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential(self.conv1, self.bn1, self.act1), - nn.Sequential(self.maxpool, self.layer1), - self.layer2, - self.layer3, - self.layer4, - ] - - def make_dilated(self, *args, **kwargs): - raise ValueError("Res2Net encoders do not support dilated mode") - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("fc.bias", None) - state_dict.pop("fc.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -res2net_weights = { - "timm-res2net50_26w_4s": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth" # noqa - }, - "timm-res2net50_48w_2s": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth" # noqa - }, - "timm-res2net50_14w_8s": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth" # noqa - }, - "timm-res2net50_26w_6s": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth" # noqa - }, - "timm-res2net50_26w_8s": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth" # noqa - }, - "timm-res2net101_26w_4s": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth" # noqa - }, - "timm-res2next50": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth" # noqa - }, -} - -pretrained_settings = {} -for model_name, sources in res2net_weights.items(): - pretrained_settings[model_name] = {} - for source_name, source_url in sources.items(): - pretrained_settings[model_name][source_name] = { - "url": source_url, - "input_size": [3, 224, 224], - "input_range": [0, 1], - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "num_classes": 1000, - } - - -timm_res2net_encoders = { - "timm-res2net50_26w_4s": { - "encoder": Res2NetEncoder, - "pretrained_settings": pretrained_settings["timm-res2net50_26w_4s"], - "params": { - "out_channels": (3, 64, 
256, 512, 1024, 2048), - "block": Bottle2neck, - "layers": [3, 4, 6, 3], - "base_width": 26, - "block_args": {"scale": 4}, - }, - }, - "timm-res2net101_26w_4s": { - "encoder": Res2NetEncoder, - "pretrained_settings": pretrained_settings["timm-res2net101_26w_4s"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottle2neck, - "layers": [3, 4, 23, 3], - "base_width": 26, - "block_args": {"scale": 4}, - }, - }, - "timm-res2net50_26w_6s": { - "encoder": Res2NetEncoder, - "pretrained_settings": pretrained_settings["timm-res2net50_26w_6s"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottle2neck, - "layers": [3, 4, 6, 3], - "base_width": 26, - "block_args": {"scale": 6}, - }, - }, - "timm-res2net50_26w_8s": { - "encoder": Res2NetEncoder, - "pretrained_settings": pretrained_settings["timm-res2net50_26w_8s"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottle2neck, - "layers": [3, 4, 6, 3], - "base_width": 26, - "block_args": {"scale": 8}, - }, - }, - "timm-res2net50_48w_2s": { - "encoder": Res2NetEncoder, - "pretrained_settings": pretrained_settings["timm-res2net50_48w_2s"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottle2neck, - "layers": [3, 4, 6, 3], - "base_width": 48, - "block_args": {"scale": 2}, - }, - }, - "timm-res2net50_14w_8s": { - "encoder": Res2NetEncoder, - "pretrained_settings": pretrained_settings["timm-res2net50_14w_8s"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottle2neck, - "layers": [3, 4, 6, 3], - "base_width": 14, - "block_args": {"scale": 8}, - }, - }, - "timm-res2next50": { - "encoder": Res2NetEncoder, - "pretrained_settings": pretrained_settings["timm-res2next50"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": Bottle2neck, - "layers": [3, 4, 6, 3], - "base_width": 4, - "cardinality": 8, - "block_args": {"scale": 4}, - }, - }, -} diff --git a/torchseg/encoders/timm_resnest.py b/torchseg/encoders/timm_resnest.py deleted file mode 100644 index 272c8645..00000000 --- a/torchseg/encoders/timm_resnest.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch.nn as nn -from timm.models.resnest import ResNestBottleneck -from timm.models.resnet import ResNet - -from ._base import EncoderMixin - - -class ResNestEncoder(ResNet, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - - del self.fc - del self.global_pool - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential(self.conv1, self.bn1, self.act1), - nn.Sequential(self.maxpool, self.layer1), - self.layer2, - self.layer3, - self.layer4, - ] - - def make_dilated(self, *args, **kwargs): - raise ValueError("ResNest encoders do not support dilated mode") - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("fc.bias", None) - state_dict.pop("fc.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -resnest_weights = { - "timm-resnest14d": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth" # noqa - }, - "timm-resnest26d": { - "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth" # noqa 
-    },
-    "timm-resnest50d": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth"  # noqa
-    },
-    "timm-resnest101e": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth"  # noqa
-    },
-    "timm-resnest200e": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth"  # noqa
-    },
-    "timm-resnest269e": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth"  # noqa
-    },
-    "timm-resnest50d_4s2x40d": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth"  # noqa
-    },
-    "timm-resnest50d_1s4x24d": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth"  # noqa
-    },
-}
-
-pretrained_settings = {}
-for model_name, sources in resnest_weights.items():
-    pretrained_settings[model_name] = {}
-    for source_name, source_url in sources.items():
-        pretrained_settings[model_name][source_name] = {
-            "url": source_url,
-            "input_size": [3, 224, 224],
-            "input_range": [0, 1],
-            "mean": [0.485, 0.456, 0.406],
-            "std": [0.229, 0.224, 0.225],
-            "num_classes": 1000,
-        }
-
-
-timm_resnest_encoders = {
-    "timm-resnest14d": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest14d"],
-        "params": {
-            "out_channels": (3, 64, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [1, 1, 1, 1],
-            "stem_type": "deep",
-            "stem_width": 32,
-            "avg_down": True,
-            "base_width": 64,
-            "cardinality": 1,
-            "block_args": {"radix": 2, "avd": True, "avd_first": False},
-        },
-    },
-    "timm-resnest26d": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest26d"],
-        "params": {
-            "out_channels": (3, 64, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [2, 2, 2, 2],
-            "stem_type": "deep",
-            "stem_width": 32,
-            "avg_down": True,
-            "base_width": 64,
-            "cardinality": 1,
-            "block_args": {"radix": 2, "avd": True, "avd_first": False},
-        },
-    },
-    "timm-resnest50d": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest50d"],
-        "params": {
-            "out_channels": (3, 64, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [3, 4, 6, 3],
-            "stem_type": "deep",
-            "stem_width": 32,
-            "avg_down": True,
-            "base_width": 64,
-            "cardinality": 1,
-            "block_args": {"radix": 2, "avd": True, "avd_first": False},
-        },
-    },
-    "timm-resnest101e": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest101e"],
-        "params": {
-            "out_channels": (3, 128, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [3, 4, 23, 3],
-            "stem_type": "deep",
-            "stem_width": 64,
-            "avg_down": True,
-            "base_width": 64,
-            "cardinality": 1,
-            "block_args": {"radix": 2, "avd": True, "avd_first": False},
-        },
-    },
-    "timm-resnest200e": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest200e"],
-        "params": {
-            "out_channels": (3, 128, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [3, 24, 36, 3],
-            "stem_type": "deep",
-            "stem_width": 64,
-            "avg_down": True,
-            "base_width": 64,
-            "cardinality": 1,
-            "block_args": {"radix": 2, "avd": True, "avd_first": False},
-        },
-    },
-    "timm-resnest269e": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest269e"],
-        "params": {
-            "out_channels": (3, 128, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [3, 30, 48, 8],
-            "stem_type": "deep",
-            "stem_width": 64,
-            "avg_down": True,
-            "base_width": 64,
-            "cardinality": 1,
-            "block_args": {"radix": 2, "avd": True, "avd_first": False},
-        },
-    },
-    "timm-resnest50d_4s2x40d": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest50d_4s2x40d"],
-        "params": {
-            "out_channels": (3, 64, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [3, 4, 6, 3],
-            "stem_type": "deep",
-            "stem_width": 32,
-            "avg_down": True,
-            "base_width": 40,
-            "cardinality": 2,
-            "block_args": {"radix": 4, "avd": True, "avd_first": True},
-        },
-    },
-    "timm-resnest50d_1s4x24d": {
-        "encoder": ResNestEncoder,
-        "pretrained_settings": pretrained_settings["timm-resnest50d_1s4x24d"],
-        "params": {
-            "out_channels": (3, 64, 256, 512, 1024, 2048),
-            "block": ResNestBottleneck,
-            "layers": [3, 4, 6, 3],
-            "stem_type": "deep",
-            "stem_width": 32,
-            "avg_down": True,
-            "base_width": 24,
-            "cardinality": 4,
-            "block_args": {"radix": 1, "avd": True, "avd_first": True},
-        },
-    },
-}
diff --git a/torchseg/encoders/timm_sknet.py b/torchseg/encoders/timm_sknet.py
deleted file mode 100644
index 6b05f7a6..00000000
--- a/torchseg/encoders/timm_sknet.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import torch.nn as nn
-from timm.models.resnet import ResNet
-from timm.models.sknet import SelectiveKernelBasic, SelectiveKernelBottleneck
-
-from ._base import EncoderMixin
-
-
-class SkNetEncoder(ResNet, EncoderMixin):
-    def __init__(self, out_channels, depth=5, **kwargs):
-        super().__init__(**kwargs)
-        self._depth = depth
-        self._out_channels = out_channels
-        self._in_channels = 3
-
-        del self.fc
-        del self.global_pool
-
-    def get_stages(self):
-        return [
-            nn.Identity(),
-            nn.Sequential(self.conv1, self.bn1, self.act1),
-            nn.Sequential(self.maxpool, self.layer1),
-            self.layer2,
-            self.layer3,
-            self.layer4,
-        ]
-
-    def forward(self, x):
-        stages = self.get_stages()
-
-        features = []
-        for i in range(self._depth + 1):
-            x = stages[i](x)
-            features.append(x)
-
-        return features
-
-    def load_state_dict(self, state_dict, **kwargs):
-        state_dict.pop("fc.bias", None)
-        state_dict.pop("fc.weight", None)
-        super().load_state_dict(state_dict, **kwargs)
-
-
-sknet_weights = {
-    "timm-skresnet18": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth"  # noqa: E501
-    },
-    "timm-skresnet34": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth"  # noqa: E501
-    },
-    "timm-skresnext50_32x4d": {
-        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth"  # noqa: E501
-    },
-}
-
-pretrained_settings = {}
-for model_name, sources in sknet_weights.items():
-    pretrained_settings[model_name] = {}
-    for source_name, source_url in sources.items():
-        pretrained_settings[model_name][source_name] = {
-            "url": source_url,
-            "input_size": [3, 224, 224],
-            "input_range": [0, 1],
-            "mean": [0.485, 0.456, 0.406],
-            "std": [0.229, 0.224, 0.225],
-            "num_classes": 1000,
-        }
-
-timm_sknet_encoders = {
-    "timm-skresnet18": {
-        "encoder": SkNetEncoder,
-        "pretrained_settings": pretrained_settings["timm-skresnet18"],
-        "params": {
-            "out_channels": (3, 64, 64, 128, 256, 512),
-            "block": SelectiveKernelBasic,
-            "layers": [2, 2, 2, 2],
"zero_init_last": False, - "block_args": {"sk_kwargs": {"rd_ratio": 1 / 8, "split_input": True}}, - }, - }, - "timm-skresnet34": { - "encoder": SkNetEncoder, - "pretrained_settings": pretrained_settings["timm-skresnet34"], - "params": { - "out_channels": (3, 64, 64, 128, 256, 512), - "block": SelectiveKernelBasic, - "layers": [3, 4, 6, 3], - "zero_init_last": False, - "block_args": {"sk_kwargs": {"rd_ratio": 1 / 8, "split_input": True}}, - }, - }, - "timm-skresnext50_32x4d": { - "encoder": SkNetEncoder, - "pretrained_settings": pretrained_settings["timm-skresnext50_32x4d"], - "params": { - "out_channels": (3, 64, 256, 512, 1024, 2048), - "block": SelectiveKernelBottleneck, - "layers": [3, 4, 6, 3], - "zero_init_last": False, - "cardinality": 32, - "base_width": 4, - }, - }, -} diff --git a/torchseg/encoders/timm_universal.py b/torchseg/encoders/timm_universal.py deleted file mode 100644 index 9702a7c3..00000000 --- a/torchseg/encoders/timm_universal.py +++ /dev/null @@ -1,38 +0,0 @@ -import timm -import torch.nn as nn - - -class TimmUniversalEncoder(nn.Module): - def __init__(self, name, pretrained=True, in_channels=3, depth=5, output_stride=32): - super().__init__() - kwargs = dict( - in_chans=in_channels, - features_only=True, - output_stride=output_stride, - pretrained=pretrained, - out_indices=tuple(range(depth)), - ) - - # not all models support output stride argument, drop it by default - if output_stride == 32: - kwargs.pop("output_stride") - - self.model = timm.create_model(name, **kwargs) - - self._in_channels = in_channels - self._out_channels = [in_channels] + self.model.feature_info.channels() - self._depth = depth - self._output_stride = output_stride - - def forward(self, x): - features = self.model(x) - features = [x] + features - return features - - @property - def out_channels(self): - return self._out_channels - - @property - def output_stride(self): - return min(self._output_stride, 2**self._depth) diff --git a/torchseg/encoders/vgg.py b/torchseg/encoders/vgg.py deleted file mode 100644 index c1fac39f..00000000 --- a/torchseg/encoders/vgg.py +++ /dev/null @@ -1,133 +0,0 @@ -import torch.nn as nn -from pretrainedmodels.models.torchvision_models import pretrained_settings -from torchvision.models.vgg import VGG, make_layers - -from ._base import EncoderMixin - -# fmt: off -cfg = { - 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], - 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], - 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], # noqa: E501 - 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], # noqa: E501 -} -# fmt: on - - -class VGGEncoder(VGG, EncoderMixin): - def __init__(self, out_channels, config, batch_norm=False, depth=5, **kwargs): - super().__init__(make_layers(config, batch_norm=batch_norm), **kwargs) - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - del self.classifier - - def make_dilated(self, *args, **kwargs): - raise ValueError( - "'VGG' models do not support dilated mode due to Max Pooling" - " operations for downsampling!" 
- ) - - def get_stages(self): - stages = [] - stage_modules = [] - for module in self.features: - if isinstance(module, nn.MaxPool2d): - stages.append(nn.Sequential(*stage_modules)) - stage_modules = [] - stage_modules.append(module) - stages.append(nn.Sequential(*stage_modules)) - return stages - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - keys = list(state_dict.keys()) - for k in keys: - if k.startswith("classifier"): - state_dict.pop(k, None) - super().load_state_dict(state_dict, **kwargs) - - -vgg_encoders = { - "vgg11": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg11"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["A"], - "batch_norm": False, - }, - }, - "vgg11_bn": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg11_bn"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["A"], - "batch_norm": True, - }, - }, - "vgg13": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg13"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["B"], - "batch_norm": False, - }, - }, - "vgg13_bn": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg13_bn"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["B"], - "batch_norm": True, - }, - }, - "vgg16": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg16"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["D"], - "batch_norm": False, - }, - }, - "vgg16_bn": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg16_bn"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["D"], - "batch_norm": True, - }, - }, - "vgg19": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg19"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["E"], - "batch_norm": False, - }, - }, - "vgg19_bn": { - "encoder": VGGEncoder, - "pretrained_settings": pretrained_settings["vgg19_bn"], - "params": { - "out_channels": (64, 128, 256, 512, 512, 512), - "config": cfg["E"], - "batch_norm": True, - }, - }, -} diff --git a/torchseg/encoders/xception.py b/torchseg/encoders/xception.py deleted file mode 100644 index b8b5a659..00000000 --- a/torchseg/encoders/xception.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch.nn as nn -from pretrainedmodels.models.xception import Xception, pretrained_settings - -from ._base import EncoderMixin - - -class XceptionEncoder(Xception, EncoderMixin): - def __init__(self, out_channels, *args, depth=5, **kwargs): - super().__init__(*args, **kwargs) - - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - # modify padding to maintain output shape - self.conv1.padding = (1, 1) - self.conv2.padding = (1, 1) - - del self.fc - - def make_dilated(self, *args, **kwargs): - raise ValueError( - "Xception encoder does not support dilated mode " - "due to pooling operation for downsampling!" 
- ) - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential( - self.conv1, self.bn1, self.relu, self.conv2, self.bn2, self.relu - ), - self.block1, - self.block2, - nn.Sequential( - self.block3, - self.block4, - self.block5, - self.block6, - self.block7, - self.block8, - self.block9, - self.block10, - self.block11, - ), - nn.Sequential( - self.block12, self.conv3, self.bn3, self.relu, self.conv4, self.bn4 - ), - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict): - # remove linear - state_dict.pop("fc.bias", None) - state_dict.pop("fc.weight", None) - - super().load_state_dict(state_dict) - - -xception_encoders = { - "xception": { - "encoder": XceptionEncoder, - "pretrained_settings": pretrained_settings["xception"], - "params": {"out_channels": (3, 64, 128, 256, 728, 2048)}, - } -}