Config naming convention:
{model}_{backbone}_[misc]_[gpu x batch_per_gpu]_{resolution}_{schedule}_{dataset}
Each field is explained below; a concrete example follows the list.
- {xxx} is a required field; [yyy] is an optional field.
- {model}: model type, e.g. psp, deeplabv3.
- {backbone}: backbone type, e.g. r50 (ResNet-50), x101 (ResNeXt-101).
- [misc]: miscellaneous settings/plugins of the model, e.g. dconv, gcb, attention, mstrain.
- [gpu x batch_per_gpu]: GPUs and samples per GPU; 8x2 is used by default.
- {schedule}: training schedule; 20ki means 20k iterations.
- {dataset}: dataset, e.g. cityscapes, voc12aug, ade.
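For instance, `pspnet_r50-d8_512x1024_40k_cityscapes.py` (a name taken from the MMSegmentation model zoo as an illustration) reads as: PSPNet with a ResNet-50-D8 backbone, 512x1024 input resolution, a 40k-iteration schedule, trained on Cityscapes.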
norm_cfg = dict(type='SyncBN', requires_grad=True)  # synchronized batch normalization
backbone_norm_cfg = dict(type='LN', requires_grad=True)  # layer normalization
model = dict(
    type='EncoderDecoder',  # type of segmentor
    pretrained='pretrain/swin_base_patch4_window7_224.pth',  # ImageNet-pretrained backbone weights; here a Swin-Base model
    backbone=dict(
        type='SwinTransformer',  # type of backbone
        pretrain_img_size=224,
        embed_dims=128,
        patch_size=4,
        window_size=7,
        mlp_ratio=4,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        strides=(4, 2, 2, 2),
        out_indices=(0, 1, 2, 3),
        qkv_bias=True,
        qk_scale=None,
        patch_norm=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.3,
        use_abs_pos_embed=False,
        act_cfg=dict(type='GELU'),
        norm_cfg=dict(type='LN', requires_grad=True)),
    decode_head=dict(
        type='UPerHead',  # type of decode head
        in_channels=[128, 256, 512, 1024],  # input channels of the decode head
        in_index=[0, 1, 2, 3],  # indices of the selected feature maps
        pool_scales=(1, 2, 3, 6),
        channels=512,  # intermediate channels of the decode head
        dropout_ratio=0.1,  # dropout probability before the classification layer
        num_classes=150,  # number of segmentation classes
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        align_corners=False,  # used when resizing during decoding
        loss_decode=dict(  # loss config of the decode head
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(  # config of the auxiliary head
        type='FCNHead',
        in_channels=512,  # input channels of the auxiliary head
        in_index=2,  # index of the selected feature map
        channels=256,  # intermediate channels of the head
        num_convs=1,  # number of convs in FCNHead; usually 1 for an auxiliary head
        concat_input=False,  # whether to concatenate the conv output with the input before the classifier
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(
            type='LovaszLoss',
            classes='present',
            per_image=True,
            reduction='mean',
            loss_weight=0.4)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
dataset_type = 'SatelliteDataset'  # dataset type, used to define the dataset
data_root = 'data/sensing/jpg'  # root path of the data
img_norm_cfg = dict(  # normalization config for the input images
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], to_rgb=True)
crop_size = (512, 512)  # image crop size during training
train_pipeline = [  # training pipeline
    dict(type='LoadImageFromFile'),  # load the training image from file
    dict(type='LoadAnnotations', reduce_zero_label=False),  # load the annotation; False means label 0 is a valid class
    dict(type='Resize', img_scale=(512, 256), ratio_range=(0.5, 2.0)),  # resize the image and its annotation
    dict(type='RandomCrop', crop_size=(256, 256), cat_max_ratio=0.75),  # randomly crop a patch from the current image
    dict(type='RandomFlip', prob=0.5),  # flip the image with the given probability
    dict(type='PhotoMetricDistortion'),  # distort the image with several photometric transforms
    dict(
        type='Normalize',  # normalize the input image
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
        to_rgb=True),
    dict(type='Pad', size=(256, 256), pad_val=0, seg_pad_val=255),  # pad the image to the given size
    dict(type='DefaultFormatBundle'),  # default format bundle that collects the pipeline data
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])  # decide which keys are passed to the segmentor
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 256),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(
                type='Normalize',
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                to_rgb=True),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=2,  # batch size per GPU
    workers_per_gpu=4,  # number of data-loading workers per GPU
    train=dict(
        type='SatelliteDataset',
        data_root='data/sensing/jpg',
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', reduce_zero_label=False),
            dict(type='Resize', img_scale=(512, 256), ratio_range=(0.5, 2.0)),
            dict(type='RandomCrop', crop_size=(256, 256), cat_max_ratio=0.75),
            dict(type='RandomFlip', prob=0.5),
            dict(type='PhotoMetricDistortion'),
            dict(
                type='Normalize',
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                to_rgb=True),
            dict(type='Pad', size=(256, 256), pad_val=0, seg_pad_val=255),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img', 'gt_semantic_seg'])
        ]),
    val=dict(
        type='SatelliteDataset',
        data_root='data/sensing/jpg',
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(512, 256),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(
                        type='Normalize',
                        mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225],
                        to_rgb=True),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img'])
                ])
        ]),
    test=dict(
        type='SatelliteDataset',
        data_root='data/sensing/jpg',
        img_dir='images/test',
        ann_dir='annotations/test',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(512, 256),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(
                        type='Normalize',
                        mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225],
                        to_rgb=True),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img'])
                ])
        ]))
log_config = dict(
    interval=50,  # interval for printing logs
    hooks=[dict(type='TextLoggerHook', by_epoch=False)])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]  # workflow for the runner; [('train', 1)] means a single workflow named 'train' that is executed once
cudnn_benchmark = True  # whether to use cudnn benchmark to speed things up; only valid for fixed input sizes
optimizer = dict(
    type='AdamW',
    lr=6e-05,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys=dict(
            absolute_pos_embed=dict(decay_mult=0.0),
            relative_position_bias_table=dict(decay_mult=0.0),
            norm=dict(decay_mult=0.0))))
optimizer_config = dict()
lr_config = dict(
    policy='poly',  # scheduler policy; Step, CosineAnnealing, Cyclic, etc. are also supported
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-06,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, interval=10000)
evaluation = dict(interval=10000, metric='mIoU', pre_eval=True)
work_dir = './work_dirs/swin_tiny_patch4_window7_224'
gpu_ids = range(0, 1)
auto_resume = False
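The config refers to a custom dataset type, `SatelliteDataset`, which is not built into MMSegmentation, so it must be registered before the config can be built. Below is a minimal sketch of such a registration for MMSegmentation 0.x; the class names, palette, and file suffixes are assumptions, not taken from the original project.

```python
# Minimal sketch of registering the custom 'SatelliteDataset' referenced by
# dataset_type above (MMSegmentation 0.x APIs). CLASSES, PALETTE, and the
# file suffixes are assumptions for illustration only.
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset


@DATASETS.register_module()
class SatelliteDataset(CustomDataset):
    CLASSES = ('background', 'building')      # assumed class names
    PALETTE = [[0, 0, 0], [255, 255, 255]]    # assumed palette

    def __init__(self, **kwargs):
        # data_root is 'data/sensing/jpg', so .jpg images are assumed;
        # annotation maps are assumed to be .png files.
        super().__init__(img_suffix='.jpg', seg_map_suffix='.png', **kwargs)
```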
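The `lr_config` above combines a linear warmup (1500 iterations, starting from `warmup_ratio` times the regular rate) with a polynomial decay down to `min_lr`. The following sketch reimplements the formulas used by mmcv's poly LR hook, purely for illustration, so the shape of the schedule can be inspected:

```python
def lr_at(iter_idx,
          base_lr=6e-05, max_iters=40000, power=1.0, min_lr=0.0,
          warmup_iters=1500, warmup_ratio=1e-06):
    """Learning rate at a given iteration: poly decay with linear warmup."""
    # Regular poly schedule: decay from base_lr towards min_lr.
    coeff = (1 - iter_idx / max_iters) ** power
    regular_lr = (base_lr - min_lr) * coeff + min_lr
    if iter_idx >= warmup_iters:
        return regular_lr
    # Linear warmup: ramp from warmup_ratio * regular_lr up to regular_lr.
    k = (1 - iter_idx / warmup_iters) * (1 - warmup_ratio)
    return regular_lr * (1 - k)


print(lr_at(0))      # ~6e-11: base_lr * warmup_ratio
print(lr_at(1500))   # ~5.78e-05: warmup finished, poly decay under way
print(lr_at(40000))  # 0.0: min_lr reached at max_iters
```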
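Finally, a brief sketch of how a config file like this is typically consumed with MMSegmentation 0.x / mmcv 1.x; the config and checkpoint paths here are hypothetical:

```python
# Hypothetical paths; APIs are from MMSegmentation 0.x / mmcv 1.x.
from mmcv import Config
from mmseg.apis import init_segmentor, inference_segmentor

cfg = Config.fromfile('configs/my_satellite_config.py')  # hypothetical config path
cfg.work_dir = './work_dirs/my_experiment'  # fields can be overridden before use
cfg.data.samples_per_gpu = 4

# Build a model from the config plus a trained checkpoint and run inference:
model = init_segmentor(cfg, './work_dirs/my_experiment/latest.pth', device='cuda:0')
result = inference_segmentor(model, 'demo.jpg')  # list with one per-pixel class-id map
```

Training itself is normally launched from the repository root with `python tools/train.py <config>` rather than through the Python API.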