YOLO-World Source Code Analysis (Part 5, Section 3): https://developer.aliyun.com/article/1483892
.\YOLO-World\yolo_world\models\dense_heads\yolo_world_seg_head.py
```py
# Copyright notice
import math
from typing import List, Optional, Tuple, Union, Sequence

# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules.batchnorm import _BatchNorm

# mmcv / mmengine / mmdet / mmyolo components
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.dist import get_dist_info
from mmengine.structures import InstanceData
from mmdet.structures import SampleList
from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,
                         OptMultiConfig, InstanceList)
from mmdet.models.utils import multi_apply, unpack_gt_instances
from mmyolo.models.dense_heads import YOLOv8HeadModule
from mmyolo.models.utils import gt_instances_preprocess
from mmyolo.registry import MODELS, TASK_UTILS
from mmyolo.models.dense_heads.yolov5_ins_head import (
    ProtoModule, YOLOv5InsHead
)

# Custom modules: contrastive classification heads
from .yolo_world_head import ContrastiveHead, BNContrastiveHead


# Register YOLOWorldSegHeadModule as a model
@MODELS.register_module()
class YOLOWorldSegHeadModule(YOLOv8HeadModule):

    def __init__(self,
                 *args,
                 embed_dims: int,
                 proto_channels: int,
                 mask_channels: int,
                 freeze_bbox: bool = False,
                 use_bn_head: bool = False,
                 **kwargs) -> None:
        # Store the extra attributes, then let the parent class build the layers
        self.freeze_bbox = freeze_bbox
        self.embed_dims = embed_dims
        self.proto_channels = proto_channels
        self.mask_channels = mask_channels
        self.use_bn_head = use_bn_head
        super().__init__(*args, **kwargs)

    def init_weights(self, prior_prob=0.01):
        """Initialize the weights and biases of the head."""
        super().init_weights()
        # Initialize the classification biases per feature level
        for cls_pred, cls_contrast, stride in zip(self.cls_preds,
                                                  self.cls_contrasts,
                                                  self.featmap_strides):
            cls_pred[-1].bias.data[:] = 0.0  # reset bias
            # If the contrastive head has a bias, set it to a prior value
            if hasattr(cls_contrast, 'bias'):
                nn.init.constant_(
                    cls_contrast.bias.data,
                    math.log(5 / self.num_classes / (640 / stride)**2))

    def head_norm_eval(self):
        # Put all BatchNorm layers of the classification branch in eval mode
        for m in self.cls_preds:
            for q in m.modules():
                if isinstance(q, _BatchNorm):
                    q.eval()

        # Put all BatchNorm layers of the regression branch in eval mode
        for m in self.reg_preds:
            for q in m.modules():
                if isinstance(q, _BatchNorm):
                    q.eval()

    def train(self, mode: bool = True):
        """Switch to training mode while keeping the normalization layers
        frozen when the bbox branch is frozen."""
        super().train(mode)
        if self.freeze_bbox:
            self.head_norm_eval()

    def forward(self, img_feats: Tuple[Tensor],
                txt_feats: Tensor) -> Tuple[List]:
        """Forward features from the upstream network."""
        # The number of image feature maps must match the number of levels
        assert len(img_feats) == self.num_levels
        # Replicate the text features so that every level gets a copy
        txt_feats = [txt_feats for _ in range(self.num_levels)]
        # Mask prototypes come from the highest-resolution level
        mask_protos = self.proto_pred(img_feats[0])
        # Run every level in parallel: classification logits, bbox predictions,
        # bbox distance distributions and mask coefficients
        cls_logit, bbox_preds, bbox_dist_preds, coeff_preds = multi_apply(
            self.forward_single, img_feats, txt_feats, self.cls_preds,
            self.reg_preds, self.cls_contrasts, self.seg_preds)
        if self.training:
            return cls_logit, bbox_preds, bbox_dist_preds, coeff_preds, \
                mask_protos
        else:
            return cls_logit, bbox_preds, None, coeff_preds, mask_protos

    def forward_single(self, img_feat: Tensor, txt_feat: Tensor,
                       cls_pred: nn.ModuleList, reg_pred: nn.ModuleList,
                       cls_contrast: nn.ModuleList,
                       seg_pred: nn.ModuleList) -> Tuple:
        """Forward feature of a single scale level."""
        b, _, h, w = img_feat.shape
        # Project the image feature into the embedding space
        cls_embed = cls_pred(img_feat)
        # Contrastive head: similarity between image embeddings and text features
        cls_logit = cls_contrast(cls_embed, txt_feat)
        # Regression branch predicts the DFL distance distributions
        bbox_dist_preds = reg_pred(img_feat)
        # Segmentation branch predicts the mask coefficients
        coeff_pred = seg_pred(img_feat)
        if self.reg_max > 1:
            bbox_dist_preds = bbox_dist_preds.reshape(
                [-1, 4, self.reg_max, h * w]).permute(0, 3, 1, 2)

            # TODO: the get_flops script cannot handle the matmul case; fix later
            # Decode the distributions into expected distances
            bbox_preds = bbox_dist_preds.softmax(3).matmul(
                self.proj.view([-1, 1])).squeeze(-1)
            bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w)
        else:
            bbox_preds = bbox_dist_preds
        if self.training:
            return cls_logit, bbox_preds, bbox_dist_preds, coeff_pred
        else:
            return cls_logit, bbox_preds, None, coeff_pred


# Register the YOLO World segmentation head
@MODELS.register_module()
class YOLOWorldSegHead(YOLOv5InsHead):
    """YOLO World head."""

    def special_init(self):
        """Since YOLO series algorithms will inherit from YOLOv5Head, but
        different algorithms have special initialization process.

        The special_init function is designed to deal with this situation.
        """
        # Build the label assigner when a training config is given
        if self.train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg.assigner)

        # Cache commonly used attributes to reduce repeated computation
        self.featmap_sizes_train = None
        self.num_level_priors = None
        self.flatten_priors_train = None
        self.stride_tensor = None

    def loss(self, img_feats: Tuple[Tensor], txt_feats: Tensor,
             batch_data_samples: Union[list, dict]) -> dict:
        """Perform forward propagation and loss calculation of the detection
        head on the features of the upstream network."""
        outs = self(img_feats, txt_feats)
        # Fast version: ground truth is already packed into tensors
        loss_inputs = outs + (batch_data_samples['bboxes_labels'],
                              batch_data_samples['masks'],
                              batch_data_samples['img_metas'])
        losses = self.loss_by_feat(*loss_inputs)
        return losses

    def loss_and_predict(
        self,
        img_feats: Tuple[Tensor],
        txt_feats: Tensor,
        batch_data_samples: SampleList,
        proposal_cfg: Optional[ConfigDict] = None
    ) -> Tuple[dict, InstanceList]:
        """Perform forward propagation of the head, then calculate loss and
        predictions from the features and data samples."""
        # Unpack the ground-truth instances from the data samples
        outputs = unpack_gt_instances(batch_data_samples)
        (batch_gt_instances, batch_gt_instances_ignore,
         batch_img_metas) = outputs

        outs = self(img_feats, txt_feats)

        loss_inputs = outs + (batch_gt_instances, batch_img_metas,
                              batch_gt_instances_ignore)
        losses = self.loss_by_feat(*loss_inputs)

        predictions = self.predict_by_feat(*outs,
                                           batch_img_metas=batch_img_metas,
                                           cfg=proposal_cfg)
        return losses, predictions

    def forward(self, img_feats: Tuple[Tensor],
                txt_feats: Tensor) -> Tuple[List]:
        """Forward features from the upstream network."""
        return self.head_module(img_feats, txt_feats)

    def predict(self,
                img_feats: Tuple[Tensor],
                txt_feats: Tensor,
                batch_data_samples: SampleList,
                rescale: bool = False) -> InstanceList:
        """Perform forward propagation of the detection head and predict
        detection results on the features of the upstream network."""
        # Collect the meta information of every data sample in the batch
        batch_img_metas = [
            data_samples.metainfo for data_samples in batch_data_samples
        ]
        outs = self(img_feats, txt_feats)
        predictions = self.predict_by_feat(*outs,
                                           batch_img_metas=batch_img_metas,
                                           rescale=rescale)
        return predictions

    def aug_test(self,
                 aug_batch_feats,
                 aug_batch_img_metas,
                 rescale=False,
                 with_ori_nms=False,
                 **kwargs):
        """Test function with test time augmentation."""
        raise NotImplementedError('aug_test is not implemented yet.')
```
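The interesting part of `forward_single` is how it combines open-vocabulary classification with YOLOv8-style box regression: the contrastive head scores every pixel embedding against the text embeddings, and the `reg_max`-bin distance distribution is decoded into expected distances by a softmax expectation against `self.proj`. The following is a minimal, standalone sketch of those two computations with made-up shapes; the plain cosine-similarity logits and all tensor names here are illustrative assumptions, not the exact `ContrastiveHead`/`BNContrastiveHead` implementation (which also applies a learned scale and bias).

```py
# Sketch of (1) contrastive classification logits and (2) DFL decoding,
# using assumed shapes rather than the module's real configuration.
import torch
import torch.nn.functional as F

b, h, w = 2, 20, 20               # batch and feature-map size (assumed)
embed_dims, num_classes = 256, 3  # embedding width and number of prompts
reg_max = 16                      # number of DFL bins (YOLOv8 default)

cls_embed = torch.randn(b, embed_dims, h, w)         # stands in for cls_pred(img_feat)
txt_feat = torch.randn(b, num_classes, embed_dims)   # stands in for the text embeddings

# (1) Contrastive head: cosine similarity between each pixel embedding and
# every text embedding (learned scale/bias omitted for brevity).
pix = F.normalize(cls_embed.flatten(2).transpose(1, 2), dim=-1)  # (b, h*w, d)
txt = F.normalize(txt_feat, dim=-1)                              # (b, c, d)
cls_logit = torch.einsum('bnd,bcd->bnc', pix, txt)               # (b, h*w, c)

# (2) DFL decoding: softmax over the reg_max bins, then take the expectation
# against proj = [0, 1, ..., reg_max - 1], exactly as in forward_single.
bbox_dist_preds = torch.randn(b, 4 * reg_max, h, w)  # stands in for reg_pred(img_feat)
proj = torch.arange(reg_max, dtype=torch.float32)
dist = bbox_dist_preds.reshape(b, 4, reg_max, h * w).permute(0, 3, 1, 2)
bbox_preds = dist.softmax(3).matmul(proj.view(-1, 1)).squeeze(-1)  # (b, h*w, 4)
bbox_preds = bbox_preds.transpose(1, 2).reshape(b, 4, h, w)

print(cls_logit.shape, bbox_preds.shape)  # torch.Size([2, 400, 3]) torch.Size([2, 4, 20, 20])
```

Because the class scores come from this similarity with text embeddings rather than a fixed linear classifier, the number of "classes" is simply the number of text prompts supplied at run time.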
.\YOLO-World\yolo_world\models\dense_heads\__init__.py
```py
# Import the detection head classes
from .yolo_world_head import YOLOWorldHead, YOLOWorldHeadModule
# Import the segmentation head classes
from .yolo_world_seg_head import YOLOWorldSegHead, YOLOWorldSegHeadModule

# Names exported by the dense_heads sub-package
__all__ = [
    'YOLOWorldHead', 'YOLOWorldHeadModule', 'YOLOWorldSegHead',
    'YOLOWorldSegHeadModule'
]
```
.\YOLO-World\yolo_world\models\detectors\yolo_world.py
```py
# Required modules and classes
from typing import List, Tuple, Union

from torch import Tensor
from mmdet.structures import OptSampleList, SampleList
from mmyolo.models.detectors import YOLODetector
from mmyolo.registry import MODELS


# Register YOLOWorldDetector in the MODELS registry
@MODELS.register_module()
class YOLOWorldDetector(YOLODetector):
    """Implementation of YOLOW Series"""

    def __init__(self,
                 *args,
                 mm_neck: bool = False,
                 num_train_classes=80,
                 num_test_classes=80,
                 **kwargs) -> None:
        # Store whether the neck is multi-modal and the vocabulary sizes
        self.mm_neck = mm_neck
        self.num_train_classes = num_train_classes
        self.num_test_classes = num_test_classes
        super().__init__(*args, **kwargs)

    def loss(self, batch_inputs: Tensor,
             batch_data_samples: SampleList) -> Union[dict, list]:
        """Calculate losses from a batch of inputs and data samples."""
        # During training the bbox head uses the training vocabulary size
        self.bbox_head.num_classes = self.num_train_classes
        # Extract image and text features
        img_feats, txt_feats = self.extract_feat(batch_inputs,
                                                 batch_data_samples)
        losses = self.bbox_head.loss(img_feats, txt_feats,
                                     batch_data_samples)
        return losses

    def predict(self,
                batch_inputs: Tensor,
                batch_data_samples: SampleList,
                rescale: bool = True) -> SampleList:
        """Predict results from a batch of inputs and data samples with
        post-processing."""
        img_feats, txt_feats = self.extract_feat(batch_inputs,
                                                 batch_data_samples)
        # At test time the number of classes equals the number of text prompts
        self.bbox_head.num_classes = txt_feats[0].shape[0]
        results_list = self.bbox_head.predict(img_feats,
                                              txt_feats,
                                              batch_data_samples,
                                              rescale=rescale)
        # Attach the predictions to the data samples
        batch_data_samples = self.add_pred_to_datasample(
            batch_data_samples, results_list)
        return batch_data_samples

    def _forward(
            self,
            batch_inputs: Tensor,
            batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
        """Network forward process.

        Usually includes backbone, neck and head forward without any
        post-processing.
        """
        img_feats, txt_feats = self.extract_feat(batch_inputs,
                                                 batch_data_samples)
        results = self.bbox_head.forward(img_feats, txt_feats)
        return results

    def extract_feat(
            self, batch_inputs: Tensor,
            batch_data_samples: SampleList) -> Tuple[Tuple[Tensor], Tensor]:
        """Extract features."""
        # Gather the text prompts carried by the batch: a dict batch stores
        # them under 'texts', a list batch stores them on each data sample
        if isinstance(batch_data_samples, dict):
            texts = batch_data_samples['texts']
        elif isinstance(batch_data_samples, list):
            texts = [data_sample.texts for data_sample in batch_data_samples]
        else:
            raise TypeError('batch_data_samples should be dict or list.')

        # The multi-modal backbone encodes images and texts jointly
        img_feats, txt_feats = self.backbone(batch_inputs, texts)
        if self.with_neck:
            if self.mm_neck:
                # A multi-modal neck fuses image features with text features
                img_feats = self.neck(img_feats, txt_feats)
            else:
                # A standard neck only processes the image features
                img_feats = self.neck(img_feats)
        return img_feats, txt_feats
```
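To make the text-conditioning path in `extract_feat` concrete, here is a small sketch of the type dispatch it performs before calling the multi-modal backbone. `StubSample` and `gather_texts` are stand-in names used only for illustration; they are not part of mmdet or mmyolo.

```py
# Minimal sketch of how per-image text prompts are collected from the batch,
# mirroring the dict/list dispatch in YOLOWorldDetector.extract_feat.
from dataclasses import dataclass
from typing import List, Union


@dataclass
class StubSample:
    # Class-name prompts attached to one data sample (assumed structure)
    texts: List[str]


def gather_texts(batch_data_samples: Union[dict, list]) -> List[List[str]]:
    if isinstance(batch_data_samples, dict):
        # Fast training path: prompts are packed under the 'texts' key
        return batch_data_samples['texts']
    elif isinstance(batch_data_samples, list):
        # Standard path: each data sample carries its own prompts
        return [s.texts for s in batch_data_samples]
    raise TypeError('batch_data_samples should be dict or list.')


samples = [StubSample(texts=['person', 'dog']),
           StubSample(texts=['person', 'dog'])]
print(gather_texts(samples))               # list input
print(gather_texts({'texts': [['cat']]}))  # dict input
```

The collected prompts are then encoded by the backbone together with the images, which is why `predict` can set `bbox_head.num_classes` from `txt_feats[0].shape[0]`: the vocabulary is defined entirely by the prompts passed in with the batch.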