4. Model Training
1. Selecting a pretrained model
Use cnn14 as the backbone to extract audio features:
from paddlespeech.cls.models import cnn14

backbone = cnn14(pretrained=True, extract_embedding=True)
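With extract_embedding=True, the backbone returns a fixed-size embedding for each clip rather than AudioSet tag scores. A minimal sanity check, assuming a dummy log-mel input of shape [batch, 1, frames, 64 mel bins] (this shape mirrors how the classifier in the next step feeds the backbone and is an assumption, not part of the original tutorial):

import paddle

# Dummy log-mel features: batch of 2 clips, 100 frames, 64 mel bins (assumed shape).
dummy_feats = paddle.randn([2, 1, 100, 64])
with paddle.no_grad():
    emb = backbone(dummy_feats)
print(backbone.emb_size)  # embedding dimension consumed by the classification head
print(emb.shape)          # expected: [2, backbone.emb_size]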
2. Building the classification model
SoundClassifier takes cnn14 as the backbone and builds a downstream classification network on top of it:
import paddle.nn as nn


class SoundClassifier(nn.Layer):
    def __init__(self, backbone, num_class, dropout=0.1):
        super().__init__()
        self.backbone = backbone
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(self.backbone.emb_size, num_class)

    def forward(self, x):
        x = x.unsqueeze(1)     # [B, T, N] -> [B, 1, T, N]
        x = self.backbone(x)   # [B, emb_size]
        x = self.dropout(x)
        logits = self.fc(x)
        return logits


model = SoundClassifier(backbone, num_class=len(train_ds.label_list))
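Before training, a quick smoke test of the full classifier can catch shape mistakes. A sketch assuming log-mel features shaped [batch, frames, 64 mel bins] (batch size and frame count below are arbitrary illustrative values):

import paddle

# Hypothetical shape check: the classifier should map [B, T, n_mels] features
# to [B, num_class] logits.
dummy_feats = paddle.randn([4, 100, 64])
with paddle.no_grad():
    logits = model(dummy_feats)
print(logits.shape)  # expected: [4, len(train_ds.label_list)]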
3. Finetuning
# Define the optimizer and loss
optimizer = paddle.optimizer.Adam(learning_rate=1e-4, parameters=model.parameters())
criterion = paddle.nn.loss.CrossEntropyLoss()
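The training loop below checks whether the optimizer was built with a learning-rate scheduler and steps it if so; the tutorial itself uses a fixed 1e-4. If you want a decaying schedule instead, a minimal sketch (the CosineAnnealingDecay choice and its hyperparameters are illustrative assumptions, using epochs and steps_per_epoch as defined in the training script below):

# Optional: wrap the base learning rate in a scheduler so the loop's
# LRScheduler branch takes effect. Scheduler type/values are assumptions.
lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
    learning_rate=1e-4,
    T_max=epochs * steps_per_epoch)  # one cosine cycle over the whole run
optimizer = paddle.optimizer.Adam(
    learning_rate=lr_scheduler, parameters=model.parameters())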
from paddleaudio.utils import logger

epochs = 20
steps_per_epoch = len(train_loader)
log_freq = 10
eval_freq = 10

for epoch in range(1, epochs + 1):
    model.train()

    avg_loss = 0
    num_corrects = 0
    num_samples = 0
    for batch_idx, batch in enumerate(train_loader):
        waveforms, labels = batch
        feats = feature_extractor(waveforms)
        feats = paddle.transpose(feats, [0, 2, 1])  # [B, N, T] -> [B, T, N]
        logits = model(feats)

        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        if isinstance(optimizer._learning_rate, paddle.optimizer.lr.LRScheduler):
            optimizer._learning_rate.step()
        optimizer.clear_grad()

        # Calculate loss
        avg_loss += loss.numpy()[0]

        # Calculate metrics
        preds = paddle.argmax(logits, axis=1)
        num_corrects += (preds == labels).numpy().sum()
        num_samples += feats.shape[0]

        if (batch_idx + 1) % log_freq == 0:
            lr = optimizer.get_lr()
            avg_loss /= log_freq
            avg_acc = num_corrects / num_samples

            print_msg = 'Epoch={}/{}, Step={}/{}'.format(
                epoch, epochs, batch_idx + 1, steps_per_epoch)
            print_msg += ' loss={:.4f}'.format(avg_loss)
            print_msg += ' acc={:.4f}'.format(avg_acc)
            print_msg += ' lr={:.6f}'.format(lr)
            logger.train(print_msg)

            avg_loss = 0
            num_corrects = 0
            num_samples = 0
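The script defines eval_freq but the loop above never uses it. A minimal evaluation sketch that could sit at the end of each epoch inside the loop (the dev_loader name is an assumption for a held-out loader built the same way as train_loader, and is not part of the original tutorial):

# Hypothetical per-epoch evaluation; place inside the epoch loop above.
# dev_loader is assumed to yield (waveforms, labels) batches like train_loader.
if epoch % eval_freq == 0:
    model.eval()
    num_corrects = 0
    num_samples = 0
    with paddle.no_grad():
        for waveforms, labels in dev_loader:
            feats = feature_extractor(waveforms)
            feats = paddle.transpose(feats, [0, 2, 1])  # [B, N, T] -> [B, T, N]
            logits = model(feats)
            preds = paddle.argmax(logits, axis=1)
            num_corrects += (preds == labels).numpy().sum()
            num_samples += feats.shape[0]
    logger.eval('Epoch={}/{} dev acc={:.4f}'.format(
        epoch, epochs, num_corrects / num_samples))
    model.train()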
[2022-08-24 02:20:49,381] [ TRAIN] - Epoch=17/20, Step=10/15 loss=1.3319 acc=0.4875 lr=0.000100
[2022-08-24 02:21:08,107] [ TRAIN] - Epoch=18/20, Step=10/15 loss=1.3222 acc=0.4719 lr=0.000100
[2022-08-24 02:21:26,884] [ TRAIN] - Epoch=19/20, Step=10/15 loss=1.2539 acc=0.5125 lr=0.000100
[2022-08-24 02:21:45,579] [ TRAIN] - Epoch=20/20, Step=10/15 loss=1.2021 acc=0.5281 lr=0.000100
5. Model Inference
Run the finetuned model on a test clip and print the top-k predicted labels:
top_k = 3
wav_file = 'test/test_0.wav'

n_fft = 1024
win_length = 1024
hop_length = 320
f_min = 50.0
f_max = 16000.0

waveform, sr = load(wav_file, sr=sr)
feature_extractor = LogMelSpectrogram(
    sr=sr,
    n_fft=n_fft,
    hop_length=hop_length,
    win_length=win_length,
    window='hann',
    f_min=f_min,
    f_max=f_max,
    n_mels=64)

model.eval()  # disable dropout for inference
feats = feature_extractor(paddle.to_tensor(waveform).unsqueeze(0))
feats = paddle.transpose(feats, [0, 2, 1])  # [B, N, T] -> [B, T, N]
logits = model(feats)
probs = nn.functional.softmax(logits, axis=1).numpy()

sorted_indices = probs[0].argsort()

msg = f'[{wav_file}]\n'
for idx in sorted_indices[-1:-top_k - 1:-1]:
    msg += f'{train_ds.label_list[idx]}: {probs[0][idx]:.5f}\n'
print(msg)
[test/test_0.wav]
diaper: 0.50155
sleepy: 0.41397
hug: 0.05912
6. Notes
- 1. For a custom dataset, the data format can follow the documentation;
- 2. Unify the audio size (e.g. clip length and sampling rate) before training; see the sketch after this list;
- 3. For a more systematic introduction, you can take the course at aistudio.baidu.com/aistudio/ed…
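A minimal sketch for point 2, padding or truncating each waveform to a fixed duration before feature extraction (the 3-second target and the numpy-based helper are illustrative assumptions, not part of the original tutorial):

import numpy as np

def pad_or_truncate(waveform, sr, duration_sec=3.0):
    # Pad with zeros or cut the clip so every sample has the same length.
    # The 3-second target duration is an illustrative choice.
    target_len = int(sr * duration_sec)
    if len(waveform) >= target_len:
        return waveform[:target_len]
    return np.pad(waveform, (0, target_len - len(waveform)), mode='constant')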