[seq2seq] Paper Implementation: Effective Approaches to Attention-based Neural Machine Translation (Part 1)

Paper: Effective Approaches to Attention-based Neural Machine Translation

Authors: Minh-Thang Luong, Hieu Pham, Christopher D. Manning

Year: 2015
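
For reference, the paper's global attention can be summarized as follows (a paraphrase of Luong et al., 2015; notation follows the paper). At each decoding step $t$, the decoder state $h_t$ is compared with every encoder state $\bar{h}_s$:

$$a_t(s) = \mathrm{softmax}_s\big(\mathrm{score}(h_t, \bar{h}_s)\big), \qquad c_t = \sum_s a_t(s)\,\bar{h}_s, \qquad \tilde{h}_t = \tanh\big(W_c [c_t; h_t]\big)$$

with three choices of score function:

$$\mathrm{score}(h_t, \bar{h}_s) = h_t^\top \bar{h}_s \ \text{(dot)}, \qquad h_t^\top W_a \bar{h}_s \ \text{(general)}, \qquad v_a^\top \tanh\big(W_a [h_t; \bar{h}_s]\big) \ \text{(concat)}$$

The CrossAttention layer in the code below plays the role of this attention step; see the note after its definition.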

1. Complete Code

Here we implement the model in TensorFlow; the complete code is as follows:
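
A note on the data format: process_data below assumes each line of ./data/transformer_data.tsv is tab-separated, with the Chinese sentence at column index 1 and its English translation at column index 3. A purely hypothetical example line (fields 0 and 2 are ignored):

<field0>\t你好。\t<field2>\tHello.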

# The complete code is below
import tensorflow as tf
import keras_nlp
import matplotlib.pyplot as plt
import numpy as np
import os
import random
# Configure matplotlib to render Chinese labels (SimHei) and minus signs correctly
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
# Data processing: keep the Chinese (index 1) and English (index 3) fields of each line
def process_data(x):
    res = tf.strings.split(x, '\t')
    return res[1], res[3]
# Load the data
dataset = tf.data.TextLineDataset('./data/transformer_data.tsv')
dataset = dataset.map(process_data)
# Build Chinese and English WordPiece vocabularies
vocab_chinese = keras_nlp.tokenizers.compute_word_piece_vocabulary(
    dataset.map(lambda x, y: x),
    vocabulary_size=20_000,
    lowercase=True,
    strip_accents=True,
    split_on_cjk=True,
    reserved_tokens=["[PAD]", "[START]", "[END]", "[MASK]", "[UNK]"],
)
vocab_english = keras_nlp.tokenizers.compute_word_piece_vocabulary(
    dataset.map(lambda x, y: y),
    vocabulary_size=20_000,
    lowercase=True,
    strip_accents=True,
    split_on_cjk=True,
    reserved_tokens=["[PAD]", "[START]", "[END]", "[MASK]", "[UNK]"],
)
# Build the tokenizers
chinese_tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab_chinese, oov_token="[UNK]")
english_tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab_english, oov_token="[UNK]")
# Second pass of data processing: tokenize, truncate, and add special tokens
def process_data_(ch, en, maxtoken=128):
    
    ch = chinese_tokenizer(ch)[:,:maxtoken]
    en = english_tokenizer(tf.strings.lower(en))[:,:maxtoken]
    
    # Prepend [START] (id 1) and append [END] (id 2); the reserved_tokens order makes [PAD] id 0
    ch = tf.concat([tf.ones(shape=(64,1), dtype='int32'), ch, tf.ones(shape=(64,1), dtype='int32')*2], axis=-1).to_tensor()
    en = tf.concat([tf.ones(shape=(64,1), dtype='int32'), en, tf.ones(shape=(64,1), dtype='int32')*2], axis=-1)
    en_inputs = en[:, :-1].to_tensor()  # Drop the [END] tokens
    en_labels = en[:, 1:].to_tensor()  # Drop the [START] tokens
    return (ch, en_inputs), en_labels
    
dataset = dataset.batch(64, drop_remainder=True).map(process_data_)  # drop_remainder: process_data_ hard-codes a batch size of 64
train_dataset = dataset.take(1000)
val_dataset = dataset.skip(1000).take(300)  # skip the whole training split so train and validation do not overlap
# Data is ready; inspect one batch
for (ch, en), en_labels in dataset.take(1):
    break
print(ch.shape)
print(en.shape)
print(en_labels.shape)
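# Expected shapes with batch_size=64: (64, src_len), (64, tgt_len), (64, tgt_len);
# src_len and tgt_len vary from batch to batch (sequences are padded with [PAD] = 0)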
# Build the encoder
class Encoder(tf.keras.layers.Layer):
    def __init__(self, vocabulary_size, d_model, units):
        super().__init__()
        self.embedding = tf.keras.layers.Embedding(vocabulary_size, d_model)
        self.rnn = tf.keras.layers.Bidirectional(
            layer=tf.keras.layers.LSTM(units=units, return_sequences=True, return_state=False),
            merge_mode='sum'
        )
    def call(self, inputs):
        x = inputs
        x = self.embedding(x)
        x = self.rnn(x)
        return x
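# Note: Luong et al. (2015) use a stacked *unidirectional* LSTM encoder; this
# implementation substitutes a single bidirectional LSTM whose two directions are summed.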
# Build the cross-attention layer
class CrossAttention(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super().__init__()
        self.mha = tf.keras.layers.MultiHeadAttention(key_dim=units, num_heads=1, **kwargs)
        self.add = tf.keras.layers.Add()
        self.norm = tf.keras.layers.LayerNormalization()
    def call(self, inputs):
        x, context = inputs
        attention_out, attention_score = self.mha(query=x, value=context, key=context, return_attention_scores=True)
        self.last_attention_score = attention_score
        x = self.add([x, attention_out])
        x = self.norm(x)
        return x
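# With num_heads=1, the learned query/key projections make the attention score a bilinear
# form, roughly the paper's "general" score; the residual connection and LayerNormalization
# here are additions that the original paper does not use.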
# Build the decoder
class Decoder(tf.keras.layers.Layer):
    def __init__(self, vocabulary_size, d_model, units, **kwargs):
        super().__init__()
        self.embedding = tf.keras.layers.Embedding(vocabulary_size, d_model)
        self.rnn = tf.keras.layers.LSTM(units, return_sequences=True)
        self.attention = CrossAttention(units, **kwargs)
        self.dense = tf.keras.layers.Dense(vocabulary_size, activation='softmax')
    def call(self, inputs):
        x, context = inputs
        x = self.embedding(x)
        x = self.rnn(x)
        x = self.attention((x, context))
        x = self.dense(x)
        return x
# Assemble the final model
class Seq2Seq(tf.keras.models.Model):
    def __init__(self, vocabulary_size_1, vocabulary_size_2, d_model, units, **kwargs):
        super().__init__()
        self.encoder = Encoder(vocabulary_size=vocabulary_size_1, d_model=d_model, units=units)
        self.decoder = Decoder(vocabulary_size=vocabulary_size_2, d_model=d_model, units=units)
    def call(self, inputs):
        ch, en = inputs
        context = self.encoder(ch)
        output = self.decoder((en, context))
        return output
seq2seq = Seq2Seq(chinese_tokenizer.vocabulary_size(), english_tokenizer.vocabulary_size(), 512, 30)
# Model overview
seq2seq((ch, en))
seq2seq.summary()
# Model configuration
def masked_loss(y_true, y_pred):
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(reduction='none')
    loss = loss_fn(y_true, y_pred)
    # [PAD] has id 0; mask out padded positions and average only over real tokens
    mask = tf.cast(y_true != 0, loss.dtype)
    loss *= mask
    return tf.reduce_sum(loss)/tf.reduce_sum(mask)
def masked_acc(y_true, y_pred):
    y_pred = tf.argmax(y_pred, axis=-1)
    y_pred = tf.cast(y_pred, y_true.dtype)
    match = tf.cast(y_true == y_pred, tf.float32)
    mask = tf.cast(y_true != 0, tf.float32)
    return tf.reduce_sum(match)/tf.reduce_sum(mask)
seq2seq.compile(
    optimizer='adam',
    loss=masked_loss, 
    metrics=[masked_acc, masked_loss]
)
# Train the model
seq2seq.fit(train_dataset, epochs=20, validation_data=val_dataset)
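# Training uses teacher forcing: the decoder receives the gold target shifted right
# (en_inputs, beginning with [START]) and learns to predict en_labels one step ahead.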
# Inference
class Inference(tf.Module):
    def __init__(self, model, tokenizer_1, tokenizer_2):
        super().__init__()
        self.model = model
        self.tokenizer_1 = tokenizer_1
        self.tokenizer_2 = tokenizer_2
    def __call__(self, sentence, MAX_TOKEN=128):
        assert isinstance(sentence, tf.Tensor)
        if len(sentence.shape) == 0:
            sentence = sentence[tf.newaxis]
        sentence = self.tokenizer_1(sentence)
        sentence = tf.concat([tf.ones(shape=[sentence.shape[0], 1], dtype='int32'), sentence, tf.ones(shape=[sentence.shape[0], 1], dtype='int32')*2], axis=-1).to_tensor()
        encoder_input = sentence
        
        start = tf.constant(1, dtype='int64')[tf.newaxis]
        end = tf.constant(2, dtype='int64')[tf.newaxis]
        # tf.TensorArray works like a Python list
        output_array = tf.TensorArray(dtype=tf.int64, size=0, dynamic_size=True)
        # Write start at index 0
        output_array = output_array.write(0, start)
        
        for i in tf.range(MAX_TOKEN):
            output = tf.transpose(output_array.stack())
            predictions = self.model.predict((encoder_input, output), verbose=0) # Shape `(batch_size, seq_len, vocab_size)`
            
            # Select the last token along the seq_len dimension
            predictions = predictions[:, -1:, :]  # Shape `(batch_size, 1, vocab_size)`.
            predicted_id = tf.argmax(predictions, axis=-1)

            # Append `predicted_id` to output_array so it becomes part of the next decoder input
            output_array = output_array.write(i+1, predicted_id[0])
            # Stop once [END] is produced
            if predicted_id == end:
                break
        output = tf.squeeze(output_array.stack())
        output = self.tokenizer_2.detokenize(output)
        
        return output
inference = Inference(seq2seq, chinese_tokenizer, english_tokenizer)
# Run inference
sentence = '你好'
sentence = tf.constant(sentence)
inference(sentence)
# Output
# <tf.Tensor: shape=(), dtype=string, numpy=b"[START] hello ! [END]">
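
As an optional extra (not part of the original listing): CrossAttention stores its scores in last_attention_score, so the source-target alignment can be visualized. A minimal sketch, assuming the model has been called eagerly at least once (e.g. seq2seq((ch, en)) above), so the stored scores are concrete values:

scores = seq2seq.decoder.attention.last_attention_score  # (batch, heads, target_len, source_len)
plt.matshow(scores[0, 0], cmap='viridis')  # head 0 of the first element in the batch
plt.xlabel('source (Chinese) token position')
plt.ylabel('target (English) token position')
plt.show()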

[seq2seq] Paper Implementation: Effective Approaches to Attention-based Neural Machine Translation (Part 2): https://developer.aliyun.com/article/1504076?spm=a2c6h.13148508.setting.40.36834f0eMJOehx
