import tensorflow as tf
from official.transformer.model import attention_layer
from official.transformer.model import beam_search
from official.transformer.model import embedding_layer
from official.transformer.model import ffn_layer
from official.transformer.model import model_utils
from official.transformer.utils.tokenizer import EOS_ID
class Transformer(object):
"""
transformer模型由encoder和decoder创建。输入是int序列,encoder产生连续输出,decoder使用ecoder output输出序列概率
"""
    def __init__(self, params, train):
        """
        Initialize the Transformer model.
        :param params: hyperparameters, e.g. layer size, dropout rate, etc.
        :param train: in training mode, dropout is applied
        :return:
        """
        self.train = train
        self.params = params
        # Create the embedding layer: shared input/output embedding plus positional encoding.
        # 'matmul' trains faster on TPU; 'gather' is faster on CPU/GPU.
        self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(
            params['vocab_size'], params['hidden_size'],
            method='matmul' if params['tpu'] else 'gather'
        )
        # Encoder and decoder stacks, used by encode() and decode() below.
        self.encoder_stack = EncoderStack(params, train)
        self.decoder_stack = DecoderStack(params, train)
    def __call__(self, inputs, targets=None):
        """
        Compute model outputs for the training or prediction stage.
        :param inputs: int tensor, shape [batch_size, input_length]
        :param targets: None, or int tensor of shape [batch_size, target_length]
        :return: in training mode, logits of shape [batch_size, target_length, vocab_size];
            in prediction mode, a dict:
            {
                outputs: [batch_size, decoded_length]
                # beam-search sequence scores (length-normalized log-probabilities)
                scores: [batch_size, float]
            }
        """
        # Variance-scaling initializer.
        initializer = tf.variance_scaling_initializer(
            self.params['initializer_gain'], mode='fan_avg',
            distribution='uniform'
        )
        with tf.variable_scope('transformer', initializer=initializer):
            # Compute the attention bias used by the encoder and decoder.
            attention_bias = model_utils.get_padding_bias(inputs)
            # Run the encoder.
            encoder_outputs = self.encode(inputs, attention_bias)
            # Training and prediction modes return different outputs.
            if targets is None:
                return self.predict(encoder_outputs, attention_bias)
            else:
                logits = self.decode(targets, encoder_outputs, attention_bias)
                return logits
def encode(self,inputs,attention_bias):
"""
:param inputs: int shape[batch_size,input_length]
:param attention_bias:float shape[batch_size,1,1,input_length]
:return: float shape[batch_size,input_length,hidden_size]
"""
with tf.name_scope('encode'):
            # encoder_inputs are built by summing the input embedding and the positional
            # encoding (their dimensions must match so they can be added), then applying dropout.
embedding_inputs=self.embedding_softmax_layer(inputs)
inputs_padding=model_utils.get_padding(inputs)
with tf.name_scope('add_pos_encoding'):
                length = tf.shape(embedding_inputs)[1]
                pos_encoding = model_utils.get_position_encoding(
                    length, self.params['hidden_size']
                )
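                # get_position_encoding produces the sinusoidal position encoding from
                # "Attention Is All You Need": sine and cosine signals at geometrically
                # spaced frequencies, giving every position a unique signal with the same
                # hidden_size dimensionality as the embeddings, so the two can be summed.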
encoder_inputs=embedding_inputs+pos_encoding
            # Apply dropout in training mode.
if self.train:
encoder_inputs=tf.nn.dropout(
encoder_inputs,1-self.params['layer_postprocess_dropout']
)
            # Both the encoder and the decoder default to 6 layers.
            return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding)
def decode(self,targets,encoder_outputs,attention_bias):
"""
        :param targets: int shape [batch_size, target_length]
        :param encoder_outputs: float shape [batch_size, input_length, hidden_size]
        :param attention_bias: float shape [batch_size, 1, 1, input_length]
        :return: float shape [batch_size, target_length, vocab_size]
"""
        with tf.name_scope('decode'):
            # Shift the decoder input one position to the right: prepend a start symbol and
            # drop the last position, so the decoder learns to predict the full targets.
            # Positional encoding and dropout are added afterwards.
            decoder_inputs = self.embedding_softmax_layer(targets)
            # Shift right by one position and drop the last time step.
            with tf.name_scope('shift_targets'):
                decoder_inputs = tf.pad(
                    decoder_inputs, [[0, 0], [1, 0], [0, 0]]
                )[:, :-1, :]
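            # Concrete example of the shift: if the embedded targets along the time axis
            # are [e(t1), e(t2), e(t3)], the padded-and-truncated decoder input is
            # [0, e(t1), e(t2)], i.e. a zero "start" vector followed by all but the last
            # target embedding, so position k is predicted from targets < k (teacher forcing).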
            # Add positional encoding.
with tf.name_scope('add_pos_encoding'):
length=tf.shape(decoder_inputs)[1]
decoder_inputs+=model_utils.get_position_encoding(
length,self.params['hidden_size']
)
            # Apply dropout in training mode.
            if self.train:
                decoder_inputs = tf.nn.dropout(
                    decoder_inputs, 1 - self.params['layer_postprocess_dropout']
                )
            # Bias that keeps the decoder from attending to future positions, then run the decoder stack.
decoder_self_attention_bias=model_utils.get_decoder_self_attention_bias(length)
outputs=self.decoder_stack(
decoder_inputs,encoder_outputs,decoder_self_attention_bias,attention_bias
)
logits=self.embedding_softmax_layer.linear(outputs)
return logits
    def _get_symbols_to_logits_fn(self, max_decode_length):
        """
        Returns a function that computes the logits for the next generated token.
        :param max_decode_length:
        :return:
        """
        timing_signal = model_utils.get_position_encoding(
            max_decode_length + 1, self.params['hidden_size']
        )
        decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(max_decode_length)
        def symbols_to_logits_fn(ids, i, cache):
            """
            Generate the logits for the next token ID.
            :param ids: the sequence decoded so far
            :param i: loop index (current decoding step)
            :param cache: dict holding the encoder output, the encoder-decoder attention
                bias, and the previously computed decoder attention values
            :return: ([batch_size * beam_size, vocab_size], updated cache values)
            """
            # Use only the last generated ID as the decoder input.
            decoder_input = ids[:, -1:]
            # Embed the decoder input and add the timing signal for the current position.
            decoder_input = self.embedding_softmax_layer(decoder_input)
            decoder_input += timing_signal[i:i + 1]
            self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]
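            # At step i only the newest token is processed: timing_signal[i:i + 1] adds the
            # position-i encoding, and the bias slice keeps row i of the causal mask so this
            # token may attend to positions 0..i. Keys/values for earlier positions come
            # from the cache rather than being recomputed.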
decoder_outputs=self.decoder_stack(
decoder_input,cache.get('encoder_outputs'),self_attention_bias,
cache.get('encoder_decoder_attention_bias'),cache
)
            # Final projection to vocabulary logits using the shared embedding weights (softmax layer).
logits=self.embedding_softmax_layer.linear(decoder_outputs)
logits=tf.squeeze(logits,axis=[1])
return logits,cache
return symbols_to_logits_fn
def predict(self,encoder_outputs,encoder_decoder_attention_bias):
"""
        :param encoder_outputs:
:param encoder_decoder_attention_bias:
:return:
"""
# encoder_outputs shape[batch_size,input_length,hidden_size]
batch_size=tf.shape(encoder_outputs)[0]
input_length=tf.shape(encoder_outputs)[1]
max_decode_length=input_length+self.params['extra_decode_length']
symbols_to_logits_fn=self._get_symbols_to_logits_fn(max_decode_length)
        # Initial IDs fed into symbols_to_logits_fn.
initial_ids=tf.zeros(shape=[batch_size],dtype=tf.int32)
        # Cache storing the decoder self-attention keys/values for every layer.
cache={
'layer_%d'%layer:{
'k':tf.zeros([batch_size,0,self.params['hidden_size']]),
'v':tf.zeros([batch_size,0,self.params['hidden_size']])
}for layer in range(self.params['num_hidden_layers'])
}
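        # The cached keys/values start with a time dimension of size 0; the self-attention
        # layers append one position per decoding step, so earlier positions are not
        # re-encoded at every step.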
cache['encoder_outputs']=encoder_outputs
cache['encoder_decoder_attention_bias']=encoder_decoder_attention_bias
        # Run beam search.
decoded_ids,scores=beam_search.sequence_beam_search(
symbols_to_logits_fn=symbols_to_logits_fn,
initial_ids=initial_ids,
initial_cache=cache,
vocab_size=self.params["vocab_size"],
beam_size=self.params["beam_size"],
alpha=self.params["alpha"],
max_decode_length=max_decode_length,
eos_id=EOS_ID
)
        # For each example in the batch, keep only the top-scoring beam (dropping the initial ID).
top_decoded_ids=decoded_ids[:,0,1:]
top_scores=scores[:,0]
return {'outputs':top_decoded_ids,"scores":top_scores}
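# A hedged usage sketch (not part of the original model): illustrative hyperparameter
# values and dummy int tensors showing how this Transformer is typically constructed and
# called in training mode. The canonical values live in the official model_params module.
def _transformer_usage_sketch():
    params = {
        'vocab_size': 32000, 'hidden_size': 512, 'num_hidden_layers': 6,
        'num_heads': 8, 'filter_size': 2048, 'initializer_gain': 1.0,
        'layer_postprocess_dropout': 0.1, 'attention_dropout': 0.1,
        'relu_dropout': 0.1, 'allow_ffn_pad': True, 'extra_decode_length': 50,
        'beam_size': 4, 'alpha': 0.6, 'tpu': False,
    }
    model = Transformer(params, train=True)
    inputs = tf.ones([8, 10], dtype=tf.int32)   # [batch_size, input_length]
    targets = tf.ones([8, 12], dtype=tf.int32)  # [batch_size, target_length]
    logits = model(inputs, targets)             # [batch_size, target_length, vocab_size]
    return logits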
class LayerNormalization(tf.keras.layers.Layer):
    # Layer normalization
    def __init__(self, hidden_size):
        super(LayerNormalization, self).__init__()
        self.hidden_size = hidden_size
def build(self,_):
        self.scale = tf.get_variable('layer_norm_scale', [self.hidden_size], initializer=tf.ones_initializer())
        self.bias = tf.get_variable('layer_norm_bias', [self.hidden_size], initializer=tf.zeros_initializer())
self.built=True
def call(self, x, epsilon=1e-6):
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return norm_x * self.scale + self.bias
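# A minimal pure-Python sketch (illustrative only) of the arithmetic in
# LayerNormalization.call, for a single feature vector with scale = 1 and bias = 0.
# For x = [1.0, 2.0, 3.0]: mean = 2.0, variance = 2/3, output ~= [-1.2247, 0.0, 1.2247].
def _layer_norm_sketch(x, epsilon=1e-6):
    mean = sum(x) / len(x)
    variance = sum((v - mean) ** 2 for v in x) / len(x)
    return [(v - mean) * (variance + epsilon) ** -0.5 for v in x]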
class PrePostProcessingWrapper(object):
"""
用于包装模型起点的attention层和最后的feed_forward全连接层
"""
def __int__(self,layer,params,train):
self.layer=layer,
# 每层都使用到dropout
self.postprocess_dropout=params['layer_postprocess_dropout']
self.train=train
self.layer_norm=LayerNormalization(params['hidden_size'])
def __call__(self, x,*args, **kwargs):
        # Layer normalization (applied before the sublayer).
y=self.layer_norm(x)
y=self.layer(y,*args,**kwargs)
        # In training mode apply dropout, then add the residual connection.
if self.train:
y=tf.nn.dropout(y,1-self.postprocess_dropout)
return x+y
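# A plain-Python sketch (no TensorFlow, names illustrative) of the pre-norm residual
# pattern PrePostProcessingWrapper implements: normalize, run the sublayer, optionally
# drop out, then add the un-normalized input back as the residual.
def _prenorm_residual_sketch(x, sublayer, layer_norm, dropout=lambda y: y):
    y = layer_norm(x)  # normalization happens before the sublayer ("pre-norm")
    y = sublayer(y)    # self-attention or feed-forward network
    y = dropout(y)     # identity at inference time
    return x + y       # residual connection
# Because the residual path carries un-normalized activations, EncoderStack and
# DecoderStack below apply one final LayerNormalization to the stack output.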
class EncoderStack(tf.keras.layers.Layer):
"""
模型默认6层encoder,每一层有两个子层:1,self-attention层,2,feedforward全连接层(此层内又有两个子层)
"""
def __init__(self,params,train):
super(EncoderStack,self).__init__()
self.layers=[]
for _ in range(params['num_hidden_layers']):
            # Create the sublayers.
            # Multi-head attention uses 8 heads by default.
self_attention_layer=attention_layer.SelfAttention(
params['hidden_size'],params['num_heads'],
params['attention_dropout'],train
)
            feed_forward_network=ffn_layer.FeedFowardNetwork(
                params['hidden_size'],params['filter_size'],
                params['relu_dropout'],train,params['allow_ffn_pad']
            )
self.layers.append([
PrePostProcessingWrapper(self_attention_layer,params,train),
PrePostProcessingWrapper(feed_forward_network,params,train)
])
        # Final layer normalization applied to the output of the stack.
self.output_normalization=LayerNormalization(params['hidden_size'])
def call(self,encoder_inputs,attention_bias,inputs_padding):
"""
        Return the output of the stacked encoder layers.
        :param encoder_inputs: float shape [batch_size, input_length, hidden_size]
:param attention_bias: shape[batch_size,1,1,input_length]
:param inputs_padding:
:return: float shape[batch_size,input_length,hidden_size]
"""
for n,layer in enumerate(self.layers):
self_attention_layer=layer[0]
feed_forward_network=layer[1]
with tf.variable_scope('layer_%d'%n):
with tf.variable_scope('self_attention'):
encoder_inputs=self_attention_layer(encoder_inputs,attention_bias)
with tf.variable_scope('ffn'):
encoder_inputs=feed_forward_network(encoder_inputs,inputs_padding)
        return self.output_normalization(encoder_inputs)
class DecoderStack(tf.keras.layers.Layer):
"""
层数与encoder一样,区别是decoder有三层
1,attention层
2,融合encoder output 前一个attention层的多头注意力层
3,feedforward全连接层(此层内又有两个子层)
"""
    def __init__(self, params, train):
        super(DecoderStack, self).__init__()
        self.layers = []
        for _ in range(params['num_hidden_layers']):
            # Self-attention layer.
self_attention_layer=attention_layer.SelfAttention(
params['hidden_size'],params['num_heads'],
params['attention_dropout'],train
)
            # Multi-head attention over the encoder output, queried by the previous sublayer's output.
enc_dec_attention_layer=attention_layer.Attention(
params['hidden_size'],params['num_heads'],
params['attention_dropout'],train
)
            # Feed-forward network (itself containing two dense sublayers).
feed_forward_network=ffn_layer.FeedFowardNetwork(
params['hidden_size'],params['filter_size'],
params['relu_dropout'],train,params['allow_ffn_pad']
)
self.layers.append([
PrePostProcessingWrapper(self_attention_layer,params,train),
PrePostProcessingWrapper(enc_dec_attention_layer,params,train),
PrePostProcessingWrapper(feed_forward_network,params,train)
])
        # Finally, add layer normalization for the stack output.
self.output_normalization=LayerNormalization(params['hidden_size'])
def call(self,decoder_inputs, encoder_outputs, decoder_self_attention_bias,
attention_bias, cache=None):
"""
:param decoder_inputs: shape[batch_size,target_length,hidden_size]
:param encoder_outputs: shape[batch_size,input_length,hidden_size]
        :param decoder_self_attention_bias: [1, 1, target_length, target_length]
:param attention_bias: shape[batch_size,1,1,input_length]
:param cache:
:return: float shape[batch_size,target_length,hidden_size]
"""
for n,layer in enumerate(self.layers):
            # The three sublayers of each decoder layer.
self_attention_layer=layer[0]
enc_dec_attention_layer=layer[1]
feed_forward_network=layer[2]
layer_name = "layer_%d" % n
layer_cache = cache[layer_name] if cache is not None else None
            # Run the inputs through the sublayers.
with tf.variable_scope(layer_name):
                with tf.variable_scope('self_attention'):
                    decoder_inputs=self_attention_layer(
                        decoder_inputs,decoder_self_attention_bias,cache=layer_cache
                    )
with tf.variable_scope('encdec_attention'):
decoder_inputs=enc_dec_attention_layer(
decoder_inputs,encoder_outputs,attention_bias
)
with tf.variable_scope('ffn'):
decoder_inputs=feed_forward_network(decoder_inputs)
        # Apply the final layer normalization.
return self.output_normalization(decoder_inputs)
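# A small pure-Python sketch (illustrative, not the library implementation) of the causal
# mask produced by model_utils.get_decoder_self_attention_bias: position row may attend to
# columns <= row; disallowed positions get a large negative bias so they vanish after the
# softmax. The real function returns this with shape [1, 1, length, length].
def _decoder_self_attention_bias_sketch(length, neg_inf=-1e9):
    return [[0.0 if col <= row else neg_inf for col in range(length)]
            for row in range(length)]
# _decoder_self_attention_bias_sketch(3) ->
# [[0, -1e9, -1e9],
#  [0,    0, -1e9],
#  [0,    0,    0]]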