Author: Ankur Singh
Date created: 2020/09/18
Last modified: 2024/03/15
Description: Implement a Masked Language Model (MLM) with BERT and fine-tune it on the IMDB Reviews dataset.
Masked Language Modeling is a fill-in-the-blank task, where a model uses the context words surrounding a mask token to try to predict what the masked word should be.
For an input that contains one or more mask tokens, the model will generate the most likely substitution for each.
Example:
Input: "I have watched this [MASK] and it was awesome."
Output: "I have watched this movie and it was awesome."
Masked language modeling is a great way to train a language model in a self-supervised setting (without human-annotated labels), and the resulting model can then be fine-tuned to accomplish various supervised NLP tasks.
This example will teach you how to build a BERT model from scratch, train it with the masked language modeling task, and then fine-tune it on a sentiment classification task.
We will use the Keras TextVectorization and MultiHeadAttention layers to create a BERT Transformer-Encoder network architecture.
Note: This example should be run with tf-nightly, which you can install via pip install tf-nightly.
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras_nlp
import keras
import tensorflow as tf
from keras import layers
from keras.layers import TextVectorization
from dataclasses import dataclass
import pandas as pd
import numpy as np
import glob
import re
from pprint import pprint
@dataclass
class Config:
MAX_LEN = 256
BATCH_SIZE = 32
LR = 0.001
VOCAB_SIZE = 30000
EMBED_DIM = 128
NUM_HEAD = 8  # used in the bert model
FF_DIM = 128  # used in the bert model
NUM_LAYERS = 1
config = Config()
We will first download the IMDB data and load it into a Pandas dataframe.
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
def get_text_list_from_files(files):
text_list = []
for name in files:
with open(name) as f:
for line in f:
text_list.append(line)
return text_list
def get_data_from_text_files(folder_name):
pos_files = glob.glob("aclImdb/" + folder_name + "/pos/*.txt")
pos_texts = get_text_list_from_files(pos_files)
neg_files = glob.glob("aclImdb/" + folder_name + "/neg/*.txt")
neg_texts = get_text_list_from_files(neg_files)
df = pd.DataFrame(
{
"review": pos_texts + neg_texts,
"sentiment": [0] * len(pos_texts) + [1] * len(neg_texts),
}
)
df = df.sample(len(df)).reset_index(drop=True)
return df
train_df = get_data_from_text_files("train")
test_df = get_data_from_text_files("test")
all_data = pd.concat([train_df, test_df])  # DataFrame.append was removed in recent pandas versions
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 80.2M 100 80.2M 0 0 45.3M 0 0:00:01 0:00:01 --:--:-- 45.3M
We will use the TextVectorization layer to vectorize the text into integer token ids.
It transforms a batch of strings into either:
a sequence of token indices (one sample = 1D array of integer token indices, in order),
or a dense representation (one sample = 1D array of float values encoding an unordered set of tokens).
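For instance, here is a minimal sketch of the "int" output mode on a made-up toy corpus (separate from the IMDB pipeline below):
from keras.layers import TextVectorization

# Toy corpus, purely for illustration.
toy_texts = ["the movie was great", "the movie was terrible"]
toy_vectorizer = TextVectorization(
    max_tokens=20, output_mode="int", output_sequence_length=6
)
toy_vectorizer.adapt(toy_texts)

# Each string becomes a fixed-length 1D array of integer token ids,
# padded with 0 up to output_sequence_length.
print(toy_vectorizer(["the movie was great"]))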
Below, we define 3 preprocessing functions.
The get_vectorize_layer function builds the TextVectorization layer.
The encode function encodes raw text into integer token ids.
The get_masked_input_and_labels function masks input token ids: it randomly masks 15% of the input tokens in each sequence.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, "[%s]" % re.escape("!#$%&'()*+,-./:;<=>?@\^_`{|}~"), ""
)
def get_vectorize_layer(texts, vocab_size, max_seq, special_tokens=["[MASK]"]):
"""构建文本向量化层
Args:
texts (list): 字符串列表,即输入文本
vocab_size (int): 词汇表大小
max_seq (int): 最大序列长度。
special_tokens (list, optional): 特殊符号列表。默认为 ['[MASK]']。
Returns:
layers.Layer: 返回 TextVectorization Keras 层
"""
vectorize_layer = TextVectorization(
max_tokens=vocab_size,
output_mode="int",
standardize=custom_standardization,
output_sequence_length=max_seq,
)
vectorize_layer.adapt(texts)
# Insert the mask token into the vocabulary
vocab = vectorize_layer.get_vocabulary()
vocab = vocab[2 : vocab_size - len(special_tokens)] + ["[mask]"]
vectorize_layer.set_vocabulary(vocab)
return vectorize_layer
vectorize_layer = get_vectorize_layer(
all_data.review.values.tolist(),
config.VOCAB_SIZE,
config.MAX_LEN,
special_tokens=["[mask]"],
)
# Get the mask token id for the masked language model
mask_token_id = vectorize_layer(["[mask]"]).numpy()[0][0]
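Since "[mask]" was appended as the last entry of the vocabulary, mask_token_id should be the final vocabulary index. A minimal sanity-check sketch (illustrative only, not required by the pipeline):
# Confirm that the retrieved id maps back to the "[mask]" token.
vocab = vectorize_layer.get_vocabulary()
assert vocab[mask_token_id] == "[mask]"
print("mask token id:", mask_token_id)  # expected: the last index in the vocabulary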
def encode(texts):
encoded_texts = vectorize_layer(texts)
return encoded_texts.numpy()
def get_masked_input_and_labels(encoded_texts):
# 15% BERT masking
inp_mask = np.random.rand(*encoded_texts.shape) < 0.15
# Do not mask special tokens
inp_mask[encoded_texts <= 2] = False
# Set targets to -1 by default, which means "ignore"
labels = -1 * np.ones(encoded_texts.shape, dtype=int)
# Set labels for masked tokens
labels[inp_mask] = encoded_texts[inp_mask]
# Prepare input
encoded_texts_masked = np.copy(encoded_texts)
# Replace 90% of the selected tokens with [MASK] (the last token in the vocabulary);
# the remaining 10% of selected tokens are left unchanged
inp_mask_2mask = inp_mask & (np.random.rand(*encoded_texts.shape) < 0.90)
encoded_texts_masked[inp_mask_2mask] = mask_token_id  # mask token is the last in the dict
# Set 10% of the selected tokens (1/9 of the 90% above) to a random token
inp_mask_2random = inp_mask_2mask & (np.random.rand(*encoded_texts.shape) < 1 / 9)
encoded_texts_masked[inp_mask_2random] = np.random.randint(
3, mask_token_id, inp_mask_2random.sum()
)
# Prepare sample_weights to pass to the .fit() method
sample_weights = np.ones(labels.shape)
sample_weights[labels == -1] = 0
# y_labels would be the same as encoded_texts, i.e. the input tokens
y_labels = np.copy(encoded_texts)
return encoded_texts_masked, y_labels, sample_weights
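To see what this masking scheme produces in practice, here is a minimal sketch on a hand-made batch of token ids (the toy values and shapes are invented purely for illustration; only mask_token_id and the function above are reused):
# Toy batch of "already encoded" reviews: 2 sequences of 10 token ids each.
toy_encoded = np.random.randint(3, 100, size=(2, 10))

toy_masked, toy_labels, toy_weights = get_masked_input_and_labels(toy_encoded)

# Positions with weight 1 were selected for prediction; of those, roughly 80%
# appear as mask_token_id in toy_masked, ~10% as a random id, ~10% unchanged.
print("original:", toy_encoded)
print("masked:  ", toy_masked)
print("labels:  ", toy_labels)
print("weights: ", toy_weights)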
# We have 25000 examples for training
x_train = encode(train_df.review.values)  # encode reviews with the vectorizer
y_train = train_df.sentiment.values
train_classifier_ds = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(1000)
.batch(config.BATCH_SIZE)
)
# We have 25000 examples for testing
x_test = encode(test_df.review.values)
y_test = test_df.sentiment.values
test_classifier_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(
config.BATCH_SIZE
)
# Build a dataset for the end-to-end model input (will be used at the end)
test_raw_classifier_ds = tf.data.Dataset.from_tensor_slices(
(test_df.review.values, y_test)
).batch(config.BATCH_SIZE)
# Prepare the data for the masked language model
x_all_review = encode(all_data.review.values)
x_masked_train, y_masked_labels, sample_weights = get_masked_input_and_labels(
x_all_review
)
mlm_ds = tf.data.Dataset.from_tensor_slices(
(x_masked_train, y_masked_labels, sample_weights)
)
mlm_ds = mlm_ds.shuffle(1000).batch(config.BATCH_SIZE)
We will create a BERT-like pretraining model architecture using the MultiHeadAttention layer.
It will take token ids as inputs (including masked tokens) and it will predict the correct ids for the masked input tokens.
def bert_module(query, key, value, i):
# Multi headed self-attention
attention_output = layers.MultiHeadAttention(
num_heads=config.NUM_HEAD,
key_dim=config.EMBED_DIM // config.NUM_HEAD,
name="encoder_{}_multiheadattention".format(i),
)(query, key, value)
attention_output = layers.Dropout(0.1, name="encoder_{}_att_dropout".format(i))(
attention_output
)
attention_output = layers.LayerNormalization(
epsilon=1e-6, name="encoder_{}_att_layernormalization".format(i)
)(query + attention_output)
# Feed-forward layer
ffn = keras.Sequential(
[
layers.Dense(config.FF_DIM, activation="relu"),
layers.Dense(config.EMBED_DIM),
],
name="encoder_{}_ffn".format(i),
)
ffn_output = ffn(attention_output)
ffn_output = layers.Dropout(0.1, name="encoder_{}_ffn_dropout".format(i))(
ffn_output
)
sequence_output = layers.LayerNormalization(
epsilon=1e-6, name="encoder_{}_ffn_layernormalization".format(i)
)(attention_output + ffn_output)
return sequence_output
loss_fn = keras.losses.SparseCategoricalCrossentropy(reduction=None)
loss_tracker = keras.metrics.Mean(name="loss")
class MaskedLanguageModel(keras.Model):
def train_step(self, inputs):
if len(inputs) == 3:
features, labels, sample_weight = inputs
else:
features, labels = inputs
sample_weight = None
with tf.GradientTape() as tape:
predictions = self(features, training=True)
loss = loss_fn(labels, predictions, sample_weight=sample_weight)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Compute our own metrics
loss_tracker.update_state(loss, sample_weight=sample_weight)
# Return a dict mapping metric names to current value
return {"loss": loss_tracker.result()}
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch
# or at the start of `evaluate()`.
# If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.
return [loss_tracker]
def create_masked_language_bert_model():
inputs = layers.Input((config.MAX_LEN,), dtype="int64")
word_embeddings = layers.Embedding(
config.VOCAB_SIZE, config.EMBED_DIM, name="word_embedding"
)(inputs)
position_embeddings = keras_nlp.layers.PositionEmbedding(
sequence_length=config.MAX_LEN
)(word_embeddings)
embeddings = word_embeddings + position_embeddings
encoder_output = embeddings
for i in range(config.NUM_LAYERS):
encoder_output = bert_module(encoder_output, encoder_output, encoder_output, i)
mlm_output = layers.Dense(config.VOCAB_SIZE, name="mlm_cls", activation="softmax")(
encoder_output
)
mlm_model = MaskedLanguageModel(inputs, mlm_output, name="masked_bert_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
mlm_model.compile(optimizer=optimizer)
return mlm_model
id2token = dict(enumerate(vectorize_layer.get_vocabulary()))
token2id = {y: x for x, y in id2token.items()}
class MaskedTextGenerator(keras.callbacks.Callback):
def __init__(self, sample_tokens, top_k=5):
self.sample_tokens = sample_tokens
self.k = top_k
def decode(self, tokens):
return " ".join([id2token[t] for t in tokens if t != 0])
def convert_ids_to_tokens(self, id):
return id2token[id]
def on_epoch_end(self, epoch, logs=None):
prediction = self.model.predict(self.sample_tokens)
masked_index = np.where(self.sample_tokens == mask_token_id)
masked_index = masked_index[1]
mask_prediction = prediction[0][masked_index]
top_indices = mask_prediction[0].argsort()[-self.k :][::-1]
values = mask_prediction[0][top_indices]
for i in range(len(top_indices)):
p = top_indices[i]
v = values[i]
tokens = np.copy(sample_tokens[0])
tokens[masked_index[0]] = p
result = {
"input_text": self.decode(sample_tokens[0].numpy()),
"prediction": self.decode(tokens),
"probability": v,
"predicted mask token": self.convert_ids_to_tokens(p),
}
pprint(result)
sample_tokens = vectorize_layer(["I have watched this [mask] and it was awesome"])
generator_callback = MaskedTextGenerator(sample_tokens.numpy())
bert_masked_model = create_masked_language_bert_model()
bert_masked_model.summary()
模型: "masked_bert_model"
__________________________________________________________________________________________________
层 (类型) 输出形状 参数数 连接至
==================================================================================================
input_1 (输入层) [(None, 256)] 0
__________________________________________________________________________________________________
word_embedding (嵌入层) (None, 256, 128) 3840000 input_1[0][0]
__________________________________________________________________________________________________
tf.__operators__.add (TFOpLambd (None, 256, 128) 0 word_embedding[0][0]
__________________________________________________________________________________________________
encoder_0/multiheadattention (M (None, 256, 128) 66048 tf.__operators__.add[0][0]
tf.__operators__.add[0][0]
tf.__operators__.add[0][0]
__________________________________________________________________________________________________
encoder_0/att_dropout (丢弃层) (None, 256, 128) 0 encoder_0/multiheadattention[0][0
__________________________________________________________________________________________________
tf.__operators__.add_1 (TFOpLam (None, 256, 128) 0 tf.__operators__.add[0][0]
encoder_0/att_dropout[0][0]
__________________________________________________________________________________________________
encoder_0/att_layernormalizatio (None, 256, 128) 256 tf.__operators__.add_1[0][0]
__________________________________________________________________________________________________
encoder_0/ffn (顺序模型) (None, 256, 128) 33024 encoder_0/att_layernormalization[
__________________________________________________________________________________________________
encoder_0/ffn_dropout (丢弃层) (None, 256, 128) 0 encoder_0/ffn[0][0]
__________________________________________________________________________________________________
tf.__operators__.add_2 (TFOpLam (None, 256, 128) 0 encoder_0/att_layernormalization[
encoder_0/ffn_dropout[0][0]
__________________________________________________________________________________________________
encoder_0/ffn_layernormalizatio (None, 256, 128) 256 tf.__operators__.add_2[0][0]
__________________________________________________________________________________________________
mlm_cls (密集层) (None, 256, 30000) 3870000 encoder_0/ffn_layernormalization[
==================================================================================================
参数总数: 7,809,584
可训练参数: 7,809,584
不可训练参数: 0
__________________________________________________________________________________________________
bert_masked_model.fit(mlm_ds, epochs=5, callbacks=[generator_callback])
bert_masked_model.save("bert_mlm_imdb.keras")
Epoch 1/5
1563/1563 [==============================] - ETA: 0s - loss: 7.0111{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'this',
'prediction': 'i have watched this this and it was awesome',
'probability': 0.086307295}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'i',
'prediction': 'i have watched this i and it was awesome',
'probability': 0.066265985}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'movie',
'prediction': 'i have watched this movie and it was awesome',
'probability': 0.044195656}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'a',
'prediction': 'i have watched this a and it was awesome',
'probability': 0.04020928}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'was',
'prediction': 'i have watched this was and it was awesome',
'probability': 0.027878676}
1563/1563 [==============================] - 661s 423ms/step - loss: 7.0111
Epoch 2/5
1563/1563 [==============================] - ETA: 0s - loss: 6.4498{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'movie',
'prediction': 'i have watched this movie and it was awesome',
'probability': 0.44448906}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'film',
'prediction': 'i have watched this film and it was awesome',
'probability': 0.1507494}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'was',
'prediction': 'i have watched this was and it was awesome',
'probability': 0.06385628}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'a',
'prediction': 'i have watched this a and it was awesome',
'probability': 0.023549262}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'is',
'prediction': 'i have watched this is and it was awesome',
'probability': 0.022277055}
1563/1563 [==============================] - 660s 422ms/step - loss: 6.4498
Epoch 3/5
1563/1563 [==============================] - ETA: 0s - loss: 5.8709{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'movie',
'prediction': 'i have watched this movie and it was awesome',
'probability': 0.4759983}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'film',
'prediction': 'i have watched this film and it was awesome',
'probability': 0.18642229}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'a',
'prediction': 'i have watched this a and it was awesome',
'probability': 0.045611132}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'was',
'prediction': 'i have watched this was and it was awesome',
'probability': 0.028308254}
{'input_text': 'i have watched this [mask] and it was awesome',
'predicted mask token': 'series',
'prediction': 'i have watched this series and it was awesome',
'probability': 0.027862877}
1563/1563 [==============================] - 661s 423ms/step - loss: 5.8709
Epoch 4/5
771/1563 [=============>................] - ETA: 5:35 - loss: 5.3782
We will fine-tune our self-supervised model on a downstream task of sentiment classification.
To do this, let's create a classifier by adding a pooling layer and a Dense layer on top of the pretrained BERT features.
# Load the pretrained BERT model
mlm_model = keras.models.load_model(
"bert_mlm_imdb.keras", custom_objects={"MaskedLanguageModel": MaskedLanguageModel}
)
pretrained_bert_model = keras.Model(
mlm_model.input, mlm_model.get_layer("encoder_0_ffn_layernormalization").output
)
# Freeze it
pretrained_bert_model.trainable = False
def create_classifier_bert_model():
inputs = layers.Input((config.MAX_LEN,), dtype="int64")
sequence_output = pretrained_bert_model(inputs)
pooled_output = layers.GlobalMaxPooling1D()(sequence_output)
hidden_layer = layers.Dense(64, activation="relu")(pooled_output)
outputs = layers.Dense(1, activation="sigmoid")(hidden_layer)
classifer_model = keras.Model(inputs, outputs, name="classification")
optimizer = keras.optimizers.Adam()
classifer_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
return classifer_model
classifer_model = create_classifier_bert_model()
classifer_model.summary()
# Train the classifier with the frozen BERT stage
classifer_model.fit(
train_classifier_ds,
epochs=5,
validation_data=test_classifier_ds,
)
# Unfreeze the BERT model for fine-tuning
pretrained_bert_model.trainable = True
optimizer = keras.optimizers.Adam()
classifer_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
classifer_model.fit(
train_classifier_ds,
epochs=5,
validation_data=test_classifier_ds,
)
Model: "classification"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 256)] 0
_________________________________________________________________
model (Functional) (None, 256, 128) 3939584
_________________________________________________________________
global_max_pooling1d (Global (None, 128) 0
_________________________________________________________________
dense_2 (Dense) (None, 64) 8256
_________________________________________________________________
dense_3 (Dense) (None, 1) 65
=================================================================
Total params: 3,947,905
Trainable params: 8,321
Non-trainable params: 3,939,584
_________________________________________________________________
Epoch 1/5
782/782 [==============================] - 15s 19ms/step - loss: 0.8096 - accuracy: 0.5498 - val_loss: 0.6406 - val_accuracy: 0.6329
Epoch 2/5
782/782 [==============================] - 14s 18ms/step - loss: 0.6551 - accuracy: 0.6220 - val_loss: 0.6423 - val_accuracy: 0.6338
Epoch 3/5
782/782 [==============================] - 14s 18ms/step - loss: 0.6473 - accuracy: 0.6310 - val_loss: 0.6380 - val_accuracy: 0.6350
Epoch 4/5
782/782 [==============================] - 14s 18ms/step - loss: 0.6307 - accuracy: 0.6471 - val_loss: 0.6432 - val_accuracy: 0.6312
Epoch 5/5
782/782 [==============================] - 14s 18ms/step - loss: 0.6278 - accuracy: 0.6465 - val_loss: 0.6107 - val_accuracy: 0.6678
Epoch 1/5
782/782 [==============================] - 46s 59ms/step - loss: 0.5234 - accuracy: 0.7373 - val_loss: 0.3533 - val_accuracy: 0.8427
Epoch 2/5
782/782 [==============================] - 45s 57ms/step - loss: 0.2808 - accuracy: 0.8814 - val_loss: 0.3252 - val_accuracy: 0.8633
Epoch 3/5
782/782 [==============================] - 43s 55ms/step - loss: 0.1493 - accuracy: 0.9413 - val_loss: 0.4374 - val_accuracy: 0.8486
Epoch 4/5
782/782 [==============================] - 43s 55ms/step - loss: 0.0600 - accuracy: 0.9803 - val_loss: 0.6422 - val_accuracy: 0.8380
Epoch 5/5
782/782 [==============================] - 43s 55ms/step - loss: 0.0305 - accuracy: 0.9893 - val_loss: 0.6064 - val_accuracy: 0.8440
<tensorflow.python.keras.callbacks.History at 0x7f35af4367f0>
When you want to deploy a model, it's best if it already includes its preprocessing pipeline, so that you don't have to reimplement the preprocessing logic in your production environment. Let's create an end-to-end model that incorporates the TextVectorization layer, and evaluate it. Our model will accept raw strings as input.
def get_end_to_end(model):
inputs_string = keras.Input(shape=(1,), dtype="string")
indices = vectorize_layer(inputs_string)
outputs = model(indices)
end_to_end_model = keras.Model(inputs_string, outputs, name="end_to_end_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
end_to_end_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
return end_to_end_model
end_to_end_classification_model = get_end_to_end(classifer_model)
end_to_end_classification_model.evaluate(test_raw_classifier_ds)
782/782 [==============================] - 8s 11ms/step - loss: 0.5967 - accuracy: 0.8446
[0.6064175963401794, 0.8439599871635437]
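As a final usage sketch, the end-to-end model can score a raw string directly (the review below is invented; recall from the data loading step that positive reviews were labeled 0 and negative reviews 1, so a score close to 1 means the model leans towards "negative"):
import tensorflow as tf

# Hypothetical raw review, not taken from the dataset.
sample_review = tf.constant(["This movie was a complete waste of time."])

# Returns a sigmoid score per input string.
print(end_to_end_classification_model.predict(sample_review))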