
Text classification with Transformer

Author: Apoorv Nandan
Date created: 2020/05/10
Last modified: 2024/01/18
Description: Implement a Transformer block as a Keras layer and use it for text classification.



Setup

import keras
from keras import ops
from keras import layers

Implement a Transformer block as a layer

class TransformerBlock(layers.Layer):
    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs):
        # Self-attention over the sequence, followed by dropout.
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output)
        # First residual connection + layer normalization.
        out1 = self.layernorm1(inputs + attn_output)
        # Position-wise feed-forward network, then the second residual + norm.
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output)
        return self.layernorm2(out1 + ffn_output)
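
As a quick, purely illustrative sanity check (not part of the original example; batch size and sequence length below are arbitrary), you can confirm that the block maps a (batch, sequence, embed_dim) tensor to a tensor of the same shape:

block = TransformerBlock(embed_dim=32, num_heads=2, ff_dim=32)
dummy = ops.ones((4, 10, 32))  # (batch, sequence length, embed_dim)
print(block(dummy).shape)  # (4, 10, 32): the block preserves the input shape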

Implement embedding layer

Two separate embedding layers, one for tokens, one for token index (positions).

class TokenAndPositionEmbedding(layers.Layer):
    def __init__(self, maxlen, vocab_size, embed_dim):
        super().__init__()
        self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)

    def call(self, x):
        # Look up a learned embedding for each position index 0..maxlen-1
        # and add it to the token embeddings.
        maxlen = ops.shape(x)[-1]
        positions = ops.arange(start=0, stop=maxlen, step=1)
        positions = self.pos_emb(positions)
        x = self.token_emb(x)
        return x + positions
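
A minimal sketch of what this layer produces (illustrative only, assuming the same maxlen, vocab_size, and embed_dim values used later in this example): integer token IDs go in, one embedding vector per position comes out.

embedding = TokenAndPositionEmbedding(maxlen=200, vocab_size=20000, embed_dim=32)
token_ids = ops.zeros((4, 200), dtype="int32")  # a batch of padded token-index sequences
print(embedding(token_ids).shape)  # (4, 200, 32)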

Download and prepare dataset

vocab_size = 20000  # Only consider the top 20k words
maxlen = 200  # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
x_train = keras.utils.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.utils.pad_sequences(x_val, maxlen=maxlen)
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17465344/17464789 [==============================] - 0s 0us/step

25000 Training sequences
25000 Validation sequences
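
If you want to inspect what the model actually sees, the word index shipped with the IMDB dataset can be used to decode a padded review back to words. This is an optional check, not part of the original example; note that load_data offsets the indices by 3, because 0, 1, and 2 are reserved for padding, start-of-sequence, and unknown tokens.

# Decode the first (padded) training review back to words.
word_index = keras.datasets.imdb.get_word_index()
index_to_word = {index + 3: word for word, index in word_index.items()}
index_to_word.update({0: "<pad>", 1: "<start>", 2: "<unk>"})
print(" ".join(index_to_word.get(int(i), "<unk>") for i in x_train[0]))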

Create classifier model using transformer layer

The transformer layer outputs one vector for each time step of the input sequence. Here, we take the mean across all time steps and use a feed-forward network on top of it to classify the text.

embed_dim = 32  # Embedding size for each token
num_heads = 2  # Number of attention heads
ff_dim = 32  # Hidden layer size in the feed-forward network inside the transformer

inputs = layers.Input(shape=(maxlen,))
embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
x = embedding_layer(inputs)
transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
x = transformer_block(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(20, activation="relu")(x)
x = layers.Dropout(0.1)(x)
outputs = layers.Dense(2, activation="softmax")(x)

model = keras.Model(inputs=inputs, outputs=outputs)
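
To make the pooling step described above concrete: GlobalAveragePooling1D simply averages the per-timestep vectors along the time axis. A rough equivalent written with keras.ops (illustrative only, on a dummy tensor) looks like this:

seq = ops.ones((4, maxlen, embed_dim))               # (batch, timesteps, features)
pooled_ops = ops.mean(seq, axis=1)                   # average over timesteps -> (4, 32)
pooled_layer = layers.GlobalAveragePooling1D()(seq)  # same result when no mask is used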

Train and Evaluate

model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(
    x_train, y_train, batch_size=32, epochs=2, validation_data=(x_val, y_val)
)
Epoch 1/2
782/782 [==============================] - 15s 18ms/step - loss: 0.5112 - accuracy: 0.7070 - val_loss: 0.3598 - val_accuracy: 0.8444
Epoch 2/2
782/782 [==============================] - 13s 17ms/step - loss: 0.1942 - accuracy: 0.9297 - val_loss: 0.2977 - val_accuracy: 0.8745
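
As a final illustrative step (not in the original example), the trained model can be used for inference on the padded validation data. The two softmax outputs correspond to the IMDB labels 0 (negative) and 1 (positive), so argmax gives the predicted sentiment class:

probs = model.predict(x_val[:1])     # shape (1, 2): class probabilities for one review
print(probs, probs.argmax(axis=-1))  # predicted class: 0 = negative, 1 = positive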