This is a companion notebook for the book Deep Learning with Python, Second Edition. For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.
This notebook was generated for TensorFlow 2.6.
Beyond text classification: Sequence-to-sequence learning
A machine translation example
!wget http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip
!unzip -q spa-eng.zip
text_file = "spa-eng/spa.txt"
with open(text_file) as f:
    lines = f.read().split("\n")[:-1]
text_pairs = []
for line in lines:
    # Each line contains an English phrase and its Spanish translation, tab-separated.
    english, spanish = line.split("\t")
    # "[start]" and "[end]" mark the beginning and end of the Spanish target sentence.
    spanish = "[start] " + spanish + " [end]"
    text_pairs.append((english, spanish))
import random
print(random.choice(text_pairs))
import random
random.shuffle(text_pairs)
num_val_samples = int(0.15 * len(text_pairs))
num_train_samples = len(text_pairs) - 2 * num_val_samples
train_pairs = text_pairs[:num_train_samples]
val_pairs = text_pairs[num_train_samples:num_train_samples + num_val_samples]
test_pairs = text_pairs[num_train_samples + num_val_samples:]
Vectorizing the English and Spanish text pairs
import tensorflow as tf
import string
import re
from tensorflow import keras
from tensorflow.keras import layers

strip_chars = string.punctuation + "¿"
strip_chars = strip_chars.replace("[", "")
strip_chars = strip_chars.replace("]", "")

def custom_standardization(input_string):
    lowercase = tf.strings.lower(input_string)
    return tf.strings.regex_replace(
        lowercase, f"[{re.escape(strip_chars)}]", "")

vocab_size = 15000
sequence_length = 20

source_vectorization = layers.TextVectorization(
    max_tokens=vocab_size,
    output_mode="int",
    output_sequence_length=sequence_length,
)
target_vectorization = layers.TextVectorization(
    max_tokens=vocab_size,
    output_mode="int",
    # One extra token, since the Spanish sentences get offset by one step during training.
    output_sequence_length=sequence_length + 1,
    standardize=custom_standardization,
)

train_english_texts = [pair[0] for pair in train_pairs]
train_spanish_texts = [pair[1] for pair in train_pairs]
source_vectorization.adapt(train_english_texts)
target_vectorization.adapt(train_spanish_texts)
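A quick sanity check, not from the book: the adapted layers map raw strings to padded integer sequences (index 0 is padding, index 1 is the out-of-vocabulary token). The two example sentences here are made up.

# Not from the book: inspect what the adapted vectorizers return.
print(source_vectorization(["I love the beach."]))               # shape (1, 20)
print(target_vectorization(["[start] me encanta la playa [end]"]))  # shape (1, 21)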
Preparing datasets for the translation task
batch_size = 64

def format_dataset(eng, spa):
    eng = source_vectorization(eng)
    spa = target_vectorization(spa)
    return ({
        "english": eng,
        "spanish": spa[:, :-1],  # The decoder's input: the Spanish sentence minus its last token.
    }, spa[:, 1:])               # The target: the Spanish sentence offset by one step ahead.

def make_dataset(pairs):
    eng_texts, spa_texts = zip(*pairs)
    eng_texts = list(eng_texts)
    spa_texts = list(spa_texts)
    dataset = tf.data.Dataset.from_tensor_slices((eng_texts, spa_texts))
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(format_dataset, num_parallel_calls=4)
    return dataset.shuffle(2048).prefetch(16).cache()  # Use in-memory caching to speed up preprocessing.

train_ds = make_dataset(train_pairs)
val_ds = make_dataset(val_pairs)
for inputs, targets in train_ds.take(1):
    print(f"inputs['english'].shape: {inputs['english'].shape}")
    print(f"inputs['spanish'].shape: {inputs['spanish'].shape}")
    print(f"targets.shape: {targets.shape}")
Sequence-to-sequence learning with RNNs
GRU-based encoder
from tensorflow import keras
from tensorflow.keras import layers

embed_dim = 256
latent_dim = 1024

source = keras.Input(shape=(None,), dtype="int64", name="english")
x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(source)
encoded_source = layers.Bidirectional(
    layers.GRU(latent_dim), merge_mode="sum")(x)
GRU-based decoder and the end-to-end model
past_target = keras.Input(shape=(None,), dtype="int64", name="spanish")
x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(past_target)
decoder_gru = layers.GRU(latent_dim, return_sequences=True)
# The encoded source sentence serves as the initial state of the decoder GRU.
x = decoder_gru(x, initial_state=encoded_source)
x = layers.Dropout(0.5)(x)
# Predict the next token at each output step.
target_next_step = layers.Dense(vocab_size, activation="softmax")(x)
seq2seq_rnn = keras.Model([source, past_target], target_next_step)
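Optionally (not from the book), you can inspect the resulting architecture before training:

seq2seq_rnn.summary()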
Training our recurrent sequence-to-sequence model
seq2seq_rnn.compile(
    optimizer="rmsprop",
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"])
seq2seq_rnn.fit(train_ds, epochs=15, validation_data=val_ds)
Translating new sentences with our RNN encoder and decoder
import numpy as np

spa_vocab = target_vectorization.get_vocabulary()
spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))
max_decoded_sentence_length = 20

def decode_sequence(input_sentence):
    tokenized_input_sentence = source_vectorization([input_sentence])
    decoded_sentence = "[start]"  # Seed token
    for i in range(max_decoded_sentence_length):
        tokenized_target_sentence = target_vectorization([decoded_sentence])
        next_token_predictions = seq2seq_rnn.predict(
            [tokenized_input_sentence, tokenized_target_sentence])
        # Greedily sample the most likely next token...
        sampled_token_index = np.argmax(next_token_predictions[0, i, :])
        sampled_token = spa_index_lookup[sampled_token_index]
        # ...and append it to the decoded sentence so far.
        decoded_sentence += " " + sampled_token
        if sampled_token == "[end]":
            break  # Exit condition: the stop token was generated.
    return decoded_sentence

test_eng_texts = [pair[0] for pair in test_pairs]
for _ in range(20):
    input_sentence = random.choice(test_eng_texts)
    print("-")
    print(input_sentence)
    print(decode_sequence(input_sentence))
Sequence-to-sequence learning with Transformer
The Transformer decoder
The TransformerDecoder
class TransformerDecoder(layers.Layer):
    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.attention_1 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.attention_2 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.dense_proj = keras.Sequential(
            [layers.Dense(dense_dim, activation="relu"),
             layers.Dense(embed_dim),]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()
        self.layernorm_3 = layers.LayerNormalization()
        self.supports_masking = True  # This layer propagates its input mask to its outputs.

    def get_config(self):
        config = super().get_config()
        config.update({
            "embed_dim": self.embed_dim,
            "num_heads": self.num_heads,
            "dense_dim": self.dense_dim,
        })
        return config

    def get_causal_attention_mask(self, inputs):
        # Build a (batch_size, sequence_length, sequence_length) mask that is 1 where
        # position i may attend to position j (j <= i), and 0 elsewhere.
        input_shape = tf.shape(inputs)
        batch_size, sequence_length = input_shape[0], input_shape[1]
        i = tf.range(sequence_length)[:, tf.newaxis]
        j = tf.range(sequence_length)
        mask = tf.cast(i >= j, dtype="int32")
        mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
        mult = tf.concat(
            [tf.expand_dims(batch_size, -1),
             tf.constant([1, 1], dtype=tf.int32)], axis=0)
        return tf.tile(mask, mult)  # Replicate the mask along the batch axis.

    def call(self, inputs, encoder_outputs, mask=None):
        causal_mask = self.get_causal_attention_mask(inputs)
        if mask is not None:
            # Combine the padding mask with the causal mask.
            padding_mask = tf.cast(
                mask[:, tf.newaxis, :], dtype="int32")
            padding_mask = tf.minimum(padding_mask, causal_mask)
        else:
            padding_mask = mask
        # First attention block: causally masked self-attention over the target sequence.
        attention_output_1 = self.attention_1(
            query=inputs,
            value=inputs,
            key=inputs,
            attention_mask=causal_mask)
        attention_output_1 = self.layernorm_1(inputs + attention_output_1)
        # Second attention block: attend to the encoder's output (the source sentence).
        attention_output_2 = self.attention_2(
            query=attention_output_1,
            value=encoder_outputs,
            key=encoder_outputs,
            attention_mask=padding_mask,
        )
        attention_output_2 = self.layernorm_2(
            attention_output_1 + attention_output_2)
        proj_output = self.dense_proj(attention_output_2)
        return self.layernorm_3(attention_output_2 + proj_output)
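Optional sanity check, not from the book: for a dummy length-5 input, the causal attention mask is a 5 × 5 lower-triangular matrix, so each target position can only attend to itself and to earlier positions.

# Not from the book: peek at the causal mask for a small dummy input.
demo_decoder = TransformerDecoder(embed_dim=8, dense_dim=16, num_heads=2)
demo_inputs = tf.zeros((1, 5, 8))  # (batch, sequence, features) dummy tensor
print(demo_decoder.get_causal_attention_mask(demo_inputs)[0])  # 5x5 lower-triangular matrix of ones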
Putting it all together: A Transformer for machine translation
PositionalEmbedding layer
class PositionalEmbedding(layers.Layer):
    def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.token_embeddings = layers.Embedding(
            input_dim=input_dim, output_dim=output_dim)
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=output_dim)
        self.sequence_length = sequence_length
        self.input_dim = input_dim
        self.output_dim = output_dim

    def call(self, inputs):
        length = tf.shape(inputs)[-1]
        positions = tf.range(start=0, limit=length, delta=1)
        embedded_tokens = self.token_embeddings(inputs)
        embedded_positions = self.position_embeddings(positions)
        return embedded_tokens + embedded_positions

    def compute_mask(self, inputs, mask=None):
        return tf.math.not_equal(inputs, 0)

    def get_config(self):
        config = super(PositionalEmbedding, self).get_config()
        config.update({
            "output_dim": self.output_dim,
            "sequence_length": self.sequence_length,
            "input_dim": self.input_dim,
        })
        return config
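Optional sanity check, not from the book: besides summing token and position embeddings, PositionalEmbedding exposes a padding mask (zeros are padding) via compute_mask, which downstream layers consume. The token IDs below are arbitrary.

# Not from the book: check the output shape and the padding mask.
demo_embed = PositionalEmbedding(sequence_length, vocab_size, embed_dim)
demo_ids = tf.constant([[5, 42, 7, 0, 0]], dtype="int64")  # arbitrary token IDs; 0 = padding
print(demo_embed(demo_ids).shape)         # (1, 5, embed_dim)
print(demo_embed.compute_mask(demo_ids))  # [[True True True False False]]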
End-to-end Transformer
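Note: the end-to-end model below relies on the TransformerEncoder layer built earlier in chapter 11 (part 3 of these notebooks). If you run this part on its own, that class must be in scope; a definition matching the book's is reproduced here for convenience.

class TransformerEncoder(layers.Layer):
    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.attention = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.dense_proj = keras.Sequential(
            [layers.Dense(dense_dim, activation="relu"),
             layers.Dense(embed_dim),]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()

    def call(self, inputs, mask=None):
        if mask is not None:
            # Expand the padding mask to the shape MultiHeadAttention expects.
            mask = mask[:, tf.newaxis, :]
        attention_output = self.attention(
            inputs, inputs, attention_mask=mask)
        proj_input = self.layernorm_1(inputs + attention_output)
        proj_output = self.dense_proj(proj_input)
        return self.layernorm_2(proj_input + proj_output)

    def get_config(self):
        config = super().get_config()
        config.update({
            "embed_dim": self.embed_dim,
            "num_heads": self.num_heads,
            "dense_dim": self.dense_dim,
        })
        return config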
embed_dim = 256
dense_dim = 2048
num_heads = 8

encoder_inputs = keras.Input(shape=(None,), dtype="int64", name="english")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs)
encoder_outputs = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)

decoder_inputs = keras.Input(shape=(None,), dtype="int64", name="spanish")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs)
x = TransformerDecoder(embed_dim, dense_dim, num_heads)(x, encoder_outputs)

x = layers.Dropout(0.5)(x)
decoder_outputs = layers.Dense(vocab_size, activation="softmax")(x)
transformer = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
Training the sequence-to-sequence Transformer
transformer.compile(
    optimizer="rmsprop",
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"])
transformer.fit(train_ds, epochs=30, validation_data=val_ds)
Translating new sentences with our Transformer model
import numpy as np

spa_vocab = target_vectorization.get_vocabulary()
spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))
max_decoded_sentence_length = 20

def decode_sequence(input_sentence):
    tokenized_input_sentence = source_vectorization([input_sentence])
    decoded_sentence = "[start]"
    for i in range(max_decoded_sentence_length):
        # Drop the last token so the decoder input stays at sequence_length tokens,
        # matching what the model saw during training.
        tokenized_target_sentence = target_vectorization(
            [decoded_sentence])[:, :-1]
        predictions = transformer(
            [tokenized_input_sentence, tokenized_target_sentence])
        # Greedily pick the most likely next token...
        sampled_token_index = np.argmax(predictions[0, i, :])
        sampled_token = spa_index_lookup[sampled_token_index]
        # ...and append it to the decoded sentence so far.
        decoded_sentence += " " + sampled_token
        if sampled_token == "[end]":
            break
    return decoded_sentence

test_eng_texts = [pair[0] for pair in test_pairs]
for _ in range(20):
    input_sentence = random.choice(test_eng_texts)
    print("-")
    print(input_sentence)
    print(decode_sequence(input_sentence))