RNN for Sequence Generation

Sequence Generation:

  • If the sequence length is 10, the generated input sequence might be [0, 1, 2, 3, 4, 5, 6, 7, 8] (a minimal NumPy sketch of this windowing follows the list).

  • The RNN model predicts the next number, say 9.5.

  • The output would be:

  • Input Sequence: [0 1 2 3 4 5 6 7 8]

  • Next Number Prediction: 9.5
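To make the windowing concrete, the bullets above can be reproduced with a few lines of NumPy. A minimal sketch using the length-10 example; the variable names are illustrative, not from the original notebook:

import numpy as np

# A sequence of length 10 yields one input window [0..8] whose target is the next value
data = np.arange(10)  # [0, 1, ..., 9]
window = 9            # length of each input window

X, y = [], []
for i in range(len(data) - window):
    X.append(data[i:i + window])  # input window
    y.append(data[i + window])    # the next number (the target)

print(X[0])  # [0 1 2 3 4 5 6 7 8]
print(y[0])  # 9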

Example 1

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense

# Generate a simple sequence of numbers
def generate_sequence(length):
    return np.array([i for i in range(1, length + 1)])

# Prepare data
sequence_length = 30
data = generate_sequence(sequence_length)
data
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30])
# Create input-output pairs
X = []
y = []
for i in range(len(data) - 4):
    X.append(data[i:i+4])
    y.append(data[i+1:i+5])
X = np.array(X).reshape((-1, 4, 1))
y = np.array(y).reshape((-1, 4, 1))

# Print first few values of X and y
print("First few input sequences (X):")
print(X[:3])
print("\nFirst few target sequences (y):")
print(y[:3])
First few input sequences (X):
[[[1] [2] [3] [4]]
 [[2] [3] [4] [5]]
 [[3] [4] [5] [6]]]

First few target sequences (y):
[[[2] [3] [4] [5]]
 [[3] [4] [5] [6]]
 [[4] [5] [6] [7]]]
# Define RNN model
model = Sequential()
model.add(SimpleRNN(50, activation='relu', input_shape=(4, 1)))
model.add(Dense(4))
model.compile(optimizer='adam', loss='mse')
# Train the model
model.fit(X, y, epochs=300, verbose=0)
<keras.callbacks.History at 0x288655660a0>
# Generate a new sequence
input_sequence = np.array([[20, 21, 22, 23]]).reshape((1, 4, 1))
prediction = model.predict(input_sequence, verbose=0)
# Print input sequence in 2D format
print("\nInput Sequence (2D format):")
print(input_sequence.reshape((4, 1)))
Input Sequence (2D format):
[[20]
 [21]
 [22]
 [23]]
# Print the predicted next sequence
print("\nNext Sequence Prediction:")
print(prediction.flatten())
Next Sequence Prediction:
[20.98302  22.027891 23.179502 24.253681]
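Because the target window here is simply the input shifted one step forward, only the last value of each prediction is new. Feeding it back in lets the model extend the sequence indefinitely. A minimal sketch, assuming model is the network trained above (the starting window and number of steps are arbitrary choices):

# Autoregressive generation: slide the window forward by one predicted value at a time
window = np.array([20.0, 21.0, 22.0, 23.0])
generated = list(window)
for _ in range(5):
    pred = model.predict(window.reshape((1, 4, 1)), verbose=0).flatten()
    generated.append(pred[-1])                # only the last value is new
    window = np.append(window[1:], pred[-1])  # slide the window forward
print(np.round(generated, 2))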

Example 2

Generate sequences where the target Y is twice the next window of the input sequence X.

  • In this case, if X is [1, 2, 3, 4], the next window is [2, 3, 4, 5], so the corresponding Y is [4, 6, 8, 10].

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense

# Generate a simple sequence of numbers
def generate_sequence(length):
    return np.array([i for i in range(1, length + 1)])

# Prepare data
sequence_length = 30
data = generate_sequence(sequence_length)

# Create input-output pairs
X = []
y = []
for i in range(len(data) - 4):
    X.append(data[i:i+4])
    y.append(data[i+1:i+5] * 2)  # Y = 2 * next window
X = np.array(X).reshape((-1, 4, 1))
y = np.array(y).reshape((-1, 4, 1))

# Print first few values of X and y
print("First few input sequences (X):")
print(X[:3])
print("\nFirst few target sequences (y):")
print(y[:3])
First few input sequences (X):
[[[1] [2] [3] [4]]
 [[2] [3] [4] [5]]
 [[3] [4] [5] [6]]]

First few target sequences (y):
[[[ 4] [ 6] [ 8] [10]]
 [[ 6] [ 8] [10] [12]]
 [[ 8] [10] [12] [14]]]
# Define RNN model
model = Sequential()
model.add(SimpleRNN(50, activation='relu', input_shape=(4, 1)))
model.add(Dense(4))
model.compile(optimizer='adam', loss='mse')

# Train the model
model.fit(X, y, epochs=300, verbose=0)
<keras.callbacks.History at 0x288668b1e80>
# Generate a new sequence
input_sequence = np.array([[24, 25, 26, 27]]).reshape((1, 4, 1))
prediction = model.predict(input_sequence, verbose=0)

# Print input sequence in 2D format
print("\nInput Sequence (2D format):")
print(input_sequence.reshape((4, 1)))
Input Sequence (2D format):
[[24]
 [25]
 [26]
 [27]]
# Print the predicted sequence
print("\nNext Sequence Prediction (Y = 2 * next window):")
print(prediction.flatten())
Next Sequence Prediction (Y = 2 * next window):
[49.606647 52.26259  54.81207  57.636036]
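For the input [24, 25, 26, 27] the next window is [25, 26, 27, 28], so the ideal output is [50, 52, 54, 56]. The predictions are close but off by up to about 1.6, which motivates the adjustments below.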

Several adjustments can improve prediction accuracy:

  • Increase Model Complexity: Adding more layers and units helps the model learn richer patterns.

  • More Training Data: Increasing the sequence length provides the model with more training pairs to learn from.

  • More Epochs: Training for more epochs gives the model more time to converge.

  • Adjusting Learning Rate: A learning-rate scheduler can fine-tune the training process (see the sketch after this list).
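The implementation below uses ReduceLROnPlateau; a fixed decay schedule via Keras's LearningRateScheduler callback is a common alternative. A minimal sketch, where the cut-off epoch and decay factor are illustrative choices, not values from this notebook:

from tensorflow.keras.callbacks import LearningRateScheduler

# Illustrative schedule: hold the initial rate for 100 epochs, then decay it by 1% per epoch
def schedule(epoch, lr):
    return lr if epoch < 100 else lr * 0.99

lr_callback = LearningRateScheduler(schedule)
# model.fit(X, y, epochs=1000, verbose=0, callbacks=[lr_callback])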

Implemented in the code below:

  • Increased Model Complexity: Added a second SimpleRNN layer and a Dense layer with more units to increase the model's capacity to learn complex patterns.

  • More Training Data: Increased the sequence length to 100 to provide more training pairs.

  • More Epochs: Increased the number of epochs to 1000 to give the model more time to learn the pattern.

  • Learning Rate Scheduler: Used ReduceLROnPlateau to lower the learning rate when the loss plateaus, helping the model fine-tune.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
from tensorflow.keras.callbacks import ReduceLROnPlateau

# Generate a simple sequence of numbers
def generate_sequence(length):
    return np.array([i for i in range(1, length + 1)])

# Prepare data
sequence_length = 100  # Increased sequence length
data = generate_sequence(sequence_length)

# Create input-output pairs
X = []
y = []
for i in range(len(data) - 4):
    X.append(data[i:i+4])
    y.append(data[i+1:i+5] * 2)  # Y = 2 * next window
X = np.array(X).reshape((-1, 4, 1))
y = np.array(y).reshape((-1, 4, 1))

# Print first few values of X and y
print("First few input sequences (X):")
print(X[:3])
print("\nFirst few target sequences (y):")
print(y[:3])
First few input sequences (X):
[[[1] [2] [3] [4]]
 [[2] [3] [4] [5]]
 [[3] [4] [5] [6]]]

First few target sequences (y):
[[[ 4] [ 6] [ 8] [10]]
 [[ 6] [ 8] [10] [12]]
 [[ 8] [10] [12] [14]]]
# Define RNN model
model = Sequential()
model.add(SimpleRNN(100, activation='relu', input_shape=(4, 1), return_sequences=True))  # More units; return_sequences feeds the next RNN layer
model.add(SimpleRNN(50, activation='relu'))  # Second RNN layer
model.add(Dense(50, activation='relu'))  # Dense layer with more units
model.add(Dense(4))
model.compile(optimizer='adam', loss='mse')
# Learning rate scheduler: reduce the rate when the loss plateaus
# Note: Adam's default learning rate is 0.001, so min_lr=0.001 means the
# callback cannot actually lower the rate below its starting value here.
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=5, min_lr=0.001)

# Train the model for more epochs, with the learning rate scheduler
model.fit(X, y, epochs=1000, verbose=0, callbacks=[reduce_lr])
<keras.callbacks.History at 0x28868ad7970>
# Generate a new sequence
input_sequence = np.array([[24, 25, 26, 27]]).reshape((1, 4, 1))
prediction = model.predict(input_sequence, verbose=0)

# Print input sequence in 2D format
print("\nInput Sequence (2D format):")
print(input_sequence.reshape((4, 1)))

# Print the predicted sequence
print("\nNext Sequence Prediction (Y = 2 * next window):")
print(prediction.flatten())
Input Sequence (2D format):
[[24]
 [25]
 [26]
 [27]]

Next Sequence Prediction (Y = 2 * next window):
[49.990376 52.00197  54.007946 55.990585]
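The predictions are now within about 0.01 of the ideal [50, 52, 54, 56], a clear improvement over the earlier model.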

Example 3

Predict the sum of the input window: for X = [1, 2, 3, 4], the target is 1 + 2 + 3 + 4 = 10.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
from tensorflow.keras.callbacks import ReduceLROnPlateau

# Generate a simple sequence of numbers
def generate_sequence(length):
    return np.array([i for i in range(1, length + 1)])

# Prepare data
sequence_length = 100  # Increased sequence length
data = generate_sequence(sequence_length)

# Create input-output pairs
X = []
y = []
for i in range(len(data) - 4):
    X.append(data[i:i+4])
    y.append([np.sum(data[i:i+4])])  # y = sum(X)
X = np.array(X).reshape((-1, 4, 1))
y = np.array(y)

# Print first few values of X and y
print("First few input sequences (X):")
print(X[:3])
print("\nFirst few target sequences (y):")
print(y[:3])

# Define RNN model
model = Sequential()
model.add(SimpleRNN(100, activation='relu', input_shape=(4, 1), return_sequences=False))
model.add(Dense(50, activation='relu'))  # Dense layer with more units
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

# Learning rate scheduler
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=5, min_lr=0.001)

# Train the model
model.fit(X, y, epochs=1000, verbose=0, callbacks=[reduce_lr])

# Predict the sum of a new input window
input_sequence = np.array([17, 18, 19, 20]).reshape((1, 4, 1))
prediction = model.predict(input_sequence, verbose=0)

# Print input sequence in 2D format
print("\nInput Sequence (2D format):")
print(input_sequence.reshape((4, 1)))

# Print the predicted sum
print("\nSequence Sum Prediction (sum(X)):")
print(prediction.flatten()[0])
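For the input [17, 18, 19, 20] the true sum is 74, so a well-trained model should print a value close to 74 (exact output varies between runs).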

Example 4: Predicting the Next Number in a Fibonacci Sequence

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense

# Generate a Fibonacci sequence
def generate_fibonacci_sequence(length):
    sequence = [0, 1]
    for _ in range(length - 2):
        sequence.append(sequence[-1] + sequence[-2])
    return np.array(sequence)

# Prepare data
sequence_length = 20  # Length of the sequence
data = generate_fibonacci_sequence(sequence_length)

# Create input-output pairs
X = []
y = []
for i in range(len(data) - 4):
    X.append(data[i:i+4])
    y.append(data[i+4])  # Next number in the sequence
X = np.array(X).reshape((-1, 4, 1))
y = np.array(y)

# Define RNN model
model = Sequential()
model.add(SimpleRNN(50, activation='relu', input_shape=(4, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

# Train the model
model.fit(X, y, epochs=1000, verbose=0)

# Predict the next number for a new input window
input_sequence = np.array([13, 21, 34, 55]).reshape((1, 4, 1))
prediction = model.predict(input_sequence, verbose=0)

# Print results
print("Input Sequence:", input_sequence.flatten())
print("Next Number Prediction:", prediction.flatten()[0])
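The true next Fibonacci number after [13, 21, 34, 55] is 34 + 55 = 89, so a well-trained model should predict a value close to 89 (exact output varies between runs).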