#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

TF Lattice Aggregate Function Models

Overview

Premade TFL (TensorFlow Lattice) aggregate function models are a quick and easy way to build TFL tf.keras.Model instances for learning complex aggregation functions. This guide outlines the steps needed to construct a premade TFL aggregate function model and to train and test it.

Setup

Installing the TF Lattice package:

#@test {"skip": true}
!pip install tensorflow-lattice pydot

Importing required packages:

import tensorflow as tf

import collections
import logging
import numpy as np
import pandas as pd
import sys
import tensorflow_lattice as tfl
logging.disable(sys.maxsize)

Downloading the Puzzles dataset:

train_dataframe = pd.read_csv(
    'https://raw.githubusercontent.com/wbakst/puzzles_data/master/train.csv')
train_dataframe.head()

test_dataframe = pd.read_csv(
    'https://raw.githubusercontent.com/wbakst/puzzles_data/master/test.csv')
test_dataframe.head()

Extracting and converting features and labels:

# Features:
# - star_rating       rating out of 5 stars (1-5)
# - word_count        number of words in the review
# - is_amazon         1 = reviewed on amazon; 0 = reviewed on artifact website
# - includes_photo    if the review includes a photo of the puzzle
# - num_helpful       number of people that found this review helpful
# - num_reviews       total number of reviews for this puzzle (we construct)
#
# This ordering of feature names will be the exact same order that we construct
# our model to expect.
feature_names = [
    'star_rating', 'word_count', 'is_amazon', 'includes_photo', 'num_helpful',
    'num_reviews'
]
def extract_features(dataframe, label_name):
  # First we extract flattened features.
  flattened_features = {
      feature_name: dataframe[feature_name].values.astype(float)
      for feature_name in feature_names[:-1]
  }
  # Construct mapping from puzzle name to feature.
  star_rating = collections.defaultdict(list)
  word_count = collections.defaultdict(list)
  is_amazon = collections.defaultdict(list)
  includes_photo = collections.defaultdict(list)
  num_helpful = collections.defaultdict(list)
  labels = {}
  # Extract each review.
  for i in range(len(dataframe)):
    row = dataframe.iloc[i]
    puzzle_name = row['puzzle_name']
    star_rating[puzzle_name].append(float(row['star_rating']))
    word_count[puzzle_name].append(float(row['word_count']))
    is_amazon[puzzle_name].append(float(row['is_amazon']))
    includes_photo[puzzle_name].append(float(row['includes_photo']))
    num_helpful[puzzle_name].append(float(row['num_helpful']))
    labels[puzzle_name] = float(row[label_name])
  # Organize data into list of list of features.
  names = list(star_rating.keys())
  star_rating = [star_rating[name] for name in names]
  word_count = [word_count[name] for name in names]
  is_amazon = [is_amazon[name] for name in names]
  includes_photo = [includes_photo[name] for name in names]
  num_helpful = [num_helpful[name] for name in names]
  num_reviews = [[len(ratings)] * len(ratings) for ratings in star_rating]
  labels = [labels[name] for name in names]
  # Flatten num_reviews
  flattened_features['num_reviews'] = [len(reviews) for reviews in num_reviews]
  # Convert data into ragged tensors.
  star_rating = tf.ragged.constant(star_rating)
  word_count = tf.ragged.constant(word_count)
  is_amazon = tf.ragged.constant(is_amazon)
  includes_photo = tf.ragged.constant(includes_photo)
  num_helpful = tf.ragged.constant(num_helpful)
  num_reviews = tf.ragged.constant(num_reviews)
  labels = tf.constant(labels)
  # Now we can return our extracted data.
  return (star_rating, word_count, is_amazon, includes_photo, num_helpful,
          num_reviews), labels, flattened_features
train_xs, train_ys, flattened_features = extract_features(train_dataframe, 'Sales12-18MonthsAgo')
test_xs, test_ys, _ = extract_features(test_dataframe, 'SalesLastSixMonths')

# Let's define our label minimum and maximum.
min_label, max_label = float(np.min(train_ys)), float(np.max(train_ys))
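As a quick sanity check (illustrative, not part of the original tutorial), we can print the label range and dataset sizes; the label range is reused below as the model's output bounds.

# Illustrative sanity check: min_label/max_label are reused below as the
# model's output_min/output_max, so it is worth confirming they look sensible.
print('Label range: [%.2f, %.2f]' % (min_label, max_label))
print('Number of training puzzles: %d' % len(train_ys))
print('Number of test puzzles: %d' % len(test_ys))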

Setting the default values used for training in this guide:

LEARNING_RATE = 0.1
BATCH_SIZE = 128
NUM_EPOCHS = 500
MIDDLE_DIM = 3
MIDDLE_LATTICE_SIZE = 2
MIDDLE_KEYPOINTS = 16
OUTPUT_KEYPOINTS = 8

Feature Configs

Feature calibration and per-feature configurations are set using tfl.configs.FeatureConfig. Feature configurations include monotonicity constraints, per-feature regularization (see tfl.configs.RegularizerConfig), and lattice sizes for lattice models.

Note that we must fully specify the feature config for any feature that we want our model to recognize. Otherwise, the model has no way of knowing that such a feature exists. For aggregation models, these features are automatically considered and properly handled as ragged.
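For illustration, the per-feature regularization mentioned above can be attached through the regularizer_configs argument. The sketch below is a hypothetical config for word_count with placeholder keypoints and a small calibrator Hessian regularizer; it is not used in the rest of this guide, where the actual configs are defined later.

# Illustrative only: a feature config that attaches a small calibrator
# Hessian regularizer to encourage a flatter (more linear) calibration.
# The keypoints here are placeholders, not the quantiles computed below.
example_feature_config = tfl.configs.FeatureConfig(
    name='word_count',
    lattice_size=2,
    monotonicity='increasing',
    pwl_calibration_num_keypoints=5,
    pwl_calibration_input_keypoints=[0.0, 25.0, 50.0, 75.0, 100.0],
    regularizer_configs=[
        tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
    ],
)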

Computing Quantiles

Although the default setting for pwl_calibration_input_keypoints in tfl.configs.FeatureConfig is 'quantiles', for premade models we have to define the input keypoints manually. To do so, we first define our own helper function for computing quantiles.

def compute_quantiles(features,
                      num_keypoints=10,
                      clip_min=None,
                      clip_max=None,
                      missing_value=None):
  # Clip min and max if desired.
  if clip_min is not None:
    features = np.maximum(features, clip_min)
    features = np.append(features, clip_min)
  if clip_max is not None:
    features = np.minimum(features, clip_max)
    features = np.append(features, clip_max)
  # Make features unique.
  unique_features = np.unique(features)
  # Remove missing values if specified.
  if missing_value is not None:
    unique_features = np.delete(unique_features,
                                np.where(unique_features == missing_value))
  # Compute and return quantiles over unique non-missing feature values.
  return np.quantile(
      unique_features,
      np.linspace(0., 1., num=num_keypoints),
      interpolation='nearest').astype(float)
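As a quick illustrative check of this helper (not part of the original tutorial), calling it on a small synthetic array shows the keypoints it produces:

# Illustrative usage of compute_quantiles on a small synthetic array:
# returns 5 keypoints taken from the quantiles of the unique values.
example_values = np.array([1., 1., 2., 3., 5., 8., 13., 21.])
print(compute_quantiles(example_values, num_keypoints=5))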

Defining Our Feature Configs

Now that we can compute our quantiles, we define a feature config for each feature that we want our model to take as input.

# Feature configs are used to specify how each feature is calibrated and used.
feature_configs = [
    tfl.configs.FeatureConfig(
        name='star_rating',
        lattice_size=2,
        monotonicity='increasing',
        pwl_calibration_num_keypoints=5,
        pwl_calibration_input_keypoints=compute_quantiles(
            flattened_features['star_rating'], num_keypoints=5),
    ),
    tfl.configs.FeatureConfig(
        name='word_count',
        lattice_size=2,
        monotonicity='increasing',
        pwl_calibration_num_keypoints=5,
        pwl_calibration_input_keypoints=compute_quantiles(
            flattened_features['word_count'], num_keypoints=5),
    ),
    tfl.configs.FeatureConfig(
        name='is_amazon',
        lattice_size=2,
        num_buckets=2,
    ),
    tfl.configs.FeatureConfig(
        name='includes_photo',
        lattice_size=2,
        num_buckets=2,
    ),
    tfl.configs.FeatureConfig(
        name='num_helpful',
        lattice_size=2,
        monotonicity='increasing',
        pwl_calibration_num_keypoints=5,
        pwl_calibration_input_keypoints=compute_quantiles(
            flattened_features['num_helpful'], num_keypoints=5),
        # Larger num_helpful indicating more trust in star_rating.
        reflects_trust_in=[
            tfl.configs.TrustConfig(
                feature_name="star_rating", trust_type="trapezoid"),
        ],
    ),
    tfl.configs.FeatureConfig(
        name='num_reviews',
        lattice_size=2,
        monotonicity='increasing',
        pwl_calibration_num_keypoints=5,
        pwl_calibration_input_keypoints=compute_quantiles(
            flattened_features['num_reviews'], num_keypoints=5),
    )
]

Aggregate Function Model

To construct a premade TFL model, first construct a model configuration from tfl.configs. An aggregate function model is constructed using tfl.configs.AggregateFunctionConfig. It applies piecewise-linear and categorical calibration, followed by a lattice model on each dimension of the ragged input. It then applies an aggregation layer over the output for each dimension, followed by an optional piecewise-linear calibration of the output.

# Model config defines the model structure for the aggregate function model.
aggregate_function_model_config = tfl.configs.AggregateFunctionConfig(
    feature_configs=feature_configs,
    middle_dimension=MIDDLE_DIM,
    middle_lattice_size=MIDDLE_LATTICE_SIZE,
    middle_calibration=True,
    middle_calibration_num_keypoints=MIDDLE_KEYPOINTS,
    middle_monotonicity='increasing',
    output_min=min_label,
    output_max=max_label,
    output_calibration=True,
    output_calibration_num_keypoints=OUTPUT_KEYPOINTS,
    output_initialization=np.linspace(
        min_label, max_label, num=OUTPUT_KEYPOINTS))
# An AggregateFunction premade model constructed from the given model config.
aggregate_function_model = tfl.premade.AggregateFunction(
    aggregate_function_model_config)
# Let's plot our model.
tf.keras.utils.plot_model(
    aggregate_function_model, show_layer_names=False, rankdir='LR')

The output of each Aggregation layer is the averaged output of a calibrated lattice over the ragged inputs. Here is the model used inside the first Aggregation layer:

aggregation_layers = [
    layer for layer in aggregate_function_model.layers
    if isinstance(layer, tfl.layers.Aggregation)
]
tf.keras.utils.plot_model(
    aggregation_layers[0].model, show_layer_names=False, rankdir='LR')

Now, as with any other tf.keras.Model, we compile and fit the model to our data:

aggregate_function_model.compile(
    loss='mae',
    optimizer=tf.keras.optimizers.Adam(LEARNING_RATE))
aggregate_function_model.fit(
    train_xs, train_ys, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, verbose=False)

After fitting our model, we can evaluate it on our test set:

print('Test Set Evaluation...')
print(aggregate_function_model.evaluate(test_xs, test_ys))
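With evaluation done, predictions for the ragged test inputs can be obtained with the standard Keras predict call. A minimal sketch, assuming the same test tensors as above:

# Illustrative: standard Keras prediction on the ragged test inputs.
# Each entry is the model's aggregated prediction for one puzzle.
predictions = aggregate_function_model.predict(test_xs)
print(predictions[:5])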