第17章 予測を使ってさらに予測を行う
In [2]:
Copied!
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import MeanAbsoluteError
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Conv1D
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import MeanAbsoluteError
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Conv1D
2023-11-23 10:28:40.029962: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. 2023-11-23 10:28:40.045119: I external/local_tsl/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. 2023-11-23 10:28:40.159970: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered 2023-11-23 10:28:40.160215: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered 2023-11-23 10:28:40.187395: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered 2023-11-23 10:28:40.234423: I external/local_tsl/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. 2023-11-23 10:28:40.235547: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-11-23 10:28:41.093645: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
In [3]:
Copied!
# Load the pre-split train / validation / test sets used throughout the chapter.
# Data accompanies "Time Series Forecasting in Python" (Marco Peixeiro).
# Fix: each download/read was duplicated (executed twice); run each once.
url_train = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/train.csv'
url_val = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/val.csv'
url_test = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/test.csv'

# First CSV column becomes the index (presumably a timestamp — TODO confirm).
df_train = pd.read_csv(url_train, index_col=0)
df_val = pd.read_csv(url_val, index_col=0)
df_test = pd.read_csv(url_test, index_col=0)
In [4]:
Copied!
class DataWindow:
    """Slice a time-series DataFrame into (inputs, labels) windows for Keras.

    Each window spans ``total_window_size = input_width + shift`` consecutive
    rows: the first ``input_width`` rows are the model inputs and the last
    ``label_width`` rows are the labels.  When ``label_columns`` is given,
    labels are restricted to those columns.

    Fix: this class was defined twice in the notebook (the second definition
    silently shadowed the first); the duplicate has been removed.
    """

    def __init__(self, input_width, label_width, shift,
                 df_train, df_val, df_test, label_columns=None):
        # Window geometry.
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.total_window_size = input_width + shift
        # Data splits.
        self.df_train = df_train
        self.df_val = df_val
        self.df_test = df_test
        # Label bookkeeping: map column names to their positions.
        self.label_columns = label_columns
        if label_columns is not None:
            self.label_columns_indices = {name: i for i, name in enumerate(label_columns)}
        self.column_indices = {name: i for i, name in enumerate(self.df_train.columns)}
        # Index slices used to split each window into inputs and labels.
        self.input_slice = slice(0, input_width)
        self.input_indices = np.arange(self.total_window_size)[self.input_slice]
        self.label_start = self.total_window_size - self.label_width
        self.labels_slice = slice(self.label_start, None)
        self.label_indices = np.arange(self.total_window_size)[self.labels_slice]

    def split_to_inputs_labels(self, features):
        """Split a batch of windows (batch, time, features) into (inputs, labels)."""
        inputs = features[:, self.input_slice, :]
        labels = features[:, self.labels_slice, :]
        if self.label_columns is not None:
            # Keep only the requested label columns, stacked on the last axis.
            labels = tf.stack(
                [labels[:, :, self.column_indices[name]] for name in self.label_columns],
                axis=-1
            )
        # Pin the time dimension so downstream layers see static shapes.
        inputs.set_shape([None, self.input_width, None])
        labels.set_shape([None, self.label_width, None])
        return inputs, labels

    def plot(self, plot_col: str, model=None, max_subplots=3):
        """Plot inputs, labels, and (optionally) model predictions for a sample batch.

        Args:
            plot_col: name of the column to plot.
            model: optional callable model; if given, its predictions are overlaid.
            max_subplots: maximum number of windows from the batch to draw.
        """
        inputs, labels = self.sample_batch
        plt.figure(figsize=(12, 8))
        plot_col_index = self.column_indices[plot_col]
        n_max = min(max_subplots, len(inputs))
        for n in range(n_max):
            plt.subplot(n_max, 1, n + 1)
            plt.ylabel(f'{plot_col} [scaled]')
            plt.plot(self.input_indices, inputs[n, :, plot_col_index],
                     label='Inputs', marker='.', zorder=-10)
            if self.label_columns:
                label_col_index = self.label_columns_indices.get(plot_col, None)
            else:
                label_col_index = plot_col_index
            if label_col_index is None:
                # plot_col is not one of the label columns; nothing to scatter.
                continue
            plt.scatter(self.label_indices, labels[n, :, label_col_index],
                        edgecolors='k', label='Labels', c='tab:green', s=64)
            if model is not None:
                predictions = model(inputs)
                plt.scatter(self.label_indices, predictions[n, :, label_col_index],
                            marker='X', edgecolors='k', label='Predictions',
                            c='tab:red', s=64)
            if n == 0:
                # Only the first subplot carries the legend.
                plt.legend()
        plt.xlabel('Time (h)')

    def make_dataset(self, data):
        """Build a shuffled tf.data.Dataset of (inputs, labels) windows from `data`."""
        data = np.array(data, dtype=np.float32)
        ds = tf.keras.utils.timeseries_dataset_from_array(
            data=data,
            targets=None,
            sequence_length=self.total_window_size,
            sequence_stride=1,
            shuffle=True,
            batch_size=32,
        )
        return ds.map(self.split_to_inputs_labels)

    @property
    def train(self):
        """Windowed dataset built from the training split."""
        return self.make_dataset(self.df_train)

    @property
    def val(self):
        """Windowed dataset built from the validation split."""
        return self.make_dataset(self.df_val)

    @property
    def test(self):
        """Windowed dataset built from the test split."""
        return self.make_dataset(self.df_test)

    @property
    def sample_batch(self):
        """Get and cache an example batch of `inputs, labels` for plotting."""
        result = getattr(self, '_sample_batch', None)
        if result is None:
            result = next(iter(self.train))
            self._sample_batch = result
        return result
In [5]:
Copied!
# for training
def compile_and_fit(model, window, patience=3, max_epochs=50):
    """Compile `model` (MSE loss, Adam optimizer, MAE metric) and train it on
    `window`'s train split, early-stopping on validation loss.

    Fix: this function was defined twice in the notebook (the second definition
    silently shadowed the first); the duplicate has been removed.

    Args:
        model: an uncompiled Keras model.
        window: a DataWindow exposing `.train` and `.val` datasets.
        patience: epochs without val_loss improvement before stopping.
        max_epochs: upper bound on training epochs.

    Returns:
        The Keras History object from `model.fit`.
    """
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=patience,
        mode='min'
    )
    model.compile(
        loss=MeanSquaredError(),
        optimizer=Adam(),
        metrics=[MeanAbsoluteError()]
    )
    history = model.fit(
        window.train,
        epochs=max_epochs,
        validation_data=window.val,
        callbacks=[early_stopping]
    )
    return history
In [6]:
Copied!
# models
# Fix: this whole cell was duplicated (every class and model defined twice,
# the second copy silently shadowing the first); the duplicates are removed.
class Baseline(Model):
    """Single-step baseline: predict that each label equals the current input.

    Returns the selected label column(s) of the inputs unchanged.
    """

    def __init__(self, label_index=None):
        super().__init__()
        # int, list of ints, or None (None returns every column).
        self.label_index = label_index

    def call(self, inputs):
        if self.label_index is None:
            return inputs
        elif isinstance(self.label_index, list):
            # Gather each requested column and re-stack on the feature axis.
            tensors = []
            for index in self.label_index:
                res = inputs[:, :, index]
                res = res[:, :, tf.newaxis]
                tensors.append(res)
            return tf.concat(tensors, axis=-1)
        else:
            res = inputs[:, :, self.label_index]
            return res[:, :, tf.newaxis]


class MultiStepLastBaseline(Model):
    """Multi-step baseline: repeat the last observed step across the horizon."""

    def __init__(self, label_index=None, steps=24):
        super().__init__()
        self.label_index = label_index
        # Forecast horizon; previously hard-coded to 24 (one day of hourly data).
        self.steps = steps

    def call(self, inputs):
        if self.label_index is None:
            return tf.tile(inputs[:, -1:, :], [1, self.steps, 1])
        # NOTE(review): the trailing slice keeps every column from label_index
        # onward — it yields only the target when that column is last; confirm.
        return tf.tile(inputs[:, -1:, self.label_index:], [1, self.steps, 1])


class RepeatBaseline(Model):
    """Multi-step baseline: repeat the whole input window as the forecast."""

    def __init__(self, label_index=None):
        super().__init__()
        self.label_index = label_index

    def call(self, inputs):
        # NOTE(review): same trailing-slice caveat as MultiStepLastBaseline.
        return inputs[:, :, self.label_index:]


# Linear model
ms_linear = Sequential([Dense(units=1, kernel_initializer=tf.initializers.zeros)])

# DNN
ms_dense = Sequential([
    Dense(units=64, activation='relu'),
    Dense(units=64, activation='relu'),
    Dense(units=1, kernel_initializer=tf.initializers.zeros)
])

# LSTM
ms_lstm_model = Sequential([
    LSTM(32, return_sequences=True),
    Dense(units=1, kernel_initializer=tf.initializers.zeros)
])

# CNN
KERNEL_WIDTH = 3
ms_cnn_model = Sequential([
    Conv1D(filters=32, kernel_size=(KERNEL_WIDTH,), activation='relu'),
    Dense(units=32, activation='relu'),
    Dense(units=1, kernel_initializer=tf.initializers.zeros)
])

# CNN feeding an LSTM
ms_cnn_lstm_model = Sequential([
    Conv1D(filters=32, kernel_size=(KERNEL_WIDTH,), activation='relu'),
    LSTM(32, return_sequences=True),
    Dense(units=1, kernel_initializer=tf.initializers.zeros)
])
In [7]:
Copied!
# Multi-step experiment: train and evaluate every model on 24-hour-ahead windows.
# Fix: this whole cell was duplicated (every model trained and evaluated twice);
# the duplicate statements are removed.
column_indices = {name: i for i, name in enumerate(df_train.columns)}

# 24 hours of inputs, predicting the next 24 hours of traffic_volume.
multi_window = DataWindow(input_width=24, label_width=24, shift=24,
                          df_train=df_train, df_val=df_val, df_test=df_test,
                          label_columns=['traffic_volume'])

# Conv1D with kernel k shortens the time axis by k - 1, so feed it
# KERNEL_WIDTH - 1 extra input steps to keep LABEL_WIDTH output steps.
LABEL_WIDTH = 24
INPUT_WIDTH = LABEL_WIDTH + KERNEL_WIDTH - 1
multi_conv_window = DataWindow(input_width=INPUT_WIDTH, label_width=LABEL_WIDTH, shift=24,
                               df_train=df_train, df_val=df_val, df_test=df_test,
                               label_columns=['traffic_volume'])

# Baselines need compiling (for .evaluate) but no training.
ms_baseline_last = MultiStepLastBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_baseline_repeat = RepeatBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_repeat.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])

# Each entry is the [loss, MAE] pair returned by model.evaluate().
ms_val_performance = {}
ms_test_performance = {}
ms_val_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.val)
ms_test_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.test)
ms_val_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.val)
ms_test_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.test)

history = compile_and_fit(ms_linear, multi_window)
ms_val_performance['Linear'] = ms_linear.evaluate(multi_window.val)
ms_test_performance['Linear'] = ms_linear.evaluate(multi_window.test)

history = compile_and_fit(ms_dense, multi_window)
ms_val_performance['Dense'] = ms_dense.evaluate(multi_window.val)
ms_test_performance['Dense'] = ms_dense.evaluate(multi_window.test)

history = compile_and_fit(ms_lstm_model, multi_window)
ms_val_performance['LSTM'] = ms_lstm_model.evaluate(multi_window.val)
ms_test_performance['LSTM'] = ms_lstm_model.evaluate(multi_window.test)

# Convolutional models use the wider input window.
history = compile_and_fit(ms_cnn_model, multi_conv_window)
ms_val_performance['CNN'] = ms_cnn_model.evaluate(multi_conv_window.val)
ms_test_performance['CNN'] = ms_cnn_model.evaluate(multi_conv_window.test)

history = compile_and_fit(ms_cnn_lstm_model, multi_conv_window)
ms_val_performance['CNN + LSTM'] = ms_cnn_lstm_model.evaluate(multi_conv_window.val)
ms_test_performance['CNN + LSTM'] = ms_cnn_lstm_model.evaluate(multi_conv_window.test)
109/109 [==============================] - 0s 3ms/step - loss: 0.1875 - mean_absolute_error: 0.3522 54/54 [==============================] - 0s 3ms/step - loss: 0.1814 - mean_absolute_error: 0.3473 109/109 [==============================] - 0s 2ms/step - loss: 0.2065 - mean_absolute_error: 0.3473 54/54 [==============================] - 0s 2ms/step - loss: 0.2018 - mean_absolute_error: 0.3413 Epoch 1/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0897 - mean_absolute_error: 0.2464 - val_loss: 0.0457 - val_mean_absolute_error: 0.1821 Epoch 2/50 383/383 [==============================] - 1s 4ms/step - loss: 0.0297 - mean_absolute_error: 0.1365 - val_loss: 0.0262 - val_mean_absolute_error: 0.1259 Epoch 3/50 383/383 [==============================] - 1s 2ms/step - loss: 0.0200 - mean_absolute_error: 0.1043 - val_loss: 0.0213 - val_mean_absolute_error: 0.1083 Epoch 4/50 383/383 [==============================] - 1s 2ms/step - loss: 0.0176 - mean_absolute_error: 0.0954 - val_loss: 0.0197 - val_mean_absolute_error: 0.1007 Epoch 5/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0168 - mean_absolute_error: 0.0914 - val_loss: 0.0189 - val_mean_absolute_error: 0.0961 Epoch 6/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0164 - mean_absolute_error: 0.0887 - val_loss: 0.0185 - val_mean_absolute_error: 0.0932 Epoch 7/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0162 - mean_absolute_error: 0.0868 - val_loss: 0.0183 - val_mean_absolute_error: 0.0910 Epoch 8/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0857 - val_loss: 0.0182 - val_mean_absolute_error: 0.0898 Epoch 9/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0850 - val_loss: 0.0182 - val_mean_absolute_error: 0.0889 Epoch 10/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0846 - val_loss: 
0.0182 - val_mean_absolute_error: 0.0889 Epoch 11/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0843 - val_loss: 0.0182 - val_mean_absolute_error: 0.0887 Epoch 12/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0161 - mean_absolute_error: 0.0841 - val_loss: 0.0182 - val_mean_absolute_error: 0.0886 109/109 [==============================] - 0s 2ms/step - loss: 0.0182 - mean_absolute_error: 0.0886 54/54 [==============================] - 0s 3ms/step - loss: 0.0142 - mean_absolute_error: 0.0768 Epoch 1/50 383/383 [==============================] - 3s 6ms/step - loss: 0.0298 - mean_absolute_error: 0.1096 - val_loss: 0.0172 - val_mean_absolute_error: 0.0849 Epoch 2/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0148 - mean_absolute_error: 0.0794 - val_loss: 0.0166 - val_mean_absolute_error: 0.0850 Epoch 3/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0143 - mean_absolute_error: 0.0788 - val_loss: 0.0160 - val_mean_absolute_error: 0.0842 Epoch 4/50 383/383 [==============================] - 3s 6ms/step - loss: 0.0137 - mean_absolute_error: 0.0780 - val_loss: 0.0158 - val_mean_absolute_error: 0.0868 Epoch 5/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0134 - mean_absolute_error: 0.0774 - val_loss: 0.0154 - val_mean_absolute_error: 0.0821 Epoch 6/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0133 - mean_absolute_error: 0.0771 - val_loss: 0.0150 - val_mean_absolute_error: 0.0822 Epoch 7/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0131 - mean_absolute_error: 0.0766 - val_loss: 0.0148 - val_mean_absolute_error: 0.0806 Epoch 8/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0131 - mean_absolute_error: 0.0762 - val_loss: 0.0148 - val_mean_absolute_error: 0.0801 Epoch 9/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0130 - mean_absolute_error: 0.0761 - 
val_loss: 0.0160 - val_mean_absolute_error: 0.0834 Epoch 10/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0129 - mean_absolute_error: 0.0757 - val_loss: 0.0146 - val_mean_absolute_error: 0.0799 Epoch 11/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0129 - mean_absolute_error: 0.0755 - val_loss: 0.0149 - val_mean_absolute_error: 0.0817 Epoch 12/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0129 - mean_absolute_error: 0.0756 - val_loss: 0.0145 - val_mean_absolute_error: 0.0801 Epoch 13/50 383/383 [==============================] - 3s 9ms/step - loss: 0.0128 - mean_absolute_error: 0.0753 - val_loss: 0.0146 - val_mean_absolute_error: 0.0821 Epoch 14/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0128 - mean_absolute_error: 0.0752 - val_loss: 0.0145 - val_mean_absolute_error: 0.0790 Epoch 15/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0127 - mean_absolute_error: 0.0750 - val_loss: 0.0144 - val_mean_absolute_error: 0.0805 Epoch 16/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0127 - mean_absolute_error: 0.0749 - val_loss: 0.0143 - val_mean_absolute_error: 0.0787 Epoch 17/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0126 - mean_absolute_error: 0.0746 - val_loss: 0.0145 - val_mean_absolute_error: 0.0805 Epoch 18/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0126 - mean_absolute_error: 0.0745 - val_loss: 0.0143 - val_mean_absolute_error: 0.0781 Epoch 19/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0126 - mean_absolute_error: 0.0745 - val_loss: 0.0145 - val_mean_absolute_error: 0.0796 109/109 [==============================] - 1s 4ms/step - loss: 0.0145 - mean_absolute_error: 0.0796 54/54 [==============================] - 0s 4ms/step - loss: 0.0100 - mean_absolute_error: 0.0657 Epoch 1/50 383/383 [==============================] - 11s 24ms/step - loss: 0.0477 - 
mean_absolute_error: 0.1570 - val_loss: 0.0209 - val_mean_absolute_error: 0.1023 Epoch 2/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0167 - mean_absolute_error: 0.0899 - val_loss: 0.0175 - val_mean_absolute_error: 0.0906 Epoch 3/50 383/383 [==============================] - 9s 24ms/step - loss: 0.0150 - mean_absolute_error: 0.0830 - val_loss: 0.0165 - val_mean_absolute_error: 0.0869 Epoch 4/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0143 - mean_absolute_error: 0.0803 - val_loss: 0.0159 - val_mean_absolute_error: 0.0836 Epoch 5/50 383/383 [==============================] - 9s 24ms/step - loss: 0.0139 - mean_absolute_error: 0.0789 - val_loss: 0.0159 - val_mean_absolute_error: 0.0852 Epoch 6/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0137 - mean_absolute_error: 0.0777 - val_loss: 0.0153 - val_mean_absolute_error: 0.0828 Epoch 7/50 383/383 [==============================] - 9s 24ms/step - loss: 0.0134 - mean_absolute_error: 0.0766 - val_loss: 0.0153 - val_mean_absolute_error: 0.0820 Epoch 8/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0133 - mean_absolute_error: 0.0764 - val_loss: 0.0151 - val_mean_absolute_error: 0.0826 Epoch 9/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0131 - mean_absolute_error: 0.0756 - val_loss: 0.0148 - val_mean_absolute_error: 0.0783 Epoch 10/50 383/383 [==============================] - 9s 24ms/step - loss: 0.0130 - mean_absolute_error: 0.0752 - val_loss: 0.0147 - val_mean_absolute_error: 0.0803 Epoch 11/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0129 - mean_absolute_error: 0.0747 - val_loss: 0.0156 - val_mean_absolute_error: 0.0827 Epoch 12/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0129 - mean_absolute_error: 0.0745 - val_loss: 0.0150 - val_mean_absolute_error: 0.0803 Epoch 13/50 383/383 [==============================] - 9s 24ms/step - loss: 0.0127 - 
mean_absolute_error: 0.0740 - val_loss: 0.0149 - val_mean_absolute_error: 0.0799 109/109 [==============================] - 1s 11ms/step - loss: 0.0149 - mean_absolute_error: 0.0799 54/54 [==============================] - 1s 9ms/step - loss: 0.0109 - mean_absolute_error: 0.0663 Epoch 1/50 383/383 [==============================] - 4s 8ms/step - loss: 0.0354 - mean_absolute_error: 0.1243 - val_loss: 0.0169 - val_mean_absolute_error: 0.0852 Epoch 2/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0144 - mean_absolute_error: 0.0788 - val_loss: 0.0164 - val_mean_absolute_error: 0.0867 Epoch 3/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0140 - mean_absolute_error: 0.0781 - val_loss: 0.0160 - val_mean_absolute_error: 0.0839 Epoch 4/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0138 - mean_absolute_error: 0.0778 - val_loss: 0.0160 - val_mean_absolute_error: 0.0862 Epoch 5/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0137 - mean_absolute_error: 0.0775 - val_loss: 0.0159 - val_mean_absolute_error: 0.0863 Epoch 6/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0135 - mean_absolute_error: 0.0772 - val_loss: 0.0156 - val_mean_absolute_error: 0.0818 Epoch 7/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0133 - mean_absolute_error: 0.0766 - val_loss: 0.0154 - val_mean_absolute_error: 0.0844 Epoch 8/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0132 - mean_absolute_error: 0.0761 - val_loss: 0.0151 - val_mean_absolute_error: 0.0835 Epoch 9/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0131 - mean_absolute_error: 0.0758 - val_loss: 0.0150 - val_mean_absolute_error: 0.0823 Epoch 10/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0129 - mean_absolute_error: 0.0751 - val_loss: 0.0147 - val_mean_absolute_error: 0.0800 Epoch 11/50 383/383 [==============================] - 3s 7ms/step - loss: 
0.0128 - mean_absolute_error: 0.0746 - val_loss: 0.0148 - val_mean_absolute_error: 0.0794 Epoch 12/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0127 - mean_absolute_error: 0.0743 - val_loss: 0.0146 - val_mean_absolute_error: 0.0793 Epoch 13/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0126 - mean_absolute_error: 0.0741 - val_loss: 0.0145 - val_mean_absolute_error: 0.0806 Epoch 14/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0126 - mean_absolute_error: 0.0741 - val_loss: 0.0144 - val_mean_absolute_error: 0.0797 Epoch 15/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0125 - mean_absolute_error: 0.0735 - val_loss: 0.0144 - val_mean_absolute_error: 0.0795 Epoch 16/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0125 - mean_absolute_error: 0.0737 - val_loss: 0.0146 - val_mean_absolute_error: 0.0824 Epoch 17/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0125 - mean_absolute_error: 0.0735 - val_loss: 0.0143 - val_mean_absolute_error: 0.0796 Epoch 18/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0124 - mean_absolute_error: 0.0731 - val_loss: 0.0143 - val_mean_absolute_error: 0.0795 Epoch 19/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0123 - mean_absolute_error: 0.0729 - val_loss: 0.0146 - val_mean_absolute_error: 0.0774 Epoch 20/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0123 - mean_absolute_error: 0.0729 - val_loss: 0.0144 - val_mean_absolute_error: 0.0807 109/109 [==============================] - 1s 4ms/step - loss: 0.0144 - mean_absolute_error: 0.0807 54/54 [==============================] - 0s 4ms/step - loss: 0.0098 - mean_absolute_error: 0.0655 Epoch 1/50 383/383 [==============================] - 13s 29ms/step - loss: 0.0420 - mean_absolute_error: 0.1441 - val_loss: 0.0171 - val_mean_absolute_error: 0.0897 Epoch 2/50 383/383 [==============================] - 11s 
27ms/step - loss: 0.0146 - mean_absolute_error: 0.0824 - val_loss: 0.0163 - val_mean_absolute_error: 0.0865 Epoch 3/50 383/383 [==============================] - 11s 28ms/step - loss: 0.0141 - mean_absolute_error: 0.0798 - val_loss: 0.0161 - val_mean_absolute_error: 0.0855 Epoch 4/50 383/383 [==============================] - 13s 33ms/step - loss: 0.0139 - mean_absolute_error: 0.0787 - val_loss: 0.0158 - val_mean_absolute_error: 0.0849 Epoch 5/50 383/383 [==============================] - 15s 38ms/step - loss: 0.0138 - mean_absolute_error: 0.0782 - val_loss: 0.0156 - val_mean_absolute_error: 0.0832 Epoch 6/50 383/383 [==============================] - 17s 45ms/step - loss: 0.0136 - mean_absolute_error: 0.0774 - val_loss: 0.0155 - val_mean_absolute_error: 0.0811 Epoch 7/50 383/383 [==============================] - 13s 34ms/step - loss: 0.0134 - mean_absolute_error: 0.0767 - val_loss: 0.0156 - val_mean_absolute_error: 0.0859 Epoch 8/50 383/383 [==============================] - 15s 40ms/step - loss: 0.0132 - mean_absolute_error: 0.0759 - val_loss: 0.0159 - val_mean_absolute_error: 0.0884 Epoch 9/50 383/383 [==============================] - 12s 30ms/step - loss: 0.0129 - mean_absolute_error: 0.0752 - val_loss: 0.0147 - val_mean_absolute_error: 0.0786 Epoch 10/50 383/383 [==============================] - 14s 37ms/step - loss: 0.0127 - mean_absolute_error: 0.0747 - val_loss: 0.0151 - val_mean_absolute_error: 0.0859 Epoch 11/50 383/383 [==============================] - 12s 30ms/step - loss: 0.0126 - mean_absolute_error: 0.0743 - val_loss: 0.0142 - val_mean_absolute_error: 0.0807 Epoch 12/50 383/383 [==============================] - 13s 33ms/step - loss: 0.0125 - mean_absolute_error: 0.0737 - val_loss: 0.0149 - val_mean_absolute_error: 0.0841 Epoch 13/50 383/383 [==============================] - 13s 34ms/step - loss: 0.0123 - mean_absolute_error: 0.0731 - val_loss: 0.0138 - val_mean_absolute_error: 0.0778 Epoch 14/50 383/383 [==============================] - 14s 
36ms/step - loss: 0.0119 - mean_absolute_error: 0.0718 - val_loss: 0.0135 - val_mean_absolute_error: 0.0744 Epoch 15/50 383/383 [==============================] - 14s 36ms/step - loss: 0.0118 - mean_absolute_error: 0.0712 - val_loss: 0.0130 - val_mean_absolute_error: 0.0747 Epoch 16/50 383/383 [==============================] - 11s 30ms/step - loss: 0.0116 - mean_absolute_error: 0.0704 - val_loss: 0.0129 - val_mean_absolute_error: 0.0752 Epoch 17/50 383/383 [==============================] - 13s 33ms/step - loss: 0.0115 - mean_absolute_error: 0.0700 - val_loss: 0.0127 - val_mean_absolute_error: 0.0722 Epoch 18/50 383/383 [==============================] - 11s 29ms/step - loss: 0.0113 - mean_absolute_error: 0.0692 - val_loss: 0.0124 - val_mean_absolute_error: 0.0727 Epoch 19/50 383/383 [==============================] - 11s 29ms/step - loss: 0.0112 - mean_absolute_error: 0.0686 - val_loss: 0.0123 - val_mean_absolute_error: 0.0728 Epoch 20/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0110 - mean_absolute_error: 0.0678 - val_loss: 0.0123 - val_mean_absolute_error: 0.0721 Epoch 21/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0109 - mean_absolute_error: 0.0674 - val_loss: 0.0119 - val_mean_absolute_error: 0.0690 Epoch 22/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0108 - mean_absolute_error: 0.0668 - val_loss: 0.0119 - val_mean_absolute_error: 0.0696 Epoch 23/50 383/383 [==============================] - 11s 28ms/step - loss: 0.0107 - mean_absolute_error: 0.0665 - val_loss: 0.0126 - val_mean_absolute_error: 0.0766 Epoch 24/50 383/383 [==============================] - 10s 26ms/step - loss: 0.0106 - mean_absolute_error: 0.0663 - val_loss: 0.0119 - val_mean_absolute_error: 0.0730 Epoch 25/50 383/383 [==============================] - 11s 27ms/step - loss: 0.0105 - mean_absolute_error: 0.0657 - val_loss: 0.0120 - val_mean_absolute_error: 0.0693 Epoch 26/50 383/383 [==============================] - 
11s 29ms/step - loss: 0.0104 - mean_absolute_error: 0.0655 - val_loss: 0.0116 - val_mean_absolute_error: 0.0687 Epoch 27/50 383/383 [==============================] - 16s 42ms/step - loss: 0.0103 - mean_absolute_error: 0.0650 - val_loss: 0.0123 - val_mean_absolute_error: 0.0735 Epoch 28/50 383/383 [==============================] - 11s 28ms/step - loss: 0.0102 - mean_absolute_error: 0.0644 - val_loss: 0.0117 - val_mean_absolute_error: 0.0703 Epoch 29/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0102 - mean_absolute_error: 0.0643 - val_loss: 0.0118 - val_mean_absolute_error: 0.0671 109/109 [==============================] - 1s 11ms/step - loss: 0.0118 - mean_absolute_error: 0.0671 54/54 [==============================] - 1s 10ms/step - loss: 0.0081 - mean_absolute_error: 0.0531
In [8]:
Copied!
# Compare validation / test MAE of every multi-step model side by side.
# Fix: this cell was duplicated (the chart drawn twice); the duplicate is removed.
plt.title('Multi Step')
plt.xlabel('Models')
plt.ylabel('MAE')
# v[1] is the MAE from model.evaluate() (index 0 is the loss).
# Negative width with align='edge' draws the validation bar to the LEFT of each
# tick and the test bar to the RIGHT, giving paired bars without shifting x.
plt.bar(ms_val_performance.keys(), [v[1] for v in ms_val_performance.values()],
        width=-0.25, align='edge', label='Validation')
plt.bar(ms_test_performance.keys(), [v[1] for v in ms_test_performance.values()],
        width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.xticks(rotation=45, ha='right')
plt.tight_layout()
ARLSTMアーキテクチャを調べる¶
ARLSTMモデルを構築する¶
In [ ]:
Copied!