第15章 LSTMで過去を記憶する
In [9]:
Copied!
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import MeanAbsoluteError
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import MeanAbsoluteError
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM
In [2]:
Copied!
url_train = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/train.csv'
url_val = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/val.csv'
url_test = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/test.csv'
df_train = pd.read_csv(url_train, index_col=0)
df_val = pd.read_csv(url_val, index_col=0)
df_test = pd.read_csv(url_test, index_col=0)
url_train = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/train.csv'
url_val = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/val.csv'
url_test = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/test.csv'
df_train = pd.read_csv(url_train, index_col=0)
df_val = pd.read_csv(url_val, index_col=0)
df_test = pd.read_csv(url_test, index_col=0)
In [3]:
Copied!
class DataWindow:
def __init__(self, input_width, label_width, shift, df_train, df_val, df_test, label_columns=None):
# window size
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
# データ
self.df_train = df_train
self.df_val = df_val
self.df_test = df_test
# ラベル
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in enumerate(label_columns)}
self.column_indices = {name: i for i, name in enumerate(self.df_train.columns)}
# スライス
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
# ラベル開始位置
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def split_to_inputs_labels(self, features):
inputs = features[:, self.input_slice, :]
labels = features[:, self.labels_slice, :]
if self.label_columns is not None:
labels = tf.stack([labels[:, :, self.column_indices[name]] for name in self.label_columns], axis=-1)
inputs.set_shape([None, self.input_width, None])
labels.set_shape([None, self.label_width, None])
return inputs, labels
def plot(self, plot_col: str, model=None, max_subplots=3):
inputs, labels = self.sample_batch
plt.figure(figsize=(12, 8))
plot_col_index = self.column_indices[plot_col]
n_max = min(max_subplots, len(inputs))
for n in range(n_max):
plt.subplot(n_max, 1, n+1)
plt.ylabel(f'{plot_col} [scaled]')
plt.plot(self.input_indices, inputs[n, :, plot_col_index], label='Inputs', marker='.', zorder=-10)
if self.label_columns:
label_col_index = self.label_columns_indices.get(plot_col, None)
else:
label_col_index = plot_col_index
if label_col_index is None:
continue
plt.scatter(self.label_indices, labels[n, :, label_col_index], edgecolors='k', label='Labels', c='tab:green', s=64)
if model is not None:
predictions = model(inputs)
plt.scatter(self.label_indices, predictions[n, :, label_col_index], marker='X', edgecolors='k', label='Predictions', c='tab:red', s=64)
if n == 0:
plt.legend()
plt.xlabel('Time (h)')
def make_dataset(self, data):
data = np.array(data, dtype=np.float32)
ds = tf.keras.utils.timeseries_dataset_from_array(
data=data,
targets=None,
sequence_length=self.total_window_size,
sequence_stride=1,
shuffle=True,
batch_size=32,
)
ds = ds.map(self.split_to_inputs_labels)
return ds
@property
def train(self):
return self.make_dataset(self.df_train)
@property
def val(self):
return self.make_dataset(self.df_val)
@property
def test(self):
return self.make_dataset(self.df_test)
@property
def sample_batch(self):
"""Get and cache an example batch of `inputs, labels` for plotting."""
result = getattr(self, '_sample_batch', None)
if result is None:
result = next(iter(self.train))
self._sample_batch = result
return result
class DataWindow:
def __init__(self, input_width, label_width, shift, df_train, df_val, df_test, label_columns=None):
# window size
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
# データ
self.df_train = df_train
self.df_val = df_val
self.df_test = df_test
# ラベル
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in enumerate(label_columns)}
self.column_indices = {name: i for i, name in enumerate(self.df_train.columns)}
# スライス
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
# ラベル開始位置
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def split_to_inputs_labels(self, features):
inputs = features[:, self.input_slice, :]
labels = features[:, self.labels_slice, :]
if self.label_columns is not None:
labels = tf.stack([labels[:, :, self.column_indices[name]] for name in self.label_columns], axis=-1)
inputs.set_shape([None, self.input_width, None])
labels.set_shape([None, self.label_width, None])
return inputs, labels
def plot(self, plot_col: str, model=None, max_subplots=3):
inputs, labels = self.sample_batch
plt.figure(figsize=(12, 8))
plot_col_index = self.column_indices[plot_col]
n_max = min(max_subplots, len(inputs))
for n in range(n_max):
plt.subplot(n_max, 1, n+1)
plt.ylabel(f'{plot_col} [scaled]')
plt.plot(self.input_indices, inputs[n, :, plot_col_index], label='Inputs', marker='.', zorder=-10)
if self.label_columns:
label_col_index = self.label_columns_indices.get(plot_col, None)
else:
label_col_index = plot_col_index
if label_col_index is None:
continue
plt.scatter(self.label_indices, labels[n, :, label_col_index], edgecolors='k', label='Labels', c='tab:green', s=64)
if model is not None:
predictions = model(inputs)
plt.scatter(self.label_indices, predictions[n, :, label_col_index], marker='X', edgecolors='k', label='Predictions', c='tab:red', s=64)
if n == 0:
plt.legend()
plt.xlabel('Time (h)')
def make_dataset(self, data):
data = np.array(data, dtype=np.float32)
ds = tf.keras.utils.timeseries_dataset_from_array(
data=data,
targets=None,
sequence_length=self.total_window_size,
sequence_stride=1,
shuffle=True,
batch_size=32,
)
ds = ds.map(self.split_to_inputs_labels)
return ds
@property
def train(self):
return self.make_dataset(self.df_train)
@property
def val(self):
return self.make_dataset(self.df_val)
@property
def test(self):
return self.make_dataset(self.df_test)
@property
def sample_batch(self):
"""Get and cache an example batch of `inputs, labels` for plotting."""
result = getattr(self, '_sample_batch', None)
if result is None:
result = next(iter(self.train))
self._sample_batch = result
return result
In [5]:
Copied!
# for training
def compile_and_fit(model, window, patience=3, max_epochs=50):
early_stopping = EarlyStopping(
monitor='val_loss',
patience=patience,
mode='min'
)
model.compile(
loss=MeanSquaredError(),
optimizer=Adam(),
metrics=[MeanAbsoluteError()]
)
history = model.fit(
window.train,
epochs=max_epochs,
validation_data=window.val,
callbacks=[early_stopping]
)
return history
# for training
def compile_and_fit(model, window, patience=3, max_epochs=50):
early_stopping = EarlyStopping(
monitor='val_loss',
patience=patience,
mode='min'
)
model.compile(
loss=MeanSquaredError(),
optimizer=Adam(),
metrics=[MeanAbsoluteError()]
)
history = model.fit(
window.train,
epochs=max_epochs,
validation_data=window.val,
callbacks=[early_stopping]
)
return history
In [7]:
Copied!
# models
class Baseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return inputs
elif isinstance(self.label_index, list):
tensors = []
for index in self.label_index:
res = inputs[:, :, index]
res = res[:, :, tf.newaxis]
tensors.append(res)
return tf.concat(tensors, axis=-1)
else:
res = inputs[:, :, self.label_index]
return res[:, :, tf.newaxis]
class MultiStepLastBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return tf.tile(inputs[:, -1:, :], [1, 24, 1])
return tf.tile(inputs[:, -1:, self.label_index:], [1, 24, 1])
class RepeatBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
return inputs[:, :, self.label_index:]
# 線形モデル
linear = Sequential([Dense(units=1)])
ms_linear = Sequential([Dense(units=1, kernel_initializer=tf.initializers.zeros)])
mo_linear = Sequential([Dense(units=2)])
# DNN
dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1)
])
ms_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1, kernel_initializer=tf.initializers.zeros)
])
mo_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=2)
])
# models
class Baseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return inputs
elif isinstance(self.label_index, list):
tensors = []
for index in self.label_index:
res = inputs[:, :, index]
res = res[:, :, tf.newaxis]
tensors.append(res)
return tf.concat(tensors, axis=-1)
else:
res = inputs[:, :, self.label_index]
return res[:, :, tf.newaxis]
class MultiStepLastBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return tf.tile(inputs[:, -1:, :], [1, 24, 1])
return tf.tile(inputs[:, -1:, self.label_index:], [1, 24, 1])
class RepeatBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
return inputs[:, :, self.label_index:]
# 線形モデル
linear = Sequential([Dense(units=1)])
ms_linear = Sequential([Dense(units=1, kernel_initializer=tf.initializers.zeros)])
mo_linear = Sequential([Dense(units=2)])
# DNN
dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1)
])
ms_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1, kernel_initializer=tf.initializers.zeros)
])
mo_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=2)
])
In [8]:
Copied!
# シングルステップ
single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
column_indices = {name: i for i, name in enumerate(df_train.columns)}
baseline_last = Baseline(column_indices['traffic_volume'])
baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
val_performance = {}
test_performance = {}
## ベースライン
val_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.val)
test_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.test, verbose=0)
## 線形モデル
history = compile_and_fit(linear, single_step_window)
val_performance['Linear'] = linear.evaluate(single_step_window.val)
test_performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
## DNN
history = compile_and_fit(dense, single_step_window)
val_performance['Dense'] = dense.evaluate(single_step_window.val)
test_performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
# マルチステップ
multi_window = DataWindow(input_width=24, label_width=24, shift=24, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
ms_baseline_last = MultiStepLastBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_baseline_repeat = RepeatBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_repeat.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_val_performance = {}
ms_test_performance = {}
## ベースライン
ms_val_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.val)
ms_test_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.test)
ms_val_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.val)
ms_test_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.test)
## 線形モデル
history = compile_and_fit(ms_linear, multi_window)
ms_val_performance['Linear'] = ms_linear.evaluate(multi_window.val)
ms_test_performance['Linear'] = ms_linear.evaluate(multi_window.test)
## DNN
history = compile_and_fit(ms_dense, multi_window)
ms_val_performance['Dense'] = ms_dense.evaluate(multi_window.val)
ms_test_performance['Dense'] = ms_dense.evaluate(multi_window.test)
# 多変数アウトプット
col_names = ['temp', 'traffic_volume']
mo_single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_baseline_last = Baseline(label_index=[column_indices[col] for col in col_names])
mo_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
mo_val_performance = {}
mo_test_performance = {}
## ベースライン
mo_val_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
mo_test_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
## 線形モデル
history = compile_and_fit(mo_linear, mo_single_step_window)
mo_val_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.val)
mo_test_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.test)
## DNN
history = compile_and_fit(mo_dense, mo_single_step_window)
mo_val_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.val)
mo_test_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.test)
# シングルステップ
single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
column_indices = {name: i for i, name in enumerate(df_train.columns)}
baseline_last = Baseline(column_indices['traffic_volume'])
baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
val_performance = {}
test_performance = {}
## ベースライン
val_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.val)
test_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.test, verbose=0)
## 線形モデル
history = compile_and_fit(linear, single_step_window)
val_performance['Linear'] = linear.evaluate(single_step_window.val)
test_performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
## DNN
history = compile_and_fit(dense, single_step_window)
val_performance['Dense'] = dense.evaluate(single_step_window.val)
test_performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
# マルチステップ
multi_window = DataWindow(input_width=24, label_width=24, shift=24, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
ms_baseline_last = MultiStepLastBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_baseline_repeat = RepeatBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_repeat.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_val_performance = {}
ms_test_performance = {}
## ベースライン
ms_val_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.val)
ms_test_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.test)
ms_val_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.val)
ms_test_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.test)
## 線形モデル
history = compile_and_fit(ms_linear, multi_window)
ms_val_performance['Linear'] = ms_linear.evaluate(multi_window.val)
ms_test_performance['Linear'] = ms_linear.evaluate(multi_window.test)
## DNN
history = compile_and_fit(ms_dense, multi_window)
ms_val_performance['Dense'] = ms_dense.evaluate(multi_window.val)
ms_test_performance['Dense'] = ms_dense.evaluate(multi_window.test)
# 多変数アウトプット
col_names = ['temp', 'traffic_volume']
mo_single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_baseline_last = Baseline(label_index=[column_indices[col] for col in col_names])
mo_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
mo_val_performance = {}
mo_test_performance = {}
## ベースライン
mo_val_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
mo_test_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
## 線形モデル
history = compile_and_fit(mo_linear, mo_single_step_window)
mo_val_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.val)
mo_test_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.test)
## DNN
history = compile_and_fit(mo_dense, mo_single_step_window)
mo_val_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.val)
mo_test_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.test)
110/110 [==============================] - 0s 2ms/step - loss: 0.0133 - mean_absolute_error: 0.0831 Epoch 1/50 384/384 [==============================] - 2s 3ms/step - loss: 0.1478 - mean_absolute_error: 0.3296 - val_loss: 0.0949 - val_mean_absolute_error: 0.2637 Epoch 2/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0575 - mean_absolute_error: 0.1931 - val_loss: 0.0423 - val_mean_absolute_error: 0.1688 Epoch 3/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0287 - mean_absolute_error: 0.1313 - val_loss: 0.0253 - val_mean_absolute_error: 0.1276 Epoch 4/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0192 - mean_absolute_error: 0.1063 - val_loss: 0.0182 - val_mean_absolute_error: 0.1055 Epoch 5/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0150 - mean_absolute_error: 0.0927 - val_loss: 0.0146 - val_mean_absolute_error: 0.0928 Epoch 6/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0128 - mean_absolute_error: 0.0843 - val_loss: 0.0124 - val_mean_absolute_error: 0.0836 Epoch 7/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0115 - mean_absolute_error: 0.0791 - val_loss: 0.0112 - val_mean_absolute_error: 0.0770 Epoch 8/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0107 - mean_absolute_error: 0.0756 - val_loss: 0.0104 - val_mean_absolute_error: 0.0726 Epoch 9/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0102 - mean_absolute_error: 0.0732 - val_loss: 0.0099 - val_mean_absolute_error: 0.0712 Epoch 10/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0098 - mean_absolute_error: 0.0715 - val_loss: 0.0096 - val_mean_absolute_error: 0.0685 Epoch 11/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0096 - mean_absolute_error: 0.0702 - val_loss: 0.0094 - val_mean_absolute_error: 0.0679 Epoch 12/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0095 - mean_absolute_error: 0.0694 - val_loss: 0.0093 - val_mean_absolute_error: 0.0674 Epoch 13/50 384/384 [==============================] - 4s 9ms/step - loss: 0.0094 - mean_absolute_error: 0.0688 - val_loss: 0.0092 - val_mean_absolute_error: 0.0666 Epoch 14/50 384/384 [==============================] - 3s 8ms/step - loss: 0.0093 - mean_absolute_error: 0.0685 - val_loss: 0.0091 - val_mean_absolute_error: 0.0678 Epoch 15/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0093 - mean_absolute_error: 0.0682 - val_loss: 0.0091 - val_mean_absolute_error: 0.0679 Epoch 16/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0093 - mean_absolute_error: 0.0680 - val_loss: 0.0091 - val_mean_absolute_error: 0.0686 Epoch 17/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0093 - mean_absolute_error: 0.0680 - val_loss: 0.0091 - val_mean_absolute_error: 0.0665 Epoch 18/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0093 - mean_absolute_error: 0.0678 - val_loss: 0.0091 - val_mean_absolute_error: 0.0666 Epoch 19/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0093 - mean_absolute_error: 0.0677 - val_loss: 0.0091 - val_mean_absolute_error: 0.0677 Epoch 20/50 384/384 [==============================] - 3s 8ms/step - loss: 0.0093 - mean_absolute_error: 0.0677 - val_loss: 0.0090 - val_mean_absolute_error: 0.0665 Epoch 21/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0093 - mean_absolute_error: 0.0677 - val_loss: 0.0091 - val_mean_absolute_error: 0.0673 Epoch 22/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0093 - mean_absolute_error: 0.0677 - val_loss: 0.0090 - val_mean_absolute_error: 0.0663 Epoch 23/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0093 - mean_absolute_error: 0.0677 - val_loss: 0.0090 - val_mean_absolute_error: 0.0664 Epoch 24/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0093 - mean_absolute_error: 0.0678 - val_loss: 0.0090 - val_mean_absolute_error: 0.0663 Epoch 25/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0093 - mean_absolute_error: 0.0677 - val_loss: 0.0091 - val_mean_absolute_error: 0.0675 110/110 [==============================] - 1s 6ms/step - loss: 0.0091 - mean_absolute_error: 0.0675 Epoch 1/50 384/384 [==============================] - 3s 5ms/step - loss: 0.0121 - mean_absolute_error: 0.0733 - val_loss: 0.0050 - val_mean_absolute_error: 0.0526 Epoch 2/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0048 - mean_absolute_error: 0.0510 - val_loss: 0.0043 - val_mean_absolute_error: 0.0504 Epoch 3/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0041 - mean_absolute_error: 0.0470 - val_loss: 0.0035 - val_mean_absolute_error: 0.0439 Epoch 4/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0038 - mean_absolute_error: 0.0449 - val_loss: 0.0031 - val_mean_absolute_error: 0.0416 Epoch 5/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0035 - mean_absolute_error: 0.0430 - val_loss: 0.0037 - val_mean_absolute_error: 0.0484 Epoch 6/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0034 - mean_absolute_error: 0.0425 - val_loss: 0.0027 - val_mean_absolute_error: 0.0385 Epoch 7/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0032 - mean_absolute_error: 0.0406 - val_loss: 0.0026 - val_mean_absolute_error: 0.0373 Epoch 8/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0032 - mean_absolute_error: 0.0409 - val_loss: 0.0026 - val_mean_absolute_error: 0.0372 Epoch 9/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0029 - mean_absolute_error: 0.0385 - val_loss: 0.0035 - val_mean_absolute_error: 0.0438 Epoch 10/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0029 - mean_absolute_error: 0.0386 - val_loss: 0.0023 - val_mean_absolute_error: 0.0351 Epoch 11/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0028 - mean_absolute_error: 0.0379 - val_loss: 0.0026 - val_mean_absolute_error: 0.0374 Epoch 12/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0029 - mean_absolute_error: 0.0381 - val_loss: 0.0022 - val_mean_absolute_error: 0.0342 Epoch 13/50 384/384 [==============================] - 3s 8ms/step - loss: 0.0027 - mean_absolute_error: 0.0372 - val_loss: 0.0023 - val_mean_absolute_error: 0.0346 Epoch 14/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0027 - mean_absolute_error: 0.0366 - val_loss: 0.0022 - val_mean_absolute_error: 0.0343 Epoch 15/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0026 - mean_absolute_error: 0.0359 - val_loss: 0.0021 - val_mean_absolute_error: 0.0328 Epoch 16/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0026 - mean_absolute_error: 0.0354 - val_loss: 0.0022 - val_mean_absolute_error: 0.0342 Epoch 17/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0026 - mean_absolute_error: 0.0357 - val_loss: 0.0020 - val_mean_absolute_error: 0.0323 Epoch 18/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0026 - mean_absolute_error: 0.0358 - val_loss: 0.0021 - val_mean_absolute_error: 0.0337 Epoch 19/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0025 - mean_absolute_error: 0.0351 - val_loss: 0.0019 - val_mean_absolute_error: 0.0316 Epoch 20/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0025 - mean_absolute_error: 0.0348 - val_loss: 0.0022 - val_mean_absolute_error: 0.0356 Epoch 21/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0024 - mean_absolute_error: 0.0346 - val_loss: 0.0023 - val_mean_absolute_error: 0.0354 Epoch 22/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0024 - mean_absolute_error: 0.0345 - val_loss: 0.0020 - val_mean_absolute_error: 0.0327 110/110 [==============================] - 0s 3ms/step - loss: 0.0020 - mean_absolute_error: 0.0327 109/109 [==============================] - 0s 2ms/step - loss: 0.1875 - mean_absolute_error: 0.3522 54/54 [==============================] - 0s 2ms/step - loss: 0.1814 - mean_absolute_error: 0.3473 109/109 [==============================] - 0s 2ms/step - loss: 0.2065 - mean_absolute_error: 0.3473 54/54 [==============================] - 0s 2ms/step - loss: 0.2018 - mean_absolute_error: 0.3413 Epoch 1/50 383/383 [==============================] - 2s 4ms/step - loss: 0.0901 - mean_absolute_error: 0.2467 - val_loss: 0.0459 - val_mean_absolute_error: 0.1824 Epoch 2/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0299 - mean_absolute_error: 0.1371 - val_loss: 0.0261 - val_mean_absolute_error: 0.1253 Epoch 3/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0201 - mean_absolute_error: 0.1045 - val_loss: 0.0213 - val_mean_absolute_error: 0.1074 Epoch 4/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0177 - mean_absolute_error: 0.0955 - val_loss: 0.0196 - val_mean_absolute_error: 0.1000 Epoch 5/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0168 - mean_absolute_error: 0.0914 - val_loss: 0.0189 - val_mean_absolute_error: 0.0957 Epoch 6/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0164 - mean_absolute_error: 0.0887 - val_loss: 0.0185 - val_mean_absolute_error: 0.0926 Epoch 7/50 383/383 [==============================] - 1s 3ms/step - loss: 0.0162 - mean_absolute_error: 0.0869 - val_loss: 0.0183 - val_mean_absolute_error: 0.0907 Epoch 8/50 383/383 [==============================] - 3s 8ms/step - loss: 0.0161 - mean_absolute_error: 0.0857 - val_loss: 0.0182 - val_mean_absolute_error: 0.0896 Epoch 9/50 383/383 [==============================] - 2s 4ms/step - loss: 0.0161 - mean_absolute_error: 0.0850 - val_loss: 0.0182 - val_mean_absolute_error: 0.0890 Epoch 10/50 383/383 [==============================] - 3s 9ms/step - loss: 0.0161 - mean_absolute_error: 0.0846 - val_loss: 0.0182 - val_mean_absolute_error: 0.0888 Epoch 11/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0161 - mean_absolute_error: 0.0843 - val_loss: 0.0182 - val_mean_absolute_error: 0.0887 Epoch 12/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0841 - val_loss: 0.0182 - val_mean_absolute_error: 0.0891 Epoch 13/50 383/383 [==============================] - 1s 4ms/step - loss: 0.0161 - mean_absolute_error: 0.0840 - val_loss: 0.0182 - val_mean_absolute_error: 0.0886 109/109 [==============================] - 0s 3ms/step - loss: 0.0182 - mean_absolute_error: 0.0886 54/54 [==============================] - 0s 5ms/step - loss: 0.0142 - mean_absolute_error: 0.0766 Epoch 1/50 383/383 [==============================] - 4s 8ms/step - loss: 0.0304 - mean_absolute_error: 0.1116 - val_loss: 0.0168 - val_mean_absolute_error: 0.0857 Epoch 2/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0142 - mean_absolute_error: 0.0789 - val_loss: 0.0161 - val_mean_absolute_error: 0.0833 Epoch 3/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0138 - mean_absolute_error: 0.0784 - val_loss: 0.0156 - val_mean_absolute_error: 0.0823 Epoch 4/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0135 - mean_absolute_error: 0.0775 - val_loss: 0.0154 - val_mean_absolute_error: 0.0817 Epoch 5/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0134 - mean_absolute_error: 0.0777 - val_loss: 0.0152 - val_mean_absolute_error: 0.0816 Epoch 6/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0131 - mean_absolute_error: 0.0767 - val_loss: 0.0149 - val_mean_absolute_error: 0.0829 Epoch 7/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0130 - mean_absolute_error: 0.0763 - val_loss: 0.0146 - val_mean_absolute_error: 0.0809 Epoch 8/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0130 - mean_absolute_error: 0.0761 - val_loss: 0.0146 - val_mean_absolute_error: 0.0812 Epoch 9/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0129 - mean_absolute_error: 0.0760 - val_loss: 0.0146 - val_mean_absolute_error: 0.0798 Epoch 10/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0129 - mean_absolute_error: 0.0759 - val_loss: 0.0147 - val_mean_absolute_error: 0.0804 Epoch 11/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0128 - mean_absolute_error: 0.0754 - val_loss: 0.0145 - val_mean_absolute_error: 0.0790 Epoch 12/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0128 - mean_absolute_error: 0.0756 - val_loss: 0.0148 - val_mean_absolute_error: 0.0800 Epoch 13/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0127 - mean_absolute_error: 0.0753 - val_loss: 0.0146 - val_mean_absolute_error: 0.0804 Epoch 14/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0127 - mean_absolute_error: 0.0751 - val_loss: 0.0146 - val_mean_absolute_error: 0.0812 109/109 [==============================] - 0s 3ms/step - loss: 0.0146 - mean_absolute_error: 0.0812 54/54 [==============================] - 0s 3ms/step - loss: 0.0100 - mean_absolute_error: 0.0668 110/110 [==============================] - 1s 3ms/step - loss: 0.0069 - mean_absolute_error: 0.0482 110/110 [==============================] - 0s 3ms/step - loss: 0.0069 - mean_absolute_error: 0.0482 Epoch 1/50 384/384 [==============================] - 2s 4ms/step - loss: 0.1803 - mean_absolute_error: 0.3456 - val_loss: 0.0670 - val_mean_absolute_error: 0.2038 Epoch 2/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0444 - mean_absolute_error: 0.1666 - val_loss: 0.0279 - val_mean_absolute_error: 0.1320 Epoch 3/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0207 - mean_absolute_error: 0.1105 - val_loss: 0.0148 - val_mean_absolute_error: 0.0911 Epoch 4/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0119 - mean_absolute_error: 0.0790 - val_loss: 0.0095 - val_mean_absolute_error: 0.0684 Epoch 5/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0084 - mean_absolute_error: 0.0632 - val_loss: 0.0072 - val_mean_absolute_error: 0.0569 Epoch 6/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0069 - mean_absolute_error: 0.0557 - val_loss: 0.0061 - val_mean_absolute_error: 0.0508 Epoch 7/50 384/384 [==============================] - 3s 6ms/step - loss: 0.0061 - mean_absolute_error: 0.0518 - val_loss: 0.0056 - val_mean_absolute_error: 0.0478 Epoch 8/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0057 - mean_absolute_error: 0.0495 - val_loss: 0.0053 - val_mean_absolute_error: 0.0462 Epoch 9/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0055 - mean_absolute_error: 0.0478 - val_loss: 0.0051 - val_mean_absolute_error: 0.0450 Epoch 10/50 384/384 [==============================] - 4s 10ms/step - loss: 0.0053 - mean_absolute_error: 0.0463 - val_loss: 0.0049 - val_mean_absolute_error: 0.0439 Epoch 11/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0052 - mean_absolute_error: 0.0449 - val_loss: 0.0048 - val_mean_absolute_error: 0.0421 Epoch 12/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0051 - mean_absolute_error: 0.0435 - val_loss: 0.0048 - val_mean_absolute_error: 0.0406 Epoch 13/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0050 - mean_absolute_error: 0.0422 - val_loss: 0.0047 - val_mean_absolute_error: 0.0400 Epoch 14/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0050 - mean_absolute_error: 0.0412 - val_loss: 0.0047 - val_mean_absolute_error: 0.0387 Epoch 15/50 384/384 [==============================] - 3s 8ms/step - loss: 0.0050 - mean_absolute_error: 0.0405 - val_loss: 0.0047 - val_mean_absolute_error: 0.0388 Epoch 16/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0049 - mean_absolute_error: 0.0399 - val_loss: 0.0046 - val_mean_absolute_error: 0.0383 Epoch 17/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0049 - mean_absolute_error: 0.0396 - val_loss: 0.0046 - val_mean_absolute_error: 0.0377 Epoch 18/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0380 Epoch 19/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0378 Epoch 20/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0375 Epoch 21/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0394 - val_loss: 0.0046 - val_mean_absolute_error: 0.0380 Epoch 22/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0381 110/110 [==============================] - 0s 3ms/step - loss: 0.0046 - mean_absolute_error: 0.0381 55/55 [==============================] - 0s 2ms/step - loss: 0.0043 - mean_absolute_error: 0.0360 Epoch 1/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0108 - mean_absolute_error: 0.0606 - val_loss: 0.0030 - val_mean_absolute_error: 0.0351 Epoch 2/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0029 - mean_absolute_error: 0.0333 - val_loss: 0.0024 - val_mean_absolute_error: 0.0324 Epoch 3/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0023 - mean_absolute_error: 0.0300 - val_loss: 0.0018 - val_mean_absolute_error: 0.0282 Epoch 4/50 384/384 [==============================] - 3s 8ms/step - loss: 0.0021 - mean_absolute_error: 0.0290 - val_loss: 0.0017 - val_mean_absolute_error: 0.0274 Epoch 5/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0021 - mean_absolute_error: 0.0284 - val_loss: 0.0023 - val_mean_absolute_error: 0.0315 Epoch 6/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0019 - mean_absolute_error: 0.0272 - val_loss: 0.0015 - val_mean_absolute_error: 0.0253 Epoch 7/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0019 - mean_absolute_error: 0.0271 - val_loss: 0.0015 - val_mean_absolute_error: 0.0248 Epoch 8/50 384/384 [==============================] - 3s 6ms/step - loss: 0.0019 - mean_absolute_error: 0.0267 - val_loss: 0.0016 - val_mean_absolute_error: 0.0260 Epoch 9/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0018 - mean_absolute_error: 0.0263 - val_loss: 0.0014 - val_mean_absolute_error: 0.0248 Epoch 10/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0018 - mean_absolute_error: 0.0262 - val_loss: 0.0014 - val_mean_absolute_error: 0.0248 Epoch 11/50 384/384 [==============================] - 4s 11ms/step - loss: 0.0018 - mean_absolute_error: 0.0258 - val_loss: 0.0016 - val_mean_absolute_error: 0.0260 Epoch 12/50 384/384 [==============================] - 5s 13ms/step - loss: 0.0017 - mean_absolute_error: 0.0255 - val_loss: 0.0014 - val_mean_absolute_error: 0.0236 Epoch 13/50 384/384 [==============================] - 3s 8ms/step - loss: 0.0017 - mean_absolute_error: 0.0254 - val_loss: 0.0014 - val_mean_absolute_error: 0.0237 Epoch 14/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0017 - mean_absolute_error: 0.0248 - val_loss: 0.0013 - val_mean_absolute_error: 0.0235 Epoch 15/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0017 - mean_absolute_error: 0.0249 - val_loss: 0.0013 - val_mean_absolute_error: 0.0230 Epoch 16/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0016 - mean_absolute_error: 0.0248 - val_loss: 0.0014 - val_mean_absolute_error: 0.0238 Epoch 17/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0017 - mean_absolute_error: 0.0248 - val_loss: 0.0014 - val_mean_absolute_error: 0.0250 Epoch 18/50 384/384 [==============================] - 3s 8ms/step - loss: 0.0016 - mean_absolute_error: 0.0243 - val_loss: 0.0014 - val_mean_absolute_error: 0.0246 110/110 [==============================] - 1s 4ms/step - loss: 0.0014 - mean_absolute_error: 0.0246 55/55 [==============================] - 0s 4ms/step - loss: 0.0010 - mean_absolute_error: 0.0217
リカレントニューラルネットワーク(RNN)を探索する¶
LSTMアーキテクチャを調べる¶
LSTMアーキテクチャを実装する¶
In [18]:
Copied!
lstm_model = Sequential([
LSTM(32, return_sequences=True),
Dense(units=1)
])
history = compile_and_fit(lstm_model, wide_window)
val_performance['LSTM'] = lstm_model.evaluate(wide_window.val)
test_performance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0)
lstm_model = Sequential([
LSTM(32, return_sequences=True),
Dense(units=1)
])
history = compile_and_fit(lstm_model, wide_window)
val_performance['LSTM'] = lstm_model.evaluate(wide_window.val)
test_performance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0)
Epoch 1/50 384/384 [==============================] - 8s 16ms/step - loss: 0.0316 - mean_absolute_error: 0.1258 - val_loss: 0.0108 - val_mean_absolute_error: 0.0755 Epoch 2/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0072 - mean_absolute_error: 0.0607 - val_loss: 0.0050 - val_mean_absolute_error: 0.0506 Epoch 3/50 384/384 [==============================] - 9s 24ms/step - loss: 0.0046 - mean_absolute_error: 0.0477 - val_loss: 0.0038 - val_mean_absolute_error: 0.0437 Epoch 4/50 384/384 [==============================] - 13s 34ms/step - loss: 0.0039 - mean_absolute_error: 0.0432 - val_loss: 0.0033 - val_mean_absolute_error: 0.0401 Epoch 5/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0034 - mean_absolute_error: 0.0401 - val_loss: 0.0030 - val_mean_absolute_error: 0.0382 Epoch 6/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0032 - mean_absolute_error: 0.0383 - val_loss: 0.0028 - val_mean_absolute_error: 0.0370 Epoch 7/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0030 - mean_absolute_error: 0.0374 - val_loss: 0.0026 - val_mean_absolute_error: 0.0359 Epoch 8/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0029 - mean_absolute_error: 0.0368 - val_loss: 0.0025 - val_mean_absolute_error: 0.0351 Epoch 9/50 384/384 [==============================] - 9s 23ms/step - loss: 0.0028 - mean_absolute_error: 0.0363 - val_loss: 0.0025 - val_mean_absolute_error: 0.0350 Epoch 10/50 384/384 [==============================] - 8s 21ms/step - loss: 0.0028 - mean_absolute_error: 0.0359 - val_loss: 0.0024 - val_mean_absolute_error: 0.0339 Epoch 11/50 384/384 [==============================] - 8s 21ms/step - loss: 0.0027 - mean_absolute_error: 0.0355 - val_loss: 0.0025 - val_mean_absolute_error: 0.0348 Epoch 12/50 384/384 [==============================] - 7s 17ms/step - loss: 0.0026 - mean_absolute_error: 0.0350 - val_loss: 0.0024 - val_mean_absolute_error: 0.0345 Epoch 13/50 384/384 [==============================] - 7s 18ms/step - loss: 0.0026 - mean_absolute_error: 0.0347 - val_loss: 0.0023 - val_mean_absolute_error: 0.0336 Epoch 14/50 384/384 [==============================] - 7s 17ms/step - loss: 0.0025 - mean_absolute_error: 0.0343 - val_loss: 0.0022 - val_mean_absolute_error: 0.0327 Epoch 15/50 384/384 [==============================] - 7s 18ms/step - loss: 0.0025 - mean_absolute_error: 0.0341 - val_loss: 0.0022 - val_mean_absolute_error: 0.0326 Epoch 16/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0024 - mean_absolute_error: 0.0336 - val_loss: 0.0021 - val_mean_absolute_error: 0.0319 Epoch 17/50 384/384 [==============================] - 8s 21ms/step - loss: 0.0024 - mean_absolute_error: 0.0333 - val_loss: 0.0021 - val_mean_absolute_error: 0.0330 Epoch 18/50 384/384 [==============================] - 9s 24ms/step - loss: 0.0024 - mean_absolute_error: 0.0330 - val_loss: 0.0020 - val_mean_absolute_error: 0.0309 Epoch 19/50 384/384 [==============================] - 8s 21ms/step - loss: 0.0023 - mean_absolute_error: 0.0326 - val_loss: 0.0020 - val_mean_absolute_error: 0.0315 Epoch 20/50 384/384 [==============================] - 10s 25ms/step - loss: 0.0023 - mean_absolute_error: 0.0324 - val_loss: 0.0020 - val_mean_absolute_error: 0.0314 Epoch 21/50 384/384 [==============================] - 6s 17ms/step - loss: 0.0023 - mean_absolute_error: 0.0321 - val_loss: 0.0020 - val_mean_absolute_error: 0.0308 Epoch 22/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0022 - mean_absolute_error: 0.0320 - val_loss: 0.0020 - val_mean_absolute_error: 0.0325 Epoch 23/50 384/384 [==============================] - 10s 25ms/step - loss: 0.0022 - mean_absolute_error: 0.0317 - val_loss: 0.0019 - val_mean_absolute_error: 0.0309 Epoch 24/50 384/384 [==============================] - 7s 18ms/step - loss: 0.0022 - mean_absolute_error: 0.0315 - val_loss: 0.0019 - val_mean_absolute_error: 0.0300 Epoch 25/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0021 - mean_absolute_error: 0.0312 - val_loss: 0.0019 - val_mean_absolute_error: 0.0304 Epoch 26/50 384/384 [==============================] - 8s 21ms/step - loss: 0.0021 - mean_absolute_error: 0.0310 - val_loss: 0.0018 - val_mean_absolute_error: 0.0295 Epoch 27/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0021 - mean_absolute_error: 0.0308 - val_loss: 0.0018 - val_mean_absolute_error: 0.0297 Epoch 28/50 384/384 [==============================] - 10s 25ms/step - loss: 0.0021 - mean_absolute_error: 0.0307 - val_loss: 0.0019 - val_mean_absolute_error: 0.0303 Epoch 29/50 384/384 [==============================] - 8s 22ms/step - loss: 0.0021 - mean_absolute_error: 0.0305 - val_loss: 0.0018 - val_mean_absolute_error: 0.0297 Epoch 30/50 384/384 [==============================] - 8s 21ms/step - loss: 0.0020 - mean_absolute_error: 0.0303 - val_loss: 0.0018 - val_mean_absolute_error: 0.0298 Epoch 31/50 384/384 [==============================] - 11s 29ms/step - loss: 0.0020 - mean_absolute_error: 0.0301 - val_loss: 0.0018 - val_mean_absolute_error: 0.0296 Epoch 32/50 384/384 [==============================] - 8s 19ms/step - loss: 0.0020 - mean_absolute_error: 0.0299 - val_loss: 0.0017 - val_mean_absolute_error: 0.0288 Epoch 33/50 384/384 [==============================] - 7s 17ms/step - loss: 0.0020 - mean_absolute_error: 0.0299 - val_loss: 0.0019 - val_mean_absolute_error: 0.0310 Epoch 34/50 384/384 [==============================] - 7s 18ms/step - loss: 0.0020 - mean_absolute_error: 0.0297 - val_loss: 0.0017 - val_mean_absolute_error: 0.0289 Epoch 35/50 384/384 [==============================] - 6s 17ms/step - loss: 0.0019 - mean_absolute_error: 0.0295 - val_loss: 0.0017 - val_mean_absolute_error: 0.0291 109/109 [==============================] - 1s 7ms/step - loss: 0.0017 - mean_absolute_error: 0.0291
In [11]:
Copied!
ms_lstm_model = Sequential([
LSTM(32, return_sequences=True),
Dense(units=1, kernel_initializer=tf.initializers.zeros)
])
history = compile_and_fit(ms_lstm_model, multi_window)
ms_val_performance['LSTM'] = ms_lstm_model.evaluate(multi_window.val)
ms_test_performance['LSTM'] = ms_lstm_model.evaluate(multi_window.test)
ms_lstm_model = Sequential([
LSTM(32, return_sequences=True),
Dense(units=1, kernel_initializer=tf.initializers.zeros)
])
history = compile_and_fit(ms_lstm_model, multi_window)
ms_val_performance['LSTM'] = ms_lstm_model.evaluate(multi_window.val)
ms_test_performance['LSTM'] = ms_lstm_model.evaluate(multi_window.test)
Epoch 1/50 383/383 [==============================] - 9s 19ms/step - loss: 0.0504 - mean_absolute_error: 0.1617 - val_loss: 0.0206 - val_mean_absolute_error: 0.0996 Epoch 2/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0169 - mean_absolute_error: 0.0894 - val_loss: 0.0181 - val_mean_absolute_error: 0.0922 Epoch 3/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0153 - mean_absolute_error: 0.0837 - val_loss: 0.0170 - val_mean_absolute_error: 0.0870 Epoch 4/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0147 - mean_absolute_error: 0.0811 - val_loss: 0.0165 - val_mean_absolute_error: 0.0851 Epoch 5/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0143 - mean_absolute_error: 0.0798 - val_loss: 0.0161 - val_mean_absolute_error: 0.0831 Epoch 6/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0140 - mean_absolute_error: 0.0787 - val_loss: 0.0157 - val_mean_absolute_error: 0.0826 Epoch 7/50 383/383 [==============================] - 11s 27ms/step - loss: 0.0137 - mean_absolute_error: 0.0778 - val_loss: 0.0155 - val_mean_absolute_error: 0.0811 Epoch 8/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0135 - mean_absolute_error: 0.0771 - val_loss: 0.0152 - val_mean_absolute_error: 0.0830 Epoch 9/50 383/383 [==============================] - 10s 25ms/step - loss: 0.0133 - mean_absolute_error: 0.0764 - val_loss: 0.0151 - val_mean_absolute_error: 0.0817 Epoch 10/50 383/383 [==============================] - 11s 28ms/step - loss: 0.0132 - mean_absolute_error: 0.0758 - val_loss: 0.0150 - val_mean_absolute_error: 0.0815 Epoch 11/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0131 - mean_absolute_error: 0.0756 - val_loss: 0.0148 - val_mean_absolute_error: 0.0818 Epoch 12/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0130 - mean_absolute_error: 0.0752 - val_loss: 0.0147 - val_mean_absolute_error: 0.0785 Epoch 13/50 383/383 [==============================] - 10s 27ms/step - loss: 0.0129 - mean_absolute_error: 0.0746 - val_loss: 0.0148 - val_mean_absolute_error: 0.0784 Epoch 14/50 383/383 [==============================] - 10s 26ms/step - loss: 0.0128 - mean_absolute_error: 0.0743 - val_loss: 0.0146 - val_mean_absolute_error: 0.0808 Epoch 15/50 383/383 [==============================] - 11s 28ms/step - loss: 0.0128 - mean_absolute_error: 0.0741 - val_loss: 0.0144 - val_mean_absolute_error: 0.0781 Epoch 16/50 383/383 [==============================] - 9s 25ms/step - loss: 0.0127 - mean_absolute_error: 0.0740 - val_loss: 0.0145 - val_mean_absolute_error: 0.0787 Epoch 17/50 383/383 [==============================] - 9s 23ms/step - loss: 0.0126 - mean_absolute_error: 0.0734 - val_loss: 0.0143 - val_mean_absolute_error: 0.0786 Epoch 18/50 383/383 [==============================] - 11s 28ms/step - loss: 0.0124 - mean_absolute_error: 0.0728 - val_loss: 0.0141 - val_mean_absolute_error: 0.0782 Epoch 19/50 383/383 [==============================] - 11s 28ms/step - loss: 0.0124 - mean_absolute_error: 0.0725 - val_loss: 0.0141 - val_mean_absolute_error: 0.0765 Epoch 20/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0123 - mean_absolute_error: 0.0720 - val_loss: 0.0139 - val_mean_absolute_error: 0.0787 Epoch 21/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0121 - mean_absolute_error: 0.0715 - val_loss: 0.0141 - val_mean_absolute_error: 0.0775 Epoch 22/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0120 - mean_absolute_error: 0.0712 - val_loss: 0.0139 - val_mean_absolute_error: 0.0770 Epoch 23/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0120 - mean_absolute_error: 0.0711 - val_loss: 0.0138 - val_mean_absolute_error: 0.0777 Epoch 24/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0119 - mean_absolute_error: 0.0707 - val_loss: 0.0135 - val_mean_absolute_error: 0.0753 Epoch 25/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0118 - mean_absolute_error: 0.0704 - val_loss: 0.0134 - val_mean_absolute_error: 0.0751 Epoch 26/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0117 - mean_absolute_error: 0.0698 - val_loss: 0.0132 - val_mean_absolute_error: 0.0752 Epoch 27/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0116 - mean_absolute_error: 0.0695 - val_loss: 0.0132 - val_mean_absolute_error: 0.0710 Epoch 28/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0114 - mean_absolute_error: 0.0690 - val_loss: 0.0127 - val_mean_absolute_error: 0.0718 Epoch 29/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0114 - mean_absolute_error: 0.0689 - val_loss: 0.0127 - val_mean_absolute_error: 0.0716 Epoch 30/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0111 - mean_absolute_error: 0.0679 - val_loss: 0.0126 - val_mean_absolute_error: 0.0717 Epoch 31/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0109 - mean_absolute_error: 0.0674 - val_loss: 0.0122 - val_mean_absolute_error: 0.0704 Epoch 32/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0109 - mean_absolute_error: 0.0670 - val_loss: 0.0123 - val_mean_absolute_error: 0.0686 Epoch 33/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0108 - mean_absolute_error: 0.0667 - val_loss: 0.0122 - val_mean_absolute_error: 0.0714 Epoch 34/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0106 - mean_absolute_error: 0.0660 - val_loss: 0.0121 - val_mean_absolute_error: 0.0686 Epoch 35/50 383/383 [==============================] - 8s 19ms/step - loss: 0.0105 - mean_absolute_error: 0.0658 - val_loss: 0.0120 - val_mean_absolute_error: 0.0690 Epoch 36/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0105 - mean_absolute_error: 0.0655 - val_loss: 0.0117 - val_mean_absolute_error: 0.0688 Epoch 37/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0104 - mean_absolute_error: 0.0655 - val_loss: 0.0117 - val_mean_absolute_error: 0.0692 Epoch 38/50 383/383 [==============================] - 8s 21ms/step - loss: 0.0104 - mean_absolute_error: 0.0651 - val_loss: 0.0118 - val_mean_absolute_error: 0.0663 Epoch 39/50 383/383 [==============================] - 8s 20ms/step - loss: 0.0103 - mean_absolute_error: 0.0646 - val_loss: 0.0117 - val_mean_absolute_error: 0.0670 Epoch 40/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0103 - mean_absolute_error: 0.0646 - val_loss: 0.0124 - val_mean_absolute_error: 0.0699 Epoch 41/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0102 - mean_absolute_error: 0.0644 - val_loss: 0.0115 - val_mean_absolute_error: 0.0688 Epoch 42/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0102 - mean_absolute_error: 0.0645 - val_loss: 0.0119 - val_mean_absolute_error: 0.0708 Epoch 43/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0101 - mean_absolute_error: 0.0640 - val_loss: 0.0115 - val_mean_absolute_error: 0.0683 Epoch 44/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0101 - mean_absolute_error: 0.0638 - val_loss: 0.0115 - val_mean_absolute_error: 0.0682 Epoch 45/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0101 - mean_absolute_error: 0.0636 - val_loss: 0.0116 - val_mean_absolute_error: 0.0710 Epoch 46/50 383/383 [==============================] - 7s 19ms/step - loss: 0.0100 - mean_absolute_error: 0.0635 - val_loss: 0.0115 - val_mean_absolute_error: 0.0671 Epoch 47/50 383/383 [==============================] - 7s 18ms/step - loss: 0.0100 - mean_absolute_error: 0.0634 - val_loss: 0.0115 - val_mean_absolute_error: 0.0689 109/109 [==============================] - 1s 8ms/step - loss: 0.0115 - mean_absolute_error: 0.0689 54/54 [==============================] - 0s 7ms/step - loss: 0.0082 - mean_absolute_error: 0.0551
In [12]:
Copied!
mo_lstm_model = Sequential([
LSTM(32, return_sequences=True),
Dense(units=2)
])
history = compile_and_fit(mo_lstm_model, mo_wide_window)
mo_val_performance['LSTM'] = mo_lstm_model.evaluate(mo_wide_window.val)
mo_test_performance['LSTM'] = mo_lstm_model.evaluate(mo_wide_window.test)
mo_lstm_model = Sequential([
LSTM(32, return_sequences=True),
Dense(units=2)
])
history = compile_and_fit(mo_lstm_model, mo_wide_window)
mo_val_performance['LSTM'] = mo_lstm_model.evaluate(mo_wide_window.val)
mo_test_performance['LSTM'] = mo_lstm_model.evaluate(mo_wide_window.test)
Epoch 1/50 384/384 [==============================] - 9s 20ms/step - loss: 0.0354 - mean_absolute_error: 0.1212 - val_loss: 0.0121 - val_mean_absolute_error: 0.0698 Epoch 2/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0073 - mean_absolute_error: 0.0555 - val_loss: 0.0051 - val_mean_absolute_error: 0.0450 Epoch 3/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0041 - mean_absolute_error: 0.0409 - val_loss: 0.0033 - val_mean_absolute_error: 0.0373 Epoch 4/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0031 - mean_absolute_error: 0.0354 - val_loss: 0.0026 - val_mean_absolute_error: 0.0328 Epoch 5/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0027 - mean_absolute_error: 0.0327 - val_loss: 0.0022 - val_mean_absolute_error: 0.0303 Epoch 6/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0025 - mean_absolute_error: 0.0308 - val_loss: 0.0020 - val_mean_absolute_error: 0.0289 Epoch 7/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0023 - mean_absolute_error: 0.0295 - val_loss: 0.0019 - val_mean_absolute_error: 0.0279 Epoch 8/50 384/384 [==============================] - 7s 19ms/step - loss: 0.0021 - mean_absolute_error: 0.0283 - val_loss: 0.0018 - val_mean_absolute_error: 0.0270 Epoch 9/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0020 - mean_absolute_error: 0.0273 - val_loss: 0.0017 - val_mean_absolute_error: 0.0261 Epoch 10/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0019 - mean_absolute_error: 0.0266 - val_loss: 0.0016 - val_mean_absolute_error: 0.0257 Epoch 11/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0018 - mean_absolute_error: 0.0258 - val_loss: 0.0015 - val_mean_absolute_error: 0.0245 Epoch 12/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0018 - mean_absolute_error: 0.0251 - val_loss: 0.0015 - val_mean_absolute_error: 0.0243 Epoch 13/50 384/384 [==============================] - 9s 24ms/step - loss: 0.0017 - mean_absolute_error: 0.0246 - val_loss: 0.0014 - val_mean_absolute_error: 0.0240 Epoch 14/50 384/384 [==============================] - 8s 22ms/step - loss: 0.0017 - mean_absolute_error: 0.0241 - val_loss: 0.0014 - val_mean_absolute_error: 0.0237 Epoch 15/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0016 - mean_absolute_error: 0.0238 - val_loss: 0.0014 - val_mean_absolute_error: 0.0236 Epoch 16/50 384/384 [==============================] - 9s 23ms/step - loss: 0.0016 - mean_absolute_error: 0.0236 - val_loss: 0.0013 - val_mean_absolute_error: 0.0228 Epoch 17/50 384/384 [==============================] - 8s 21ms/step - loss: 0.0016 - mean_absolute_error: 0.0234 - val_loss: 0.0013 - val_mean_absolute_error: 0.0226 Epoch 18/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0016 - mean_absolute_error: 0.0231 - val_loss: 0.0013 - val_mean_absolute_error: 0.0229 Epoch 19/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0015 - mean_absolute_error: 0.0229 - val_loss: 0.0013 - val_mean_absolute_error: 0.0226 Epoch 20/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0015 - mean_absolute_error: 0.0228 - val_loss: 0.0012 - val_mean_absolute_error: 0.0219 Epoch 21/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0015 - mean_absolute_error: 0.0227 - val_loss: 0.0012 - val_mean_absolute_error: 0.0216 Epoch 22/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0015 - mean_absolute_error: 0.0226 - val_loss: 0.0012 - val_mean_absolute_error: 0.0219 Epoch 23/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0015 - mean_absolute_error: 0.0224 - val_loss: 0.0012 - val_mean_absolute_error: 0.0221 Epoch 24/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0014 - mean_absolute_error: 0.0224 - val_loss: 0.0012 - val_mean_absolute_error: 0.0217 Epoch 25/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0014 - mean_absolute_error: 0.0222 - val_loss: 0.0011 - val_mean_absolute_error: 0.0209 Epoch 26/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0014 - mean_absolute_error: 0.0220 - val_loss: 0.0011 - val_mean_absolute_error: 0.0211 Epoch 27/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0014 - mean_absolute_error: 0.0219 - val_loss: 0.0012 - val_mean_absolute_error: 0.0212 Epoch 28/50 384/384 [==============================] - 8s 20ms/step - loss: 0.0014 - mean_absolute_error: 0.0217 - val_loss: 0.0012 - val_mean_absolute_error: 0.0220 109/109 [==============================] - 1s 8ms/step - loss: 0.0012 - mean_absolute_error: 0.0220 55/55 [==============================] - 0s 8ms/step - loss: 9.0453e-04 - mean_absolute_error: 0.0194
In [19]:
Copied!
# single stepの結果比較
plt.title('Single Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(val_performance.keys(), [v[1] for v in val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(test_performance.keys(), [v[1] for v in test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
# single stepの結果比較
plt.title('Single Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(val_performance.keys(), [v[1] for v in val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(test_performance.keys(), [v[1] for v in test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
In [14]:
Copied!
# multi stepの結果比較
plt.title('Multi Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(ms_val_performance.keys(), [v[1] for v in ms_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(ms_test_performance.keys(), [v[1] for v in ms_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
# multi stepの結果比較
plt.title('Multi Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(ms_val_performance.keys(), [v[1] for v in ms_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(ms_test_performance.keys(), [v[1] for v in ms_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
In [15]:
Copied!
# multi outputの結果比較
plt.title('Multi Output')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(mo_val_performance.keys(), [v[1] for v in mo_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(mo_test_performance.keys(), [v[1] for v in mo_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
# multi outputの結果比較
plt.title('Multi Output')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(mo_val_performance.keys(), [v[1] for v in mo_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(mo_test_performance.keys(), [v[1] for v in mo_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
In [ ]:
Copied!