第14章 ディープラーニングの手ほどき
In [10]:
Copied!
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import MeanAbsoluteError
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import MeanAbsoluteError
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
In [2]:
Copied!
url_train = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/train.csv'
url_val = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/val.csv'
url_test = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/test.csv'
df_train = pd.read_csv(url_train, index_col=0)
df_val = pd.read_csv(url_val, index_col=0)
df_test = pd.read_csv(url_test, index_col=0)
url_train = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/train.csv'
url_val = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/val.csv'
url_test = 'https://raw.githubusercontent.com/marcopeix/TimeSeriesForecastingInPython/master/data/test.csv'
df_train = pd.read_csv(url_train, index_col=0)
df_val = pd.read_csv(url_val, index_col=0)
df_test = pd.read_csv(url_test, index_col=0)
In [6]:
Copied!
class DataWindow:
def __init__(self, input_width, label_width, shift, df_train, df_val, df_test, label_columns=None):
# window size
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
# データ
self.df_train = df_train
self.df_val = df_val
self.df_test = df_test
# ラベル
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in enumerate(label_columns)}
self.column_indices = {name: i for i, name in enumerate(self.df_train.columns)}
# スライス
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
# ラベル開始位置
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def split_to_inputs_labels(self, features):
inputs = features[:, self.input_slice, :]
labels = features[:, self.labels_slice, :]
if self.label_columns is not None:
labels = tf.stack([labels[:, :, self.column_indices[name]] for name in self.label_columns], axis=-1)
inputs.set_shape([None, self.input_width, None])
labels.set_shape([None, self.label_width, None])
return inputs, labels
def plot(self, plot_col: str, model=None, max_subplots=3):
inputs, labels = self.sample_batch
plt.figure(figsize=(12, 8))
plot_col_index = self.column_indices[plot_col]
n_max = min(max_subplots, len(inputs))
for n in range(n_max):
plt.subplot(n_max, 1, n+1)
plt.ylabel(f'{plot_col} [scaled]')
plt.plot(self.input_indices, inputs[n, :, plot_col_index], label='Inputs', marker='.', zorder=-10)
if self.label_columns:
label_col_index = self.label_columns_indices.get(plot_col, None)
else:
label_col_index = plot_col_index
if label_col_index is None:
continue
plt.scatter(self.label_indices, labels[n, :, label_col_index], edgecolors='k', label='Labels', c='tab:green', s=64)
if model is not None:
predictions = model(inputs)
plt.scatter(self.label_indices, predictions[n, :, label_col_index], marker='X', edgecolors='k', label='Predictions', c='tab:red', s=64)
if n == 0:
plt.legend()
plt.xlabel('Time (h)')
def make_dataset(self, data):
data = np.array(data, dtype=np.float32)
ds = tf.keras.utils.timeseries_dataset_from_array(
data=data,
targets=None,
sequence_length=self.total_window_size,
sequence_stride=1,
shuffle=True,
batch_size=32,
)
ds = ds.map(self.split_to_inputs_labels)
return ds
@property
def train(self):
return self.make_dataset(self.df_train)
@property
def val(self):
return self.make_dataset(self.df_val)
@property
def test(self):
return self.make_dataset(self.df_test)
@property
def sample_batch(self):
"""Get and cache an example batch of `inputs, labels` for plotting."""
result = getattr(self, '_sample_batch', None)
if result is None:
result = next(iter(self.train))
self._sample_batch = result
return result
class DataWindow:
def __init__(self, input_width, label_width, shift, df_train, df_val, df_test, label_columns=None):
# window size
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
# データ
self.df_train = df_train
self.df_val = df_val
self.df_test = df_test
# ラベル
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in enumerate(label_columns)}
self.column_indices = {name: i for i, name in enumerate(self.df_train.columns)}
# スライス
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
# ラベル開始位置
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def split_to_inputs_labels(self, features):
inputs = features[:, self.input_slice, :]
labels = features[:, self.labels_slice, :]
if self.label_columns is not None:
labels = tf.stack([labels[:, :, self.column_indices[name]] for name in self.label_columns], axis=-1)
inputs.set_shape([None, self.input_width, None])
labels.set_shape([None, self.label_width, None])
return inputs, labels
def plot(self, plot_col: str, model=None, max_subplots=3):
inputs, labels = self.sample_batch
plt.figure(figsize=(12, 8))
plot_col_index = self.column_indices[plot_col]
n_max = min(max_subplots, len(inputs))
for n in range(n_max):
plt.subplot(n_max, 1, n+1)
plt.ylabel(f'{plot_col} [scaled]')
plt.plot(self.input_indices, inputs[n, :, plot_col_index], label='Inputs', marker='.', zorder=-10)
if self.label_columns:
label_col_index = self.label_columns_indices.get(plot_col, None)
else:
label_col_index = plot_col_index
if label_col_index is None:
continue
plt.scatter(self.label_indices, labels[n, :, label_col_index], edgecolors='k', label='Labels', c='tab:green', s=64)
if model is not None:
predictions = model(inputs)
plt.scatter(self.label_indices, predictions[n, :, label_col_index], marker='X', edgecolors='k', label='Predictions', c='tab:red', s=64)
if n == 0:
plt.legend()
plt.xlabel('Time (h)')
def make_dataset(self, data):
data = np.array(data, dtype=np.float32)
ds = tf.keras.utils.timeseries_dataset_from_array(
data=data,
targets=None,
sequence_length=self.total_window_size,
sequence_stride=1,
shuffle=True,
batch_size=32,
)
ds = ds.map(self.split_to_inputs_labels)
return ds
@property
def train(self):
return self.make_dataset(self.df_train)
@property
def val(self):
return self.make_dataset(self.df_val)
@property
def test(self):
return self.make_dataset(self.df_test)
@property
def sample_batch(self):
"""Get and cache an example batch of `inputs, labels` for plotting."""
result = getattr(self, '_sample_batch', None)
if result is None:
result = next(iter(self.train))
self._sample_batch = result
return result
In [7]:
Copied!
# models
class Baseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return inputs
elif isinstance(self.label_index, list):
tensors = []
for index in self.label_index:
res = inputs[:, :, index]
res = res[:, :, tf.newaxis]
tensors.append(res)
return tf.concat(tensors, axis=-1)
else:
res = inputs[:, :, self.label_index]
return res[:, :, tf.newaxis]
class MultiStepLastBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return tf.tile(inputs[:, -1:, :], [1, 24, 1])
return tf.tile(inputs[:, -1:, self.label_index:], [1, 24, 1])
class RepeatBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
return inputs[:, :, self.label_index:]
# models
class Baseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return inputs
elif isinstance(self.label_index, list):
tensors = []
for index in self.label_index:
res = inputs[:, :, index]
res = res[:, :, tf.newaxis]
tensors.append(res)
return tf.concat(tensors, axis=-1)
else:
res = inputs[:, :, self.label_index]
return res[:, :, tf.newaxis]
class MultiStepLastBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return tf.tile(inputs[:, -1:, :], [1, 24, 1])
return tf.tile(inputs[:, -1:, self.label_index:], [1, 24, 1])
class RepeatBaseline(Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
return inputs[:, :, self.label_index:]
In [41]:
Copied!
# シングルステップ
single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
column_indices = {name: i for i, name in enumerate(df_train.columns)}
baseline_last = Baseline(column_indices['traffic_volume'])
baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
val_performance = {}
test_performance = {}
val_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.val)
test_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.test, verbose=0)
# マルチステップ
multi_window = DataWindow(input_width=24, label_width=24, shift=24, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
ms_baseline_last = MultiStepLastBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_baseline_repeat = RepeatBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_repeat.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_val_performance = {}
ms_test_performance = {}
ms_val_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.val)
ms_test_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.test)
ms_val_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.val)
ms_test_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.test)
# 多変数アウトプット
col_names = ['temp', 'traffic_volume']
mo_single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_baseline_last = Baseline(label_index=[column_indices[col] for col in col_names])
mo_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
mo_val_performance = {}
mo_test_performance = {}
mo_val_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
mo_test_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
# シングルステップ
single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
column_indices = {name: i for i, name in enumerate(df_train.columns)}
baseline_last = Baseline(column_indices['traffic_volume'])
baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
val_performance = {}
test_performance = {}
val_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.val)
test_performance['Baseline - Last'] = baseline_last.evaluate(single_step_window.test, verbose=0)
# マルチステップ
multi_window = DataWindow(input_width=24, label_width=24, shift=24, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=['traffic_volume'])
ms_baseline_last = MultiStepLastBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_baseline_repeat = RepeatBaseline(label_index=column_indices['traffic_volume'])
ms_baseline_repeat.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
ms_val_performance = {}
ms_test_performance = {}
ms_val_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.val)
ms_test_performance['Baseline - Last'] = ms_baseline_last.evaluate(multi_window.test)
ms_val_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.val)
ms_test_performance['Baseline - Repeat'] = ms_baseline_repeat.evaluate(multi_window.test)
# 多変数アウトプット
col_names = ['temp', 'traffic_volume']
mo_single_step_window = DataWindow(input_width=1, label_width=1, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_wide_window = DataWindow(input_width=24, label_width=24, shift=1, df_train=df_train, df_val=df_val, df_test=df_test, label_columns=col_names)
mo_baseline_last = Baseline(label_index=[column_indices[col] for col in col_names])
mo_baseline_last.compile(loss=MeanSquaredError(), metrics=[MeanAbsoluteError()])
mo_val_performance = {}
mo_test_performance = {}
mo_val_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
mo_test_performance['Baseline - Last'] = mo_baseline_last.evaluate(mo_single_step_window.val)
110/110 [==============================] - 1s 3ms/step - loss: 0.0133 - mean_absolute_error: 0.0831 109/109 [==============================] - 0s 3ms/step - loss: 0.1875 - mean_absolute_error: 0.3522 54/54 [==============================] - 0s 3ms/step - loss: 0.1814 - mean_absolute_error: 0.3473 109/109 [==============================] - 0s 3ms/step - loss: 0.2065 - mean_absolute_error: 0.3473 54/54 [==============================] - 0s 3ms/step - loss: 0.2018 - mean_absolute_error: 0.3413 110/110 [==============================] - 0s 3ms/step - loss: 0.0069 - mean_absolute_error: 0.0482 110/110 [==============================] - 0s 3ms/step - loss: 0.0069 - mean_absolute_error: 0.0482
線形モデルを実装する¶
In [42]:
Copied!
def compile_and_fit(model, window, patience=3, max_epochs=50):
early_stopping = EarlyStopping(
monitor='val_loss',
patience=patience,
mode='min'
)
model.compile(
loss=MeanSquaredError(),
optimizer=Adam(),
metrics=[MeanAbsoluteError()]
)
history = model.fit(
window.train,
epochs=max_epochs,
validation_data=window.val,
callbacks=[early_stopping]
)
return history
def compile_and_fit(model, window, patience=3, max_epochs=50):
early_stopping = EarlyStopping(
monitor='val_loss',
patience=patience,
mode='min'
)
model.compile(
loss=MeanSquaredError(),
optimizer=Adam(),
metrics=[MeanAbsoluteError()]
)
history = model.fit(
window.train,
epochs=max_epochs,
validation_data=window.val,
callbacks=[early_stopping]
)
return history
シングルステップの線形モデルを実装する¶
In [43]:
Copied!
linear = Sequential([Dense(units=1)])
history = compile_and_fit(linear, single_step_window)
val_performance['Linear'] = linear.evaluate(single_step_window.val)
test_performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
linear = Sequential([Dense(units=1)])
history = compile_and_fit(linear, single_step_window)
val_performance['Linear'] = linear.evaluate(single_step_window.val)
test_performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
Epoch 1/50 384/384 [==============================] - 2s 4ms/step - loss: 0.3284 - mean_absolute_error: 0.4560 - val_loss: 0.0977 - val_mean_absolute_error: 0.2534 Epoch 2/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0829 - mean_absolute_error: 0.2325 - val_loss: 0.0570 - val_mean_absolute_error: 0.1893 Epoch 3/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0478 - mean_absolute_error: 0.1743 - val_loss: 0.0325 - val_mean_absolute_error: 0.1410 Epoch 4/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0279 - mean_absolute_error: 0.1319 - val_loss: 0.0200 - val_mean_absolute_error: 0.1100 Epoch 5/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0184 - mean_absolute_error: 0.1060 - val_loss: 0.0147 - val_mean_absolute_error: 0.0930 Epoch 6/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0146 - mean_absolute_error: 0.0922 - val_loss: 0.0128 - val_mean_absolute_error: 0.0844 Epoch 7/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0131 - mean_absolute_error: 0.0851 - val_loss: 0.0119 - val_mean_absolute_error: 0.0798 Epoch 8/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0124 - mean_absolute_error: 0.0813 - val_loss: 0.0114 - val_mean_absolute_error: 0.0772 Epoch 9/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0118 - mean_absolute_error: 0.0786 - val_loss: 0.0109 - val_mean_absolute_error: 0.0749 Epoch 10/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0113 - mean_absolute_error: 0.0762 - val_loss: 0.0105 - val_mean_absolute_error: 0.0728 Epoch 11/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0108 - mean_absolute_error: 0.0740 - val_loss: 0.0101 - val_mean_absolute_error: 0.0708 Epoch 12/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0104 - mean_absolute_error: 0.0721 - val_loss: 0.0098 - val_mean_absolute_error: 0.0692 Epoch 13/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0101 - mean_absolute_error: 0.0706 - val_loss: 0.0095 - val_mean_absolute_error: 0.0675 Epoch 14/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0098 - mean_absolute_error: 0.0693 - val_loss: 0.0093 - val_mean_absolute_error: 0.0670 Epoch 15/50 384/384 [==============================] - 3s 6ms/step - loss: 0.0096 - mean_absolute_error: 0.0685 - val_loss: 0.0092 - val_mean_absolute_error: 0.0656 Epoch 16/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0095 - mean_absolute_error: 0.0680 - val_loss: 0.0091 - val_mean_absolute_error: 0.0657 Epoch 17/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0094 - mean_absolute_error: 0.0676 - val_loss: 0.0091 - val_mean_absolute_error: 0.0656 Epoch 18/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0093 - mean_absolute_error: 0.0676 - val_loss: 0.0091 - val_mean_absolute_error: 0.0649 Epoch 19/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0093 - mean_absolute_error: 0.0675 - val_loss: 0.0091 - val_mean_absolute_error: 0.0645 Epoch 20/50 384/384 [==============================] - 3s 7ms/step - loss: 0.0093 - mean_absolute_error: 0.0675 - val_loss: 0.0091 - val_mean_absolute_error: 0.0652 Epoch 21/50 384/384 [==============================] - 3s 6ms/step - loss: 0.0093 - mean_absolute_error: 0.0676 - val_loss: 0.0091 - val_mean_absolute_error: 0.0652 Epoch 22/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0093 - mean_absolute_error: 0.0675 - val_loss: 0.0091 - val_mean_absolute_error: 0.0648 Epoch 23/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0093 - mean_absolute_error: 0.0676 - val_loss: 0.0091 - val_mean_absolute_error: 0.0645 110/110 [==============================] - 0s 4ms/step - loss: 0.0091 - mean_absolute_error: 0.0645
In [44]:
Copied!
wide_window.plot('traffic_volume', linear)
wide_window.plot('traffic_volume', linear)
マルチステップの線形モデルを実装する¶
In [45]:
Copied!
ms_linear = Sequential([Dense(units=1, kernel_initializer=tf.initializers.zeros)])
history = compile_and_fit(ms_linear, multi_window)
ms_val_performance['Linear'] = ms_linear.evaluate(multi_window.val)
ms_test_performance['Linear'] = ms_linear.evaluate(multi_window.test)
ms_linear = Sequential([Dense(units=1, kernel_initializer=tf.initializers.zeros)])
history = compile_and_fit(ms_linear, multi_window)
ms_val_performance['Linear'] = ms_linear.evaluate(multi_window.val)
ms_test_performance['Linear'] = ms_linear.evaluate(multi_window.test)
Epoch 1/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0897 - mean_absolute_error: 0.2463 - val_loss: 0.0456 - val_mean_absolute_error: 0.1819 Epoch 2/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0297 - mean_absolute_error: 0.1366 - val_loss: 0.0261 - val_mean_absolute_error: 0.1251 Epoch 3/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0200 - mean_absolute_error: 0.1044 - val_loss: 0.0213 - val_mean_absolute_error: 0.1074 Epoch 4/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0177 - mean_absolute_error: 0.0955 - val_loss: 0.0196 - val_mean_absolute_error: 0.1000 Epoch 5/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0168 - mean_absolute_error: 0.0914 - val_loss: 0.0189 - val_mean_absolute_error: 0.0956 Epoch 6/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0164 - mean_absolute_error: 0.0887 - val_loss: 0.0185 - val_mean_absolute_error: 0.0924 Epoch 7/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0162 - mean_absolute_error: 0.0869 - val_loss: 0.0183 - val_mean_absolute_error: 0.0905 Epoch 8/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0162 - mean_absolute_error: 0.0857 - val_loss: 0.0182 - val_mean_absolute_error: 0.0894 Epoch 9/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0850 - val_loss: 0.0182 - val_mean_absolute_error: 0.0887 Epoch 10/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0161 - mean_absolute_error: 0.0846 - val_loss: 0.0182 - val_mean_absolute_error: 0.0883 Epoch 11/50 383/383 [==============================] - 2s 4ms/step - loss: 0.0161 - mean_absolute_error: 0.0843 - val_loss: 0.0182 - val_mean_absolute_error: 0.0887 Epoch 12/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0841 - val_loss: 0.0182 - val_mean_absolute_error: 0.0883 Epoch 13/50 383/383 [==============================] - 2s 5ms/step - loss: 0.0161 - mean_absolute_error: 0.0840 - val_loss: 0.0182 - val_mean_absolute_error: 0.0881 Epoch 14/50 383/383 [==============================] - 2s 4ms/step - loss: 0.0161 - mean_absolute_error: 0.0839 - val_loss: 0.0182 - val_mean_absolute_error: 0.0880 Epoch 15/50 383/383 [==============================] - 2s 4ms/step - loss: 0.0161 - mean_absolute_error: 0.0839 - val_loss: 0.0182 - val_mean_absolute_error: 0.0881 109/109 [==============================] - 0s 3ms/step - loss: 0.0182 - mean_absolute_error: 0.0881 54/54 [==============================] - 0s 2ms/step - loss: 0.0142 - mean_absolute_error: 0.0759
In [46]:
Copied!
multi_window.plot('traffic_volume', ms_linear)
multi_window.plot('traffic_volume', ms_linear)
多出力の線形モデルを実装する¶
In [47]:
Copied!
mo_linear = Sequential([Dense(units=2)])
history = compile_and_fit(mo_linear, mo_single_step_window)
mo_val_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.val)
mo_test_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.test)
mo_linear = Sequential([Dense(units=2)])
history = compile_and_fit(mo_linear, mo_single_step_window)
mo_val_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.val)
mo_test_performance['Linear'] = mo_linear.evaluate(mo_single_step_window.test)
Epoch 1/50 384/384 [==============================] - 2s 5ms/step - loss: 0.2844 - mean_absolute_error: 0.4397 - val_loss: 0.1930 - val_mean_absolute_error: 0.3678 Epoch 2/50 384/384 [==============================] - 2s 4ms/step - loss: 0.1347 - mean_absolute_error: 0.3038 - val_loss: 0.1113 - val_mean_absolute_error: 0.2788 Epoch 3/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0795 - mean_absolute_error: 0.2296 - val_loss: 0.0696 - val_mean_absolute_error: 0.2200 Epoch 4/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0513 - mean_absolute_error: 0.1817 - val_loss: 0.0473 - val_mean_absolute_error: 0.1811 Epoch 5/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0358 - mean_absolute_error: 0.1510 - val_loss: 0.0335 - val_mean_absolute_error: 0.1521 Epoch 6/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0259 - mean_absolute_error: 0.1284 - val_loss: 0.0242 - val_mean_absolute_error: 0.1290 Epoch 7/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0188 - mean_absolute_error: 0.1092 - val_loss: 0.0173 - val_mean_absolute_error: 0.1085 Epoch 8/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0138 - mean_absolute_error: 0.0924 - val_loss: 0.0126 - val_mean_absolute_error: 0.0910 Epoch 9/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0103 - mean_absolute_error: 0.0778 - val_loss: 0.0093 - val_mean_absolute_error: 0.0756 Epoch 10/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0080 - mean_absolute_error: 0.0660 - val_loss: 0.0074 - val_mean_absolute_error: 0.0634 Epoch 11/50 384/384 [==============================] - 1s 4ms/step - loss: 0.0067 - mean_absolute_error: 0.0568 - val_loss: 0.0061 - val_mean_absolute_error: 0.0532 Epoch 12/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0059 - mean_absolute_error: 0.0501 - val_loss: 0.0055 - val_mean_absolute_error: 0.0464 Epoch 13/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0055 - mean_absolute_error: 0.0457 - val_loss: 0.0052 - val_mean_absolute_error: 0.0429 Epoch 14/50 384/384 [==============================] - 1s 4ms/step - loss: 0.0053 - mean_absolute_error: 0.0431 - val_loss: 0.0050 - val_mean_absolute_error: 0.0403 Epoch 15/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0052 - mean_absolute_error: 0.0416 - val_loss: 0.0048 - val_mean_absolute_error: 0.0395 Epoch 16/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0051 - mean_absolute_error: 0.0407 - val_loss: 0.0048 - val_mean_absolute_error: 0.0383 Epoch 17/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0050 - mean_absolute_error: 0.0402 - val_loss: 0.0047 - val_mean_absolute_error: 0.0384 Epoch 18/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0050 - mean_absolute_error: 0.0399 - val_loss: 0.0047 - val_mean_absolute_error: 0.0382 Epoch 19/50 384/384 [==============================] - 1s 4ms/step - loss: 0.0050 - mean_absolute_error: 0.0397 - val_loss: 0.0047 - val_mean_absolute_error: 0.0383 Epoch 20/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0049 - mean_absolute_error: 0.0396 - val_loss: 0.0047 - val_mean_absolute_error: 0.0382 Epoch 21/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0396 - val_loss: 0.0047 - val_mean_absolute_error: 0.0391 Epoch 22/50 384/384 [==============================] - 1s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0396 - val_loss: 0.0046 - val_mean_absolute_error: 0.0380 Epoch 23/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0381 Epoch 24/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0386 Epoch 25/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0049 - mean_absolute_error: 0.0396 - val_loss: 0.0046 - val_mean_absolute_error: 0.0381 Epoch 26/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0380 Epoch 27/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0377 Epoch 28/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0378 Epoch 29/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0049 - mean_absolute_error: 0.0395 - val_loss: 0.0046 - val_mean_absolute_error: 0.0381 110/110 [==============================] - 0s 3ms/step - loss: 0.0046 - mean_absolute_error: 0.0381 55/55 [==============================] - 0s 3ms/step - loss: 0.0043 - mean_absolute_error: 0.0361
In [48]:
Copied!
mo_wide_window.plot('traffic_volume', mo_linear)
mo_wide_window.plot('traffic_volume', mo_linear)
In [49]:
Copied!
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
Out[49]:
<Axes: >
ディープニューラルネットワーク(DNN)を実装する¶
In [50]:
Copied!
dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1)
])
history = compile_and_fit(dense, single_step_window)
val_performance['Dense'] = dense.evaluate(single_step_window.val)
test_performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1)
])
history = compile_and_fit(dense, single_step_window)
val_performance['Dense'] = dense.evaluate(single_step_window.val)
test_performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
Epoch 1/50 384/384 [==============================] - 3s 6ms/step - loss: 0.0116 - mean_absolute_error: 0.0700 - val_loss: 0.0044 - val_mean_absolute_error: 0.0521 Epoch 2/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0040 - mean_absolute_error: 0.0467 - val_loss: 0.0033 - val_mean_absolute_error: 0.0435 Epoch 3/50 384/384 [==============================] - 3s 6ms/step - loss: 0.0035 - mean_absolute_error: 0.0431 - val_loss: 0.0029 - val_mean_absolute_error: 0.0406 Epoch 4/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0033 - mean_absolute_error: 0.0415 - val_loss: 0.0027 - val_mean_absolute_error: 0.0389 Epoch 5/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0031 - mean_absolute_error: 0.0400 - val_loss: 0.0027 - val_mean_absolute_error: 0.0382 Epoch 6/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0030 - mean_absolute_error: 0.0396 - val_loss: 0.0027 - val_mean_absolute_error: 0.0385 Epoch 7/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0028 - mean_absolute_error: 0.0381 - val_loss: 0.0023 - val_mean_absolute_error: 0.0348 Epoch 8/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0028 - mean_absolute_error: 0.0381 - val_loss: 0.0022 - val_mean_absolute_error: 0.0341 Epoch 9/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0027 - mean_absolute_error: 0.0368 - val_loss: 0.0023 - val_mean_absolute_error: 0.0360 Epoch 10/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0027 - mean_absolute_error: 0.0373 - val_loss: 0.0026 - val_mean_absolute_error: 0.0384 Epoch 11/50 384/384 [==============================] - 2s 4ms/step - loss: 0.0026 - mean_absolute_error: 0.0365 - val_loss: 0.0021 - val_mean_absolute_error: 0.0332 Epoch 12/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0025 - mean_absolute_error: 0.0355 - val_loss: 0.0023 - val_mean_absolute_error: 0.0362 Epoch 13/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0025 - mean_absolute_error: 0.0356 - val_loss: 0.0022 - val_mean_absolute_error: 0.0335 Epoch 14/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0025 - mean_absolute_error: 0.0350 - val_loss: 0.0020 - val_mean_absolute_error: 0.0319 Epoch 15/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0025 - mean_absolute_error: 0.0349 - val_loss: 0.0022 - val_mean_absolute_error: 0.0339 Epoch 16/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0025 - mean_absolute_error: 0.0352 - val_loss: 0.0025 - val_mean_absolute_error: 0.0383 Epoch 17/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0024 - mean_absolute_error: 0.0349 - val_loss: 0.0021 - val_mean_absolute_error: 0.0337 110/110 [==============================] - 1s 4ms/step - loss: 0.0021 - mean_absolute_error: 0.0337
In [51]:
Copied!
wide_window.plot('traffic_volume', dense)
wide_window.plot('traffic_volume', dense)
In [52]:
Copied!
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
Out[52]:
<Axes: >
In [53]:
Copied!
ms_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1, kernel_initializer=tf.initializers.zeros)
])
history = compile_and_fit(ms_dense, multi_window)
ms_val_performance['Dense'] = ms_dense.evaluate(multi_window.val)
ms_test_performance['Dense'] = ms_dense.evaluate(multi_window.test)
ms_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=1, kernel_initializer=tf.initializers.zeros)
])
history = compile_and_fit(ms_dense, multi_window)
ms_val_performance['Dense'] = ms_dense.evaluate(multi_window.val)
ms_test_performance['Dense'] = ms_dense.evaluate(multi_window.test)
Epoch 1/50
383/383 [==============================] - 3s 7ms/step - loss: 0.0307 - mean_absolute_error: 0.1132 - val_loss: 0.0169 - val_mean_absolute_error: 0.0860 Epoch 2/50 383/383 [==============================] - 3s 6ms/step - loss: 0.0141 - mean_absolute_error: 0.0790 - val_loss: 0.0161 - val_mean_absolute_error: 0.0853 Epoch 3/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0136 - mean_absolute_error: 0.0779 - val_loss: 0.0153 - val_mean_absolute_error: 0.0842 Epoch 4/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0133 - mean_absolute_error: 0.0772 - val_loss: 0.0152 - val_mean_absolute_error: 0.0851 Epoch 5/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0132 - mean_absolute_error: 0.0766 - val_loss: 0.0156 - val_mean_absolute_error: 0.0813 Epoch 6/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0130 - mean_absolute_error: 0.0758 - val_loss: 0.0147 - val_mean_absolute_error: 0.0806 Epoch 7/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0128 - mean_absolute_error: 0.0752 - val_loss: 0.0145 - val_mean_absolute_error: 0.0804 Epoch 8/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0127 - mean_absolute_error: 0.0749 - val_loss: 0.0145 - val_mean_absolute_error: 0.0799 Epoch 9/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0128 - mean_absolute_error: 0.0751 - val_loss: 0.0145 - val_mean_absolute_error: 0.0788 Epoch 10/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0127 - mean_absolute_error: 0.0747 - val_loss: 0.0143 - val_mean_absolute_error: 0.0793 Epoch 11/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0126 - mean_absolute_error: 0.0743 - val_loss: 0.0143 - val_mean_absolute_error: 0.0794 Epoch 12/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0126 - mean_absolute_error: 0.0744 - val_loss: 0.0143 - val_mean_absolute_error: 0.0785 Epoch 13/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0125 - mean_absolute_error: 0.0741 - val_loss: 0.0144 - val_mean_absolute_error: 0.0775 Epoch 14/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0125 - mean_absolute_error: 0.0740 - val_loss: 0.0142 - val_mean_absolute_error: 0.0782 Epoch 15/50 383/383 [==============================] - 3s 6ms/step - loss: 0.0125 - mean_absolute_error: 0.0739 - val_loss: 0.0142 - val_mean_absolute_error: 0.0781 Epoch 16/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0125 - mean_absolute_error: 0.0742 - val_loss: 0.0142 - val_mean_absolute_error: 0.0775 Epoch 17/50 383/383 [==============================] - 3s 6ms/step - loss: 0.0124 - mean_absolute_error: 0.0738 - val_loss: 0.0142 - val_mean_absolute_error: 0.0801 Epoch 18/50 383/383 [==============================] - 3s 6ms/step - loss: 0.0124 - mean_absolute_error: 0.0736 - val_loss: 0.0142 - val_mean_absolute_error: 0.0793 Epoch 19/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0123 - mean_absolute_error: 0.0734 - val_loss: 0.0143 - val_mean_absolute_error: 0.0815 Epoch 20/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0123 - mean_absolute_error: 0.0732 - val_loss: 0.0145 - val_mean_absolute_error: 0.0823 Epoch 21/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0122 - mean_absolute_error: 0.0731 - val_loss: 0.0140 - val_mean_absolute_error: 0.0766 Epoch 22/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0122 - mean_absolute_error: 0.0732 - val_loss: 0.0139 - val_mean_absolute_error: 0.0777 Epoch 23/50 383/383 [==============================] - 3s 7ms/step - loss: 0.0121 - mean_absolute_error: 0.0727 - val_loss: 0.0141 - val_mean_absolute_error: 0.0767 Epoch 24/50 383/383 [==============================] - 2s 6ms/step - loss: 0.0121 - mean_absolute_error: 0.0726 - val_loss: 0.0140 - val_mean_absolute_error: 0.0756 Epoch 25/50 383/383 [==============================] - 3s 6ms/step - loss: 0.0121 - mean_absolute_error: 0.0725 - val_loss: 0.0142 - val_mean_absolute_error: 0.0751 109/109 [==============================] - 1s 4ms/step - loss: 0.0142 - mean_absolute_error: 0.0751 54/54 [==============================] - 0s 4ms/step - loss: 0.0096 - mean_absolute_error: 0.0618
In [54]:
Copied!
multi_window.plot('traffic_volume', ms_dense)
multi_window.plot('traffic_volume', ms_dense)
In [55]:
Copied!
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
Out[55]:
<Axes: >
In [56]:
Copied!
mo_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=2)
])
history = compile_and_fit(mo_dense, mo_single_step_window)
mo_val_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.val)
mo_test_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.test)
mo_dense = Sequential([
Dense(units=64, activation='relu'),
Dense(units=64, activation='relu'),
Dense(units=2)
])
history = compile_and_fit(mo_dense, mo_single_step_window)
mo_val_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.val)
mo_test_performance['Dense'] = mo_dense.evaluate(mo_single_step_window.test)
Epoch 1/50 384/384 [==============================] - 3s 5ms/step - loss: 0.0127 - mean_absolute_error: 0.0642 - val_loss: 0.0031 - val_mean_absolute_error: 0.0349 Epoch 2/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0028 - mean_absolute_error: 0.0324 - val_loss: 0.0023 - val_mean_absolute_error: 0.0302 Epoch 3/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0024 - mean_absolute_error: 0.0299 - val_loss: 0.0022 - val_mean_absolute_error: 0.0304 Epoch 4/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0022 - mean_absolute_error: 0.0285 - val_loss: 0.0017 - val_mean_absolute_error: 0.0262 Epoch 5/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0020 - mean_absolute_error: 0.0277 - val_loss: 0.0016 - val_mean_absolute_error: 0.0255 Epoch 6/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0020 - mean_absolute_error: 0.0277 - val_loss: 0.0016 - val_mean_absolute_error: 0.0258 Epoch 7/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0019 - mean_absolute_error: 0.0270 - val_loss: 0.0015 - val_mean_absolute_error: 0.0256 Epoch 8/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0019 - mean_absolute_error: 0.0264 - val_loss: 0.0015 - val_mean_absolute_error: 0.0242 Epoch 9/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0018 - mean_absolute_error: 0.0260 - val_loss: 0.0015 - val_mean_absolute_error: 0.0243 Epoch 10/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0018 - mean_absolute_error: 0.0257 - val_loss: 0.0018 - val_mean_absolute_error: 0.0288 Epoch 11/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0017 - mean_absolute_error: 0.0255 - val_loss: 0.0014 - val_mean_absolute_error: 0.0239 Epoch 12/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0017 - mean_absolute_error: 0.0249 - val_loss: 0.0014 - val_mean_absolute_error: 0.0240 Epoch 13/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0017 - mean_absolute_error: 0.0250 - val_loss: 0.0013 - val_mean_absolute_error: 0.0233 Epoch 14/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0016 - mean_absolute_error: 0.0244 - val_loss: 0.0013 - val_mean_absolute_error: 0.0230 Epoch 15/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0016 - mean_absolute_error: 0.0241 - val_loss: 0.0014 - val_mean_absolute_error: 0.0274 Epoch 16/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0016 - mean_absolute_error: 0.0242 - val_loss: 0.0014 - val_mean_absolute_error: 0.0238 Epoch 17/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0016 - mean_absolute_error: 0.0243 - val_loss: 0.0012 - val_mean_absolute_error: 0.0227 Epoch 18/50 384/384 [==============================] - 2s 6ms/step - loss: 0.0016 - mean_absolute_error: 0.0240 - val_loss: 0.0012 - val_mean_absolute_error: 0.0224 Epoch 19/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0016 - mean_absolute_error: 0.0241 - val_loss: 0.0013 - val_mean_absolute_error: 0.0236 Epoch 20/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0015 - mean_absolute_error: 0.0235 - val_loss: 0.0015 - val_mean_absolute_error: 0.0271 Epoch 21/50 384/384 [==============================] - 2s 5ms/step - loss: 0.0015 - mean_absolute_error: 0.0237 - val_loss: 0.0013 - val_mean_absolute_error: 0.0239 110/110 [==============================] - 0s 3ms/step - loss: 0.0013 - mean_absolute_error: 0.0239 55/55 [==============================] - 0s 3ms/step - loss: 9.0741e-04 - mean_absolute_error: 0.0203
In [57]:
Copied!
mo_wide_window.plot('traffic_volume', mo_dense)
mo_wide_window.plot('traffic_volume', mo_dense)
In [58]:
Copied!
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
Out[58]:
<Axes: >
In [72]:
Copied!
# single stepの結果比較
plt.title('Single Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(val_performance.keys(), [v[1] for v in val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(test_performance.keys(), [v[1] for v in test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
# single stepの結果比較
plt.title('Single Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(val_performance.keys(), [v[1] for v in val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(test_performance.keys(), [v[1] for v in test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
In [73]:
Copied!
# multi stepの結果比較
plt.title('Multi Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(ms_val_performance.keys(), [v[1] for v in ms_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(ms_test_performance.keys(), [v[1] for v in ms_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
# multi stepの結果比較
plt.title('Multi Step')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(ms_val_performance.keys(), [v[1] for v in ms_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(ms_test_performance.keys(), [v[1] for v in ms_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
In [74]:
Copied!
# multi outputの結果比較
plt.title('Multi Output')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(mo_val_performance.keys(), [v[1] for v in mo_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(mo_test_performance.keys(), [v[1] for v in mo_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
# multi outputの結果比較
plt.title('Multi Output')
plt.xlabel('Models')
plt.ylabel('MAE')
plt.bar(mo_val_performance.keys(), [v[1] for v in mo_val_performance.values()], width=-0.25, align='edge', label='Validation')
plt.bar(mo_test_performance.keys(), [v[1] for v in mo_test_performance.values()], width=0.25, align='edge', label='Test', hatch='/')
plt.legend()
plt.tight_layout()
In [ ]:
Copied!