# Predicting house prices in the Boston, Massachusetts area
import numpy as np
from tensorflow.keras.datasets import boston_housing
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import BatchNormalization
# Load the Keras Boston housing dataset: 404 training and 102 validation
# samples, each a 13-feature numeric vector (see shapes printed below).
# NOTE(review): features are used unscaled — presumably intentional for this
# demo, but standardization would normally be applied; confirm.
(X_train, y_train), (X_valid, y_valid) = boston_housing.load_data()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/boston_housing.npz 57026/57026 [==============================] - 0s 0us/step
X_train.shape  # (404, 13): 404 training samples, 13 features each
(404, 13)
X_valid.shape  # (102, 13): 102 validation samples
(102, 13)
X_train[0]  # inspect the raw (unscaled) feature vector of the first training sample
array([ 1.23247, 0. , 8.14 , 0. , 0.538 , 6.142 , 91.7 , 3.9769 , 4. , 307. , 21. , 396.9 , 18.72 ])
y_train[0]  # corresponding regression target for the first sample (15.2)
15.2
# Regression network for the 13-feature input: two ReLU hidden layers,
# each followed by batch normalization, dropout before the head, and a
# single linear output unit for the predicted price.
model = Sequential([
    Dense(32, input_dim=13, activation='relu'),
    BatchNormalization(),
    Dense(16, activation='relu'),
    BatchNormalization(),
    Dropout(0.2),
    Dense(1, activation='linear'),
])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 32) 448 batch_normalization (BatchN (None, 32) 128 ormalization) dense_1 (Dense) (None, 16) 528 batch_normalization_1 (Batc (None, 16) 64 hNormalization) dropout (Dropout) (None, 16) 0 dense_2 (Dense) (None, 1) 17 ================================================================= Total params: 1,185 Trainable params: 1,089 Non-trainable params: 96 _________________________________________________________________
# Compile with MSE loss (standard for regression) and the Adam optimizer,
# then train for 32 epochs, scoring the held-out set after every epoch.
model.compile(optimizer='adam', loss='mean_squared_error')
validation_set = (X_valid, y_valid)
model.fit(
    X_train,
    y_train,
    batch_size=8,
    epochs=32,
    verbose=1,
    validation_data=validation_set,
)
Epoch 1/32 51/51 [==============================] - 4s 12ms/step - loss: 579.8355 - val_loss: 443.8278 Epoch 2/32 51/51 [==============================] - 0s 8ms/step - loss: 560.0992 - val_loss: 483.1794 Epoch 3/32 51/51 [==============================] - 0s 8ms/step - loss: 543.0685 - val_loss: 469.9540 Epoch 4/32 51/51 [==============================] - 0s 7ms/step - loss: 521.0295 - val_loss: 465.4912 Epoch 5/32 51/51 [==============================] - 0s 8ms/step - loss: 501.8672 - val_loss: 455.2636 Epoch 6/32 51/51 [==============================] - 0s 8ms/step - loss: 473.1227 - val_loss: 435.2798 Epoch 7/32 51/51 [==============================] - 0s 9ms/step - loss: 446.5820 - val_loss: 424.8003 Epoch 8/32 51/51 [==============================] - 0s 8ms/step - loss: 413.9762 - val_loss: 409.0033 Epoch 9/32 51/51 [==============================] - 0s 8ms/step - loss: 379.3524 - val_loss: 358.2756 Epoch 10/32 51/51 [==============================] - 0s 7ms/step - loss: 339.5614 - val_loss: 315.7754 Epoch 11/32 51/51 [==============================] - 0s 7ms/step - loss: 293.6695 - val_loss: 254.9101 Epoch 12/32 51/51 [==============================] - 0s 7ms/step - loss: 258.0252 - val_loss: 261.8799 Epoch 13/32 51/51 [==============================] - 0s 5ms/step - loss: 209.6821 - val_loss: 217.7905 Epoch 14/32 51/51 [==============================] - 0s 4ms/step - loss: 179.9783 - val_loss: 179.9889 Epoch 15/32 51/51 [==============================] - 0s 8ms/step - loss: 149.2293 - val_loss: 143.5271 Epoch 16/32 51/51 [==============================] - 0s 5ms/step - loss: 123.8661 - val_loss: 248.8328 Epoch 17/32 51/51 [==============================] - 0s 4ms/step - loss: 93.4710 - val_loss: 162.2863 Epoch 18/32 51/51 [==============================] - 0s 4ms/step - loss: 79.5144 - val_loss: 154.6616 Epoch 19/32 51/51 [==============================] - 0s 4ms/step - loss: 63.8505 - val_loss: 90.0440 Epoch 20/32 51/51 [==============================] - 
0s 5ms/step - loss: 58.4107 - val_loss: 84.3149 Epoch 21/32 51/51 [==============================] - 0s 5ms/step - loss: 59.3938 - val_loss: 91.1221 Epoch 22/32 51/51 [==============================] - 0s 5ms/step - loss: 49.0404 - val_loss: 31.6145 Epoch 23/32 51/51 [==============================] - 0s 5ms/step - loss: 46.7913 - val_loss: 42.7062 Epoch 24/32 51/51 [==============================] - 0s 4ms/step - loss: 45.1807 - val_loss: 39.2975 Epoch 25/32 51/51 [==============================] - 0s 5ms/step - loss: 41.6249 - val_loss: 46.8431 Epoch 26/32 51/51 [==============================] - 0s 4ms/step - loss: 49.3840 - val_loss: 31.6878 Epoch 27/32 51/51 [==============================] - 0s 4ms/step - loss: 43.3416 - val_loss: 46.2134 Epoch 28/32 51/51 [==============================] - 0s 4ms/step - loss: 41.7307 - val_loss: 27.0634 Epoch 29/32 51/51 [==============================] - 0s 5ms/step - loss: 43.3982 - val_loss: 35.7756 Epoch 30/32 51/51 [==============================] - 0s 4ms/step - loss: 41.0184 - val_loss: 26.2336 Epoch 31/32 51/51 [==============================] - 0s 5ms/step - loss: 38.7970 - val_loss: 54.1452 Epoch 32/32 51/51 [==============================] - 0s 4ms/step - loss: 42.0810 - val_loss: 27.2458
<keras.callbacks.History at 0x7ff00004e1f0>
X_valid[42]  # raw feature vector of validation sample 42
array([ 9.32909, 0. , 18.1 , 0. , 0.713 , 6.185 , 98.7 , 2.2616 , 24. , 666. , 20.2 , 396.9 , 18.13 ])
y_valid[42]  # true target for validation sample 42 (14.1)
14.1
# Predict on one sample: the model expects a 2-D batch, so give the
# 13-element feature vector a leading batch axis of size 1.
model.predict(X_valid[42].reshape(1, -1))
1/1 [==============================] - 0s 92ms/step
array([[17.81344]], dtype=float32)