
AI/Deep Learning

[DL] DNN Grid Search

운호(Noah) 2021. 7. 16. 16:09

Import libraries

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.metrics import make_scorer
from sklearn.metrics import mean_squared_error
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.optimizers import Adagrad
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import Adamax
# Use the tf.keras wrapper and backend so they match the tf.keras layers above
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from tensorflow.keras import backend as K

Load the data

train = pd.read_csv('/Users/bdlab/Desktop/sparse-matrix-multiplication/scenario-extraction/d-optimal/spmm-latency-traintest/train-test-csv/nonsquare-train-1035-from-spmm-contain-todense-over-3s-1293.csv')
test = pd.read_csv('/Users/bdlab/Desktop/sparse-matrix-multiplication/scenario-extraction/d-optimal/spmm-latency-traintest/train-test-csv/nonsquare-test-258-from-spmm-contain-todense-over-3s-1293.csv')

# Select the feature columns and the latency target (sp_smdm)
X_train = train[['lr','lc','rc','ld','rd','lnnz','rnnz','lr*lc','lc*rc','lr*rc']]
y_train = train['sp_smdm']

X_test = test[['lr','lc','rc','ld','rd','lnnz','rnnz','lr*lc','lc*rc','lr*rc']]
y_test = test['sp_smdm']

Preprocess the data

from sklearn.preprocessing import MinMaxScaler

# Create a MinMaxScaler object
minmax_scaler = MinMaxScaler()

# Fit the scaler on the training data only, so no test information leaks in
minmax_scaler.fit(X_train)

# Scale the training data
X_train = minmax_scaler.transform(X_train)

# Scale the test data with the training-set statistics
X_test = minmax_scaler.transform(X_test)
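
As a quick optional sanity check: the scaled training features lie in [0, 1] by construction, while test features can fall slightly outside that range because the scaler never saw them.

# Sanity check on the scaled ranges
print(X_train.min(), X_train.max())  # 0.0 1.0
print(X_test.min(), X_test.max())    # may extend slightly past [0, 1]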

Define metric functions

# RMSE, written with Keras backend ops so it can run as a training metric
def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

# MAPE, written with NumPy for post-hoc evaluation of predictions
def mean_absolute_percentage_error(y_test, y_pred):
    y_test, y_pred = np.array(y_test), np.array(y_pred)
    return np.mean(np.abs((y_test - y_pred) / y_test)) * 100
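
A toy check (values chosen purely for illustration) confirms the MAPE helper behaves as expected:

# Each prediction is off by exactly 10%, so MAPE should print 10.0
print(mean_absolute_percentage_error(np.array([100.0, 200.0]),
                                     np.array([110.0, 180.0])))  # 10.0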

Define the hyperparameter grid

# dense_nparams : size of the first dense layer
dense_nparams = [256, 512, 1024]

# dense_layer_sizes : candidate tuples of hidden layer sizes
dense_layer_sizes = [(16,), (16, 16), (16, 16, 16)]

# input_optimizer : optimizer classes to try
input_optimizer = [SGD, Adagrad, RMSprop, Adam, Adamax]

# input_kernel_initializer : weight initialization schemes
input_kernel_initializer = ['uniform', 'normal',
                            'glorot_uniform', 'glorot_normal',
                            'he_uniform', 'he_normal']

# input_dropout : dropout rate
input_dropout = [0, 0.1, 0.2, 0.3, 0.4, 0.5]

# input_lr : learning rate
input_lr = [0.001, 0.01, 0.1, 0.2]

# Collect the hyperparameters into a dictionary for GridSearchCV
param_grid = dict(dense_nparams = dense_nparams,
                dense_layer_sizes = dense_layer_sizes,
                input_optimizer = input_optimizer,
                input_kernel_initializer = input_kernel_initializer,
                input_dropout = input_dropout,
                input_lr = input_lr)
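
Note the size of this grid before running it: 3 * 3 * 5 * 6 * 6 * 4 = 6,480 combinations, and with 10-fold cross-validation each one is fit 10 times, for 64,800 fits in total. A quick way to compute this:

# Rough cost estimate: one fit per parameter combination per fold
n_combos = np.prod([len(v) for v in param_grid.values()])
print(n_combos)       # 6480
print(n_combos * 10)  # 64800 fits with 10-fold CV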

Define the model to tune

# Model builder for the hyperparameter search; GridSearchCV matches the
# param_grid keys to these argument names
def create_model(dense_nparams, dense_layer_sizes, input_optimizer, input_kernel_initializer, input_dropout, input_lr):

    model = Sequential()
    model.add(Dense(dense_nparams, activation="relu", input_shape=(X_train.shape[1],), kernel_initializer=input_kernel_initializer))
    model.add(Dropout(input_dropout))

    # Add one hidden layer (plus dropout) per entry in dense_layer_sizes
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size, activation='relu', kernel_initializer=input_kernel_initializer))
        model.add(Dropout(input_dropout))

    model.add(Dense(1))

    optimizer = input_optimizer(learning_rate=input_lr)

    model.compile(optimizer=optimizer,
                  loss='mape',
                  metrics=['mape', rmse])
    return model

# Wrap the builder so scikit-learn can treat it as a regressor;
# each parameter combination trains for 300 epochs with batch_size 10
regressor_model = KerasRegressor(build_fn=create_model, epochs=300, batch_size=10, verbose=0)
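
Before launching the full search, it can be worth smoke-testing the builder once by hand; the argument values below are arbitrary picks from the grid:

# Build a single model and inspect its architecture
smoke_model = create_model(dense_nparams=256,
                           dense_layer_sizes=(16,),
                           input_optimizer=Adam,
                           input_kernel_initializer='glorot_uniform',
                           input_dropout=0.1,
                           input_lr=0.001)
smoke_model.summary()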

Define and run GridSearchCV

# Define the cross-validation scheme
kf = KFold(random_state=30,
           n_splits=10,  # 10 folds
           shuffle=True)

# Define the grid search
# scoring : how validation performance is measured; MAPE is negated because
#           scikit-learn always maximizes scores
# n_jobs  : -1 uses every core on the machine
# verbose : print all logs
grid = GridSearchCV(estimator=regressor_model,
                    param_grid=param_grid,
                    scoring = make_scorer(mean_absolute_percentage_error, greater_is_better=False),
                    cv = kf,
                    n_jobs=-1,
                    verbose=3)

# Run the grid search
grid_result = grid.fit(X_train, y_train)

# Grid search results
print("Best parameters :", grid_result.best_params_)
print("Best mean score (negated MAPE) : {}".format(grid_result.best_score_))

Build a model with the best hyperparameters found by GridSearchCV

# Define the model with the best parameters
result_model = create_model(grid_result.best_params_['dense_nparams'],
                            grid_result.best_params_['dense_layer_sizes'],
                            grid_result.best_params_['input_optimizer'],
                            grid_result.best_params_['input_kernel_initializer'],
                            grid_result.best_params_['input_dropout'],
                            grid_result.best_params_['input_lr'])

# Train the model, holding out 10% of the training data for validation
result_model.fit(X_train,
                 y_train,
                 epochs=300,
                 validation_split=0.1,
                 verbose=0)

Prediction performance

# Predict on the training data
y_train_pred = result_model.predict(X_train).reshape(-1,)
print("Training MAPE : {}\n".format(mean_absolute_percentage_error(y_train, y_train_pred)))

# Predict on the test data
y_test_pred = result_model.predict(X_test).reshape(-1,)
print("Test MAPE : {}\n".format(mean_absolute_percentage_error(y_test, y_test_pred)))