K-MOOC Week 4: Understanding Ensemble Methods

Week 4, Session 1


Bagging

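Bagging (bootstrap aggregating) trains many copies of the same base learner, each on a bootstrap sample drawn with replacement from the training data, and aggregates their predictions by majority vote (classification) or averaging (regression). A minimal sketch with scikit-learn's BaggingClassifier; the digits dataset and parameter values are illustrative choices, not from the lecture:

from sklearn.datasets import load_digits
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import cross_val_score

X, y = load_digits(return_X_y=True)

# Each of the 50 base learners (decision trees by default) sees its own
# bootstrap sample; their votes are aggregated into one prediction.
bagging = BaggingClassifier(n_estimators=50, random_state=0)
print(cross_val_score(bagging, X, y, cv=5).mean())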

Random Forest

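A random forest is bagging applied to decision trees with one extra source of randomness: each split considers only a random subset of the features, which decorrelates the trees and usually makes the ensemble stronger. Session 2 below walks through a full example; the core API is a one-liner (parameter values here are illustrative):

from sklearn.ensemble import RandomForestClassifier

# max_features='sqrt' is the classification default: every split examines
# a random subset of sqrt(n_features) features rather than all of them.
rf = RandomForestClassifier(n_estimators=100, max_features='sqrt', random_state=0)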

Boosting

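Whereas bagging trains its learners independently, boosting trains them sequentially: each new weak learner concentrates on the mistakes of the ensemble built so far. A minimal sketch with scikit-learn's GradientBoostingClassifier; dataset and parameters are again illustrative:

from sklearn.datasets import load_digits
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# 100 shallow trees are fitted one after another; each stage corrects the
# errors of the previous stages, scaled down by the learning rate.
gbc = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
                                 max_depth=3, random_state=0)
gbc.fit(X_train, y_train)
print(accuracy_score(y_test, gbc.predict(X_test)))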

Week 4, Session 2

Understanding handwritten digit classification with a random forest

Importing libraries

from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

Loading and inspecting the dataset

mnist = fetch_openml('mnist_784')    # 70,000 28x28 handwritten digit images
mnist_data = mnist.data[:10000]      # keep only the first 10,000 samples
mnist_target = mnist.target[:10000]

print(mnist_data) 
print(mnist_data.shape) 
print(mnist_target) 
print(mnist_target.shape)
      pixel1  pixel2  pixel3  pixel4  ...  pixel781  pixel782  pixel783  pixel784
0        0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
1        0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
2        0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
3        0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
4        0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
...      ...     ...     ...     ...  ...       ...       ...       ...       ...
9995     0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
9996     0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
9997     0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
9998     0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0
9999     0.0     0.0     0.0     0.0  ...       0.0       0.0       0.0       0.0

[10000 rows x 784 columns]
(10000, 784)
0       5
1       0
2       4
3       1
4       9
       ..
9995    5
9996    8
9997    6
9998    9
9999    7
Name: class, Length: 10000, dtype: category
Categories (10, object): ['0', '1', '2', '3', ..., '6', '7', '8', '9']
(10000,)
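Each row is a flattened 28x28 grayscale image. As a quick sanity check (not in the original code, and assuming mnist.data came back as a DataFrame, as the output above shows), one row can be reshaped and displayed:

first_image = mnist_data.iloc[0].to_numpy().reshape(28, 28)  # 784 pixels -> 28x28
plt.imshow(first_image, cmap='gray')
plt.title('label: {}'.format(mnist_target.iloc[0]))
plt.show()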

Splitting into training and test sets

X_train, X_test, y_train, y_test = train_test_split(mnist_data, mnist_target, test_size=0.2)
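Without random_state the split changes on every run, so the accuracies below will vary slightly between executions. A reproducible variant (my addition) fixes the seed and keeps the class proportions balanced:

X_train, X_test, y_train, y_test = train_test_split(
    mnist_data, mnist_target,
    test_size=0.2,
    random_state=0,          # fixed seed: identical split on every run
    stratify=mnist_target)   # preserve the 0-9 class ratios in both sets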

Training with default parameters

dt_clf = tree.DecisionTreeClassifier() 
rf_clf = RandomForestClassifier() 

dt_clf.fit(X_train, y_train) 
rf_clf.fit(X_train, y_train) 

dt_pred = dt_clf.predict(X_test) 
rf_pred = rf_clf.predict(X_test) 

accuracy_dt = accuracy_score(y_test, dt_pred) 
accuracy_rf = accuracy_score(y_test, rf_pred)

Prediction results

print('decision tree accuracy: {0:.4f}'.format(accuracy_dt))
print('random forest accuracy: {0:.4f}'.format(accuracy_rf))
decision tree accuracy: 0.8005
random forest accuracy: 0.9380

Checking feature importances

ft_importances_values = rf_clf.feature_importances_       # one importance per pixel (784 values)
ft_importances = pd.Series(ft_importances_values)
top10 = ft_importances.sort_values(ascending=False)[:10]  # ten most informative pixels
plt.figure(figsize=(12, 10))
plt.title('Feature Importances')
sns.barplot(x=top10.index, y=top10)
plt.show()

(bar plot of the top-10 pixel importances)
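Raw pixel indices on the x-axis are hard to read for image data. An alternative view (not in the original) maps all 784 importances back onto the 28x28 grid, showing which image regions the forest relies on:

importances_image = ft_importances_values.reshape(28, 28)
plt.figure(figsize=(6, 6))
plt.imshow(importances_image, cmap='hot')
plt.colorbar(label='importance')
plt.title('Pixel Importances')
plt.show()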

Training with tuned parameters

rf_param_grid = {
    'n_estimators': [100, 110, 120],     # number of trees
    'min_samples_leaf': [1, 2, 3],       # minimum samples per leaf
    'min_samples_split': [2, 3, 4]       # minimum samples to split a node
}
rf_clf = RandomForestClassifier(random_state=0)
grid = GridSearchCV(rf_clf, param_grid=rf_param_grid, scoring='accuracy', n_jobs=1)

grid.fit(X_train, y_train)
GridSearchCV(estimator=RandomForestClassifier(random_state=0), n_jobs=1,
             param_grid={'min_samples_leaf': [1, 2, 3],
                         'min_samples_split': [2, 3, 4],
                         'n_estimators': [100, 110, 120]},
             scoring='accuracy')
print('best mean CV accuracy: {0:.4f}'.format(grid.best_score_))
print(grid.best_params_)
best mean CV accuracy: 0.9474
{'min_samples_leaf': 1, 'min_samples_split': 2, 'n_estimators': 100}
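best_score_ is the mean cross-validated accuracy over the training folds. Because refit=True by default, the search has already retrained the best configuration on the full training set, so a natural follow-up (not in the original) is to score it on the held-out test set:

best_rf = grid.best_estimator_    # already refit on all of X_train
grid_pred = best_rf.predict(X_test)
print('test accuracy: {0:.4f}'.format(accuracy_score(y_test, grid_pred)))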

Week 4, Session 3

Handwritten digit classification with a voting ensemble

Importing libraries

from sklearn import datasets
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib.pyplot as plt

Loading the handwritten digit data

np.random.seed(5)    # fixes the split below, which draws from numpy's global RNG
mnist = datasets.load_digits()
features, labels = mnist.data, mnist.target
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
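Despite the mnist variable name, load_digits() is scikit-learn's small built-in digits dataset: 1,797 samples of 8x8 images (64 features), not the 784-pixel MNIST used in Session 2. A quick shape check:

print(features.shape)   # (1797, 64): 8x8 images flattened to 64 pixels
print(labels.shape)     # (1797,)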

Measuring single-model accuracy

dtree = tree.DecisionTreeClassifier( 
    criterion="gini", max_depth=8, max_features=32)

dtree = dtree.fit(X_train, y_train) 
dtree_predicted = dtree.predict(X_test) 

knn = KNeighborsClassifier(n_neighbors=299).fit(X_train, y_train) 
knn_predicted = knn.predict(X_test) 

svm = SVC(C=0.1, gamma=0.003, 
          probability=True).fit(X_train, y_train) 
svm_predicted = svm.predict(X_test)

print("[accuarcy]")
print("d-tree: ", accuracy_score(y_test, dtree_predicted)) 
print("knn   : ", accuracy_score(y_test, knn_predicted)) 
print("svm   : ", accuracy_score(y_test, svm_predicted))
[accuracy]
d-tree:  0.7916666666666666
knn   :  0.8944444444444445
svm   :  0.8916666666666667

Checking per-class probabilities

svm_proba = svm.predict_proba(X_test)   # available because SVC(probability=True)
print(svm_proba[0:2]) 
[[0.0020036  0.00913495 0.00860886 0.00431856 0.0047931  0.8975483
  0.0019513  0.01046554 0.04855539 0.0126204 ]
 [0.00290208 0.01165787 0.86869732 0.00809384 0.00503728 0.01857273
  0.00301187 0.00945009 0.05716773 0.0154092 ]]
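Each row is one test sample's probability distribution over the ten classes, and it sums to 1; these distributions are exactly what soft voting averages. A quick check (my addition):

print(svm_proba.shape)      # (360, 10): one row per test sample
print(svm_proba[0].sum())   # each row sums to (approximately) 1.0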

Hard voting

voting_model = VotingClassifier(estimators=[ 
    ('Decision_Tree', dtree), ('k-NN', knn), ('SVM', svm)], 
    weights=[1,1,1], voting='hard')
voting_model.fit(X_train, y_train) 
hard_voting_predicted = voting_model.predict(X_test) 
accuracy_score(y_test, hard_voting_predicted) 
0.9472222222222222
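Hard voting takes the majority class among the three models' predicted labels. A minimal re-implementation of the idea (my addition), which should match VotingClassifier up to tie-breaking:

# Stack the three label vectors and take the most frequent label per column
votes = np.stack([dtree_predicted, knn_predicted, svm_predicted])
manual_hard = np.apply_along_axis(lambda v: np.bincount(v).argmax(), 0, votes)
print(accuracy_score(y_test, manual_hard))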

Soft voting

voting_model = VotingClassifier(estimators=[ 
    ('Decision_Tree', dtree), ('k-NN', knn), ('SVM', svm)], 
    weights=[1,1,1], voting='soft')
voting_model.fit(X_train, y_train)
soft_voting_predicted = voting_model.predict(X_test) 
accuracy_score(y_test, soft_voting_predicted) 
0.9333333333333333
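Soft voting instead averages the three models' per-class probabilities and predicts the class with the highest mean, so two moderately confident models can outweigh one overconfident one. The same idea by hand (my addition):

# Average the class-probability matrices, then take the most probable class
avg_proba = (dtree.predict_proba(X_test)
             + knn.predict_proba(X_test)
             + svm.predict_proba(X_test)) / 3
manual_soft = np.argmax(avg_proba, axis=1)
print(accuracy_score(y_test, manual_soft))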

Visualizing the accuracy comparison

x = np.arange(5)   # five bars: three single models + two voting ensembles
plt.bar(x, height= [accuracy_score(y_test, dtree_predicted), 
                    accuracy_score(y_test, knn_predicted),
                    accuracy_score(y_test, svm_predicted),
                    accuracy_score(y_test, hard_voting_predicted),
                    accuracy_score(y_test, soft_voting_predicted)])
plt.xticks(x, ['decision tree','knn','svm','hard voting','soft voting']); 

(bar chart comparing the accuracies of the five models)
