ADP / Practical Exam
Bagging
hyerimir
2024. 1. 29. 22:10
# Ensemble methods
# Bagging, boosting, and random forests are the most representative examples
# Bootstrap
# A form of random sampling: before testing a hypothesis or computing statistics, simple random
# sampling with replacement (duplicates allowed) is applied to obtain several samples of the same size as the original data
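# A minimal sketch of what bootstrapping looks like in numpy (illustrative toy data, not part of the original example):
import numpy as np
rng = np.random.default_rng(0)
data = np.arange(10)  # the "original sample"
boot_sample = rng.choice(data, size = len(data), replace = True)  # same size, duplicates allowed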
# Bagging (Bootstrap aggregating)
# Treat the given data as if it were the population, generate several bootstrap samples from it,
# build a prediction model on each bootstrap sample, and then combine the models into a final prediction model
# The bootstrapping and modeling steps for the individual bootstrap samples can be carried out in parallel
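# A hand-rolled sketch of that idea on toy data (illustrative only; the BaggingRegressor used below automates all of this):
import numpy as np
from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor
X_toy, y_toy = make_regression(n_samples = 200, n_features = 5, random_state = 0)
rng = np.random.default_rng(0)
trees = []
for _ in range(10):
    idx = rng.choice(len(X_toy), size = len(X_toy), replace = True)  # one bootstrap sample
    trees.append(DecisionTreeRegressor().fit(X_toy[idx], y_toy[idx]))
y_hat = np.mean([t.predict(X_toy) for t in trees], axis = 0)  # aggregate by averaging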
import pandas as pd
df = pd.read_csv('../data/kc_house_data.csv')
df = df.drop(['id', 'date'], axis = 1)  # drop identifier/date columns that are not used as predictors
X = df.drop('price', axis = 1)
y = df['price']
X = pd.get_dummies(data = X, columns = ['waterfront'])  # one-hot encode the categorical waterfront column
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 48)
from sklearn.ensemble import BaggingRegressor
model_bag_reg = BaggingRegressor()  # the default base estimator is a decision tree
model_bag_reg.fit(X_train, y_train)
model_bag_reg.score(X_train, y_train)  # R^2 on the training set
model_bag_reg.score(X_test, y_test)  # R^2 on the held-out test set
# Measuring performance with out-of-bag (OOB) samples
# Each bootstrap sample leaves out roughly 37% of the rows on average, so every base estimator
# can be scored on the rows it never saw during fitting
model_bag_reg_oob = BaggingRegressor(n_estimators = 100, oob_score = True)
# this built-in validation means a separate hold-out split is not needed here
model_bag_reg_oob.fit(X, y)
model_bag_reg_oob.oob_score_  # OOB R^2
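# The same figure can be reproduced from the per-row OOB predictions the fitted model exposes
# (a quick sanity check, assuming oob_score = True as above):
from sklearn.metrics import r2_score
r2_score(y, model_bag_reg_oob.oob_prediction_)  # matches oob_score_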
# Checking variable importance
# BaggingRegressor has no feature_importances_ attribute of its own, so average the importances of the base trees
model_bag_reg.estimators_[0].feature_importances_  # importance scores of a single base tree
import numpy as np
importances = pd.DataFrame(np.mean([est.feature_importances_ for est in model_bag_reg.estimators_], axis = 0))
feature_importances = pd.concat([pd.DataFrame(X.columns), importances], axis = 1)
feature_importances.columns = ['col_name', 'feature_importance']
feature_importances = feature_importances.sort_values(by = 'feature_importance', ascending = False).reset_index(drop = True)
import matplotlib.pyplot as plt
plt.barh(feature_importances['col_name'], feature_importances['feature_importance'], align = 'center')
plt.xlabel('feature importance', size = 15)
plt.ylabel('feature', size = 15)
plt.show()
# Bagging (classification)
import pandas as pd
from sklearn.model_selection import train_test_split
credit = pd.read_csv('../data/credit_final.csv')
X = credit[credit.columns.difference(['credit.rating'])]  # every column except the target
y = credit['credit.rating']  # 1-d target (a single-column DataFrame triggers a DataConversionWarning)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 48)
X_train.head()
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
clf = BaggingClassifier(estimator = DecisionTreeClassifier(), n_estimators = 100,
                        max_samples = 1.0, max_features = 1.0)  # 'estimator' replaced 'base_estimator' in scikit-learn 1.2+
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
clf.score(X_test, y_test)  # classification accuracy on the test set
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score
pd.DataFrame(confusion_matrix(y_test, y_pred), index = ['True[0]', 'True[1]'],
columns = ['pred[0]', 'pred[1]'])
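# The metrics imported above can be computed directly from the predictions
# (assuming the positive class is labeled 1, which the post does not state explicitly):
precision_score(y_test, y_pred)
recall_score(y_test, y_pred)
f1_score(y_test, y_pred)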
import matplotlib.pyplot as plt
from sklearn.metrics import RocCurveDisplay, roc_auc_score
RocCurveDisplay.from_estimator(clf, X_test, y_test)  # plot_roc_curve was removed in scikit-learn 1.2
plt.show()
roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])  # AUC from the positive-class probabilities