0. 핵심개념 및 사이킷런 알고리즘 API 링크
sklearn.tree — Decision Tree API: https://scikit-learn.org/stable/api/sklearn.tree.html
Part1. 분류(Classification)
1. 분석 데이터 준비
# Load the Wisconsin breast-cancer data, split it, and min-max scale the features.
import warnings
warnings.filterwarnings("ignore")
import pandas as pd

data1 = pd.read_csv('breast-cancer-wisconsin.csv', encoding='utf-8')
# Columns 1..9 are the predictors; "Class" is the target label (0/1).
X = data1[data1.columns[1:10]]
y = data1[["Class"]]

from sklearn.model_selection import train_test_split
# stratify=y keeps the class ratio identical across the train/test splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)

from sklearn.preprocessing import MinMaxScaler
# Fit the scaler on the training data only, then apply it to both splits
# so no information from the test set leaks into the scaling.
scaler = MinMaxScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test)
2. 기본모델 적용
from sklearn.tree import DecisionTreeClassifier

# Fit an unconstrained decision tree (fit() returns the estimator itself).
model = DecisionTreeClassifier().fit(X_scaled_train, y_train)
pred_train = model.predict(X_scaled_train)
# Training accuracy — a fully grown tree memorizes the training data.
model.score(X_scaled_train, y_train)
1.0
from sklearn.metrics import confusion_matrix

# Confusion matrix on the training split: rows = actual, columns = predicted.
confusion_train = confusion_matrix(y_true=y_train, y_pred=pred_train)
print("훈련데이터 오차행렬:\n", confusion_train)
훈련데이터 오차행렬:
[[333 0]
[ 0 179]]
from sklearn.metrics import classification_report

# Per-class precision/recall/F1 plus overall accuracy on the training split.
cfreport_train = classification_report(y_true=y_train, y_pred=pred_train)
print("분류예측 레포트:\n", cfreport_train)
분류예측 레포트:
precision recall f1-score support
0 1.00 1.00 1.00 333
1 1.00 1.00 1.00 179
accuracy 1.00 512
macro avg 1.00 1.00 1.00 512
weighted avg 1.00 1.00 1.00 512
# Generalization check: predict on the held-out split and report its accuracy.
pred_test = model.predict(X_scaled_test)
model.score(X_scaled_test, y_test)
0.9532163742690059
# Confusion matrix on the test split: rows = actual, columns = predicted.
confusion_test = confusion_matrix(y_true=y_test, y_pred=pred_test)
print("테스트데이터 오차행렬:\n", confusion_test)
테스트데이터 오차행렬:
[[106 5]
[ 3 57]]
from sklearn.metrics import classification_report

# Same per-class report, now on the held-out test split.
cfreport_test = classification_report(y_true=y_test, y_pred=pred_test)
print("분류예측 레포트:\n", cfreport_test)
분류예측 레포트:
precision recall f1-score support
0 0.97 0.95 0.96 111
1 0.92 0.95 0.93 60
accuracy 0.95 171
macro avg 0.95 0.95 0.95 171
weighted avg 0.95 0.95 0.95 171
3. Grid Search
from sklearn.model_selection import GridSearchCV

# Exhaustive search over tree depth and minimum leaf size, 5-fold CV.
param_grid = {
    'max_depth': range(2, 20, 2),
    'min_samples_leaf': range(1, 50, 2),
}
grid_search = GridSearchCV(DecisionTreeClassifier(), param_grid, cv=5)
grid_search.fit(X_scaled_train, y_train)
GridSearchCV(cv=5, estimator=DecisionTreeClassifier(),
param_grid={'max_depth': range(2, 20, 2),
'min_samples_leaf': range(1, 50, 2)})
# Report the chosen hyper-parameters, best CV accuracy, and test-set accuracy.
best_params = grid_search.best_params_
best_cv_score = grid_search.best_score_
test_score = grid_search.score(X_scaled_test, y_test)
print("Best Parameter: {}".format(best_params))
print("Best Score: {:.4f}".format(best_cv_score))
print("TestSet Score: {:.4f}".format(test_score))
Best Parameter: {'max_depth': 6, 'min_samples_leaf': 1}
Best Score: 0.9589
TestSet Score: 0.9415
4. Random Search
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV

# Randomized search: sample 20 (depth, leaf) candidates from uniform integer
# distributions instead of evaluating every grid point.
param_distribs = {
    'max_depth': randint(low=1, high=20),
    'min_samples_leaf': randint(low=1, high=50),
}
random_search = RandomizedSearchCV(DecisionTreeClassifier(),
                                   param_distributions=param_distribs,
                                   n_iter=20, cv=5)
random_search.fit(X_scaled_train, y_train)
RandomizedSearchCV(cv=5, estimator=DecisionTreeClassifier(), n_iter=20,
param_distributions={'max_depth': <scipy.stats._distn_infrastructure.rv_frozen object at 0x0000015A318DDF10>,
'min_samples_leaf': <scipy.stats._distn_infrastructure.rv_frozen object at 0x0000015A318DDD60>})
# Report the sampled best hyper-parameters, CV accuracy, and test accuracy.
best_params = random_search.best_params_
best_cv_score = random_search.best_score_
test_score = random_search.score(X_scaled_test, y_test)
print("Best Parameter: {}".format(best_params))
print("Best Score: {:.4f}".format(best_cv_score))
print("TestSet Score: {:.4f}".format(test_score))
Best Parameter: {'max_depth': 17, 'min_samples_leaf': 2}
Best Score: 0.9531
TestSet Score: 0.9532
Part2. 회귀(Regression)
1. 분석 데이터 준비
# Load the house-price data: four numeric features predict "house_value".
data2 = pd.read_csv('house_price.csv', encoding='utf-8')
X = data2[data2.columns[1:5]]
y = data2[["house_value"]]

from sklearn.model_selection import train_test_split
# No stratification here — the target is continuous.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

from sklearn.preprocessing import MinMaxScaler
# Scale on the training split only, then transform both splits.
scaler = MinMaxScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test)
2. 기본모델 적용
from sklearn.tree import DecisionTreeRegressor

# An unconstrained regression tree also overfits: R^2 is 1.0 on training data.
model = DecisionTreeRegressor().fit(X_scaled_train, y_train)
pred_train = model.predict(X_scaled_train)
model.score(X_scaled_train, y_train)
1.0
# R^2 on the test split — the gap to the training score reveals overfitting.
pred_test = model.predict(X_scaled_test)
model.score(X_scaled_test, y_test)
0.22116121551330037
import numpy as np
from sklearn.metrics import mean_squared_error

# RMSE is in the target's own units, which is easier to read than raw MSE.
rmse_train = np.sqrt(mean_squared_error(y_train, pred_train))
rmse_test = np.sqrt(mean_squared_error(y_test, pred_test))
print("훈련 데이터 RMSE:", rmse_train)
print("테스트 데이터 RMSE:", rmse_test)
훈련 데이터 RMSE: 0.0
테스트 데이터 RMSE: 84369.65224928294
3. Grid Search
from sklearn.model_selection import GridSearchCV

# Same hyper-parameter grid as the classifier, now scored by R^2 (5-fold CV).
param_grid = {
    'max_depth': range(2, 20, 2),
    'min_samples_leaf': range(1, 50, 2),
}
grid_search = GridSearchCV(DecisionTreeRegressor(), param_grid, cv=5)
grid_search.fit(X_scaled_train, y_train)
GridSearchCV(cv=5, estimator=DecisionTreeRegressor(),
param_grid={'max_depth': range(2, 20, 2),
'min_samples_leaf': range(1, 50, 2)})
# Report the chosen hyper-parameters, best CV R^2, and test-set R^2.
best_params = grid_search.best_params_
best_cv_score = grid_search.best_score_
test_score = grid_search.score(X_scaled_test, y_test)
print("Best Parameter: {}".format(best_params))
print("Best Score: {:.4f}".format(best_cv_score))
print("TestSet Score: {:.4f}".format(test_score))
Best Parameter: {'max_depth': 8, 'min_samples_leaf': 49}
Best Score: 0.5592
TestSet Score: 0.5770
4. Random Search
from sklearn.model_selection import RandomizedSearchCV

# NOTE(review): randint comes from the Part 1 import of scipy.stats.
param_distribs = {
    'max_depth': randint(low=1, high=20),
    'min_samples_leaf': randint(low=1, high=50),
}
random_search = RandomizedSearchCV(DecisionTreeRegressor(),
                                   param_distributions=param_distribs,
                                   n_iter=20, cv=5)
random_search.fit(X_scaled_train, y_train)
RandomizedSearchCV(cv=5, estimator=DecisionTreeRegressor(), n_iter=20,
param_distributions={'max_depth': <scipy.stats._distn_infrastructure.rv_frozen object at 0x0000015A318EB3A0>,
'min_samples_leaf': <scipy.stats._distn_infrastructure.rv_frozen object at 0x0000015A318E3C40>})
# Report the sampled best hyper-parameters, CV R^2, and test-set R^2.
best_params = random_search.best_params_
best_cv_score = random_search.best_score_
test_score = random_search.score(X_scaled_test, y_test)
print("Best Parameter: {}".format(best_params))
print("Best Score: {:.4f}".format(best_cv_score))
print("TestSet Score: {:.4f}".format(test_score))
Best Parameter: {'max_depth': 13, 'min_samples_leaf': 47}
Best Score: 0.5576
TestSet Score: 0.5763