13-1 What is Ensemble Learning

Notebook Example

Notebook Source Code

Ensemble Learning
[1]
import numpy as np
import matplotlib.pyplot as plt
[2]
from sklearn import datasets
X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42)
[3]
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
<matplotlib.collections.PathCollection at 0x2337fd4a160>

[4]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
[5]
from sklearn.linear_model import LogisticRegression

log_clf = LogisticRegression()
log_clf.fit(X_train, y_train)
log_clf.score(X_test, y_test)
0.864
[6]
from sklearn.svm import SVC

svm_clf = SVC()
svm_clf.fit(X_train, y_train)
svm_clf.score(X_test, y_test)
0.896
[7]
from sklearn.tree import DecisionTreeClassifier

dt_clf = DecisionTreeClassifier()
dt_clf.fit(X_train, y_train)
dt_clf.score(X_test, y_test)
0.84
[8]
y_predict1 = log_clf.predict(X_test)
y_predict2 = svm_clf.predict(X_test)
y_predict3 = dt_clf.predict(X_test)
[9]
# Majority vote: predict 1 when at least two of the three classifiers do
y_predict = np.array((y_predict1 + y_predict2 + y_predict3) >= 2, dtype='int')
[10]
y_predict[:10]
array([1, 0, 0, 1, 1, 1, 0, 0, 0, 0])
[11]
from sklearn.metrics import accuracy_score

accuracy_score(y_test, y_predict)
0.904
Voting Classifier
[12]
from sklearn.ensemble import VotingClassifier

voting_clf = VotingClassifier(estimators=[
    ('log_clf', LogisticRegression()),
    ('svm_clf', SVC()),
    ('dt_clf', DecisionTreeClassifier())
], voting='hard')
[13]
voting_clf.fit(X_train, y_train)
VotingClassifier(estimators=[('log_clf', LogisticRegression()),
                             ('svm_clf', SVC()),
                             ('dt_clf', DecisionTreeClassifier())])
[14]
voting_clf.score(X_test,y_test)
0.912

13-2 Soft Voting Classifier

Notebook Example

Notebook Source Code

Soft Voting
[1]
import numpy as np
import matplotlib.pyplot as plt
[2]
from sklearn import datasets
X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42)
[3]
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
<matplotlib.collections.PathCollection at 0x1dbd04d9190>

[4]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
Hard Voting Classifier
[5]
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier

voting_clf = VotingClassifier(estimators=[
    ('log_clf', LogisticRegression()),
    ('svm_clf', SVC()),
    ('dt_clf', DecisionTreeClassifier())
], voting='hard')
[6]
voting_clf.fit(X_train, y_train)
voting_clf.score(X_test,y_test)
0.912
Soft Voting Classifier
[7]
voting_clf2 = VotingClassifier(estimators=[
    ('log_clf', LogisticRegression()),
    # soft voting averages predict_proba, so SVC must enable probability estimates
    ('svm_clf', SVC(probability=True)),
    ('dt_clf', DecisionTreeClassifier())
], voting='soft')
[8]
voting_clf2.fit(X_train, y_train)
voting_clf2.score(X_test,y_test)
0.92
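Under the hood, voting='soft' averages the class probabilities reported by each fitted sub-estimator and predicts the argmax; that is why SVC needs probability=True. A minimal sketch of that computation (my own, using the fitted estimators_ attribute):

# average the predicted class probabilities of the three sub-estimators
probas = np.mean([est.predict_proba(X_test)
                  for est in voting_clf2.estimators_], axis=0)
# soft vote: pick the class with the highest averaged probability
y_soft = np.argmax(probas, axis=1)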

13-3 Bagging and Pasting

Notebook Example

Notebook Source Code

Bagging and Pasting
[1]
import numpy as np
import matplotlib.pyplot as plt
[2]
from sklearn import datasets
X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42)
[3]
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
<matplotlib.collections.PathCollection at 0x25647d0a160>

[4]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
Using Bagging
[5]
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier

bagging_clf = BaggingClassifier(DecisionTreeClassifier(),
                                n_estimators=500, max_samples=100,
                                bootstrap=True)  # bootstrap=True: sample with replacement (bagging)
[6]
%%time
bagging_clf.fit(X_train, y_train)
bagging_clf.score(X_test,y_test)
CPU times: total: 1.47 s
Wall time: 1.51 s

0.904
[7]
bagging_clf2 = BaggingClassifier(DecisionTreeClassifier(),
                                 n_estimators=5000, max_samples=100,
                                 bootstrap=True)
[8]
%%time
bagging_clf2.fit(X_train, y_train)
bagging_clf2.score(X_test,y_test)
CPU times: total: 15 s
Wall time: 15.2 s

0.912
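The section title also covers pasting, i.e. sampling without replacement; in scikit-learn that is simply bootstrap=False. A minimal sketch for comparison (my own, not a cell from the original notebook):

pasting_clf = BaggingClassifier(DecisionTreeClassifier(),
                                n_estimators=500, max_samples=100,
                                bootstrap=False)  # pasting: sample without replacement
pasting_clf.fit(X_train, y_train)
pasting_clf.score(X_test, y_test)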

13-4 OOB (Out-of-Bag) and Further Discussion of Bagging

Notebook Example

Notebook Source Code

OOB and More on Bagging
[1]
import numpy as np
import matplotlib.pyplot as plt
[2]
from sklearn import datasets
X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42)
[3]
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
<matplotlib.collections.PathCollection at 0x26a0b87a280>

Using OOB
[4]
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier

bagging_clf = BaggingClassifier(DecisionTreeClassifier(),
                                n_estimators=500, max_samples=100,
                                bootstrap=True, oob_score=True)  # track out-of-bag accuracy

bagging_clf.fit(X,y)
bagging_clf.oob_score_
0.92
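Why this works: with sampling with replacement, each base estimator never sees some of the training points, and those out-of-bag points serve as a free validation set, so no train_test_split is needed. A quick back-of-the-envelope check (my own, not a notebook cell):

n, m = 500, 100          # training-set size, max_samples per estimator
p_in = 1 - (1 - 1/n)**m  # chance a given point lands in one estimator's sample
print(p_in)              # ≈ 0.18, so most points are out-of-bag for each estimator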
n_jobs
[5]
%%time
bagging_clf = BaggingClassifier(DecisionTreeClassifier(),
                                n_estimators=500, max_samples=100,
                                bootstrap=True, oob_score=True)

bagging_clf.fit(X,y)
bagging_clf.oob_score_
CPU times: total: 1.77 s
Wall time: 1.81 s

0.918
[6]
%%time
bagging_clf = BaggingClassifier(DecisionTreeClassifier(),
                                n_estimators=500, max_samples=100,
                                bootstrap=True, oob_score=True,
                                n_jobs=-1)  # parallelize across all CPU cores

bagging_clf.fit(X,y)
bagging_clf.oob_score_
CPU times: total: 453 ms
Wall time: 6.93 s

0.918
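Note that the wall time actually gets worse here with n_jobs=-1: on a 500-sample toy problem, the cost of spinning up worker processes outweighs the training itself, which is also why the CPU time measured in the main process drops. The parallel speed-up only pays off on larger workloads.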
bootstrap_features
[7]
%%time
# Random Subspaces: keep all 500 training samples, bootstrap only the features
random_subspaces_clf = BaggingClassifier(DecisionTreeClassifier(),
                                         n_estimators=500, max_samples=500,
                                         bootstrap=True, oob_score=True,
                                         n_jobs=-1,
                                         max_features=1, bootstrap_features=True)

random_subspaces_clf.fit(X,y)
random_subspaces_clf.oob_score_
CPU times: total: 438 ms
Wall time: 1.38 s

0.82
[8]
%%time
# Random Patches: bootstrap both the samples and the features
random_patches_clf = BaggingClassifier(DecisionTreeClassifier(),
                                       n_estimators=500, max_samples=100,
                                       bootstrap=True, oob_score=True,
                                       n_jobs=-1,
                                       max_features=1, bootstrap_features=True)

random_patches_clf.fit(X,y)
random_patches_clf.oob_score_
CPU times: total: 469 ms
Wall time: 1.31 s

0.856

13-5 Random Forest and Extra-Trees

Notebook Example

Notebook Source Code

Random Forest and Extra-Trees
[1]
import numpy as np
import matplotlib.pyplot as plt
[2]
from sklearn import datasets
X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42)
[3]
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
<matplotlib.collections.PathCollection at 0x1b73906a2b0>

Random Forest
[4]
from sklearn.ensemble import RandomForestClassifier

rf_clf = RandomForestClassifier(n_estimators=500, random_state=666,
                                oob_score=True, n_jobs=-1)
rf_clf.fit(X, y)
RandomForestClassifier(n_estimators=500, n_jobs=-1, oob_score=True,
                       random_state=666)
[5]
rf_clf.oob_score_
0.896
[6]
# limiting tree size (max_leaf_nodes=16) regularizes each tree
rf_clf2 = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16,
                                 random_state=666,
                                 oob_score=True, n_jobs=-1)
rf_clf2.fit(X, y)
rf_clf2.oob_score_
0.92
Using Extra-Trees
[7]
from sklearn.ensemble import ExtraTreesClassifier
# Extra-Trees also randomizes the split thresholds, trading a bit of bias
# for lower variance and faster training
et_clf = ExtraTreesClassifier(n_estimators=500, bootstrap=True,
                              oob_score=True, random_state=666)
et_clf.fit(X, y)
ExtraTreesClassifier(bootstrap=True, n_estimators=500, oob_score=True,
                     random_state=666)
[8]
et_clf.oob_score_
0.892
Ensemble Learning for Regression Problems
[9]
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
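Each of these regressors mirrors its classifier counterpart, predicting the average of its trees' outputs instead of taking a vote. A minimal sketch on a toy 1-D problem (my own example, not from the original notebook):

rng = np.random.RandomState(42)
X_reg = rng.uniform(-3, 3, size=(200, 1))
y_reg = np.sin(X_reg).ravel() + rng.normal(0, 0.1, 200)

rf_reg = RandomForestRegressor(n_estimators=100, oob_score=True, random_state=42)
rf_reg.fit(X_reg, y_reg)
rf_reg.oob_score_  # R^2 measured on the out-of-bag samples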

13-6 AdaBoost and Gradient Boosting

Notebook Example

Notebook Source Code

Boosting
[1]
import numpy as np
import matplotlib.pyplot as plt
[2]
from sklearn import datasets
X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42)
[3]
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
<matplotlib.collections.PathCollection at 0x224b3d8b160>

[4]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
AdaBoost
[5]
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier

# boost 500 shallow trees; each round re-weights the samples that
# the previous rounds misclassified
ada_clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),
                             n_estimators=500)
ada_clf.fit(X_train, y_train)
AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=2),
                   n_estimators=500)
[6]
ada_clf.score(X_test, y_test)
0.824
Gradient Boosting
[7]
from sklearn.ensemble import GradientBoostingClassifier

gd_clf = GradientBoostingClassifier(max_depth=2, n_estimators=30)
gd_clf.fit(X_train, y_train)
GradientBoostingClassifier(max_depth=2, n_estimators=30)
[8]
gd_clf.score(X_test,y_test)
0.848
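Unlike AdaBoost, gradient boosting fits each new tree to the residual errors left by the ensemble so far (for squared loss). A minimal hand-rolled sketch of three rounds (my own illustration; it treats the 0/1 labels as regression targets purely to show the mechanism):

from sklearn.tree import DecisionTreeRegressor

tree1 = DecisionTreeRegressor(max_depth=2).fit(X_train, y_train)
r1 = y_train - tree1.predict(X_train)   # residuals after round 1
tree2 = DecisionTreeRegressor(max_depth=2).fit(X_train, r1)
r2 = r1 - tree2.predict(X_train)        # residuals after round 2
tree3 = DecisionTreeRegressor(max_depth=2).fit(X_train, r2)

# the ensemble prediction is the sum of the three trees' outputs
y_pred = sum(tree.predict(X_test) for tree in (tree1, tree2, tree3))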
Boosting for Regression Problems
[9]
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
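These follow the same API as the classifiers. A minimal sketch with GradientBoostingRegressor on a toy 1-D problem (my own example, not from the original notes):

rng = np.random.RandomState(42)
X_reg = rng.uniform(-3, 3, size=(200, 1))
y_reg = np.sin(X_reg).ravel() + rng.normal(0, 0.1, 200)

gbr = GradientBoostingRegressor(max_depth=2, n_estimators=100)
gbr.fit(X_reg, y_reg)
gbr.score(X_reg, y_reg)  # in-sample R^2; use a train/test split in practice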

13-7 Stacking
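Stacking trains a meta-learner on the predictions of the base estimators instead of voting. A minimal sketch (my own, assuming scikit-learn >= 0.22 and the same moons train/test split as in the sections above) using StackingClassifier, which fits the meta-learner on cross-validated predictions of the base estimators:

from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

stacking_clf = StackingClassifier(
    estimators=[('svm_clf', SVC()),
                ('dt_clf', DecisionTreeClassifier())],
    final_estimator=LogisticRegression())  # meta-learner on top

stacking_clf.fit(X_train, y_train)
stacking_clf.score(X_test, y_test)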

Chapter 14: More Machine Learning Algorithms

14-1 Studying the scikit-learn Documentation

Website: www.scikit-learn.org
