Theory
Covers CART, GBDT, and LR; I need to find time to write this up properly.
Code
- Install `lightgbm`
- Install lightgbm's dependency `libomp` (without it, lightgbm fails with an error): `brew install libomp`
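A quick way to confirm the install worked (a minimal check; on macOS a missing `libomp` typically surfaces as an import-time error):

```python
import lightgbm as lgb  # raises here if libomp is missing

print(lgb.__version__)
```

The full example (GBDT leaf indices fed to a logistic regression) follows: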
```python
import lightgbm as lgb
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
print('Load data...')
# Iris has three classes, but the LightGBM objective below is 'binary',
# which requires labels in {0, 1}; keep only classes 0 and 1.
iris = load_iris()
mask = iris.target < 2
data = iris.data[mask]
target = iris.target[mask]
print("Target:", target)
x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
lgb_train = lgb.Dataset(x_train, y_train)
lgb_eval = lgb.Dataset(x_test, y_test, reference=lgb_train)
params = {
    'task': 'train',
    'boosting_type': 'gbdt',       # boosting type: gradient boosted decision trees
    'objective': 'binary',         # objective: binary classification
    'metric': {'binary_logloss'},  # evaluation metric
    'num_leaves': 31,              # maximum leaves per tree
    'learning_rate': 0.01,
    'feature_fraction': 0.9,       # fraction of features sampled when building each tree
    'bagging_fraction': 0.8,       # fraction of rows sampled when bagging
    'bagging_freq': 5,             # re-sample rows every 5 iterations
    'verbose': 0
}
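# Hypothetical tuning sketch: the same knobs can also be searched with
# sklearn's GridSearchCV via the LGBMClassifier wrapper (the grid values
# below are illustrative, not tuned for this data):
# from sklearn.model_selection import GridSearchCV
# search = GridSearchCV(lgb.LGBMClassifier(objective='binary'),
#                       param_grid={'num_leaves': [15, 31, 63],
#                                   'learning_rate': [0.01, 0.1]},
#                       cv=3)
# search.fit(x_train, y_train)
# print(search.best_params_)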
print('Start training...')
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=100,    # number of boosting rounds (a.k.a. num_trees)
                valid_sets=[lgb_eval])  # evaluate on the held-out split
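# Optional, hedged sketch: with lightgbm >= 3.3, early stopping is done via
# callbacks rather than a fixed round count:
# gbm = lgb.train(params, lgb_train, num_boost_round=500,
#                 valid_sets=[lgb_eval],
#                 callbacks=[lgb.early_stopping(stopping_rounds=20)])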
print('Save model...')
gbm.save_model('model.txt')
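# The saved model can later be loaded back without retraining:
# bst = lgb.Booster(model_file='model.txt')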
print('Start predicting...')
# Predict leaf indices on the training data: with pred_leaf=True each row
# holds the index of the leaf the sample falls into, one entry per tree,
# so the output shape is (n_samples, n_trees).
y_pred = gbm.predict(x_train, pred_leaf=True)
print(np.array(y_pred).shape)
print(y_pred[0])
print('Writing transformed training data')
num_leaf = 31  # must match 'num_leaves' in params
transformed_training_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf],
                                       dtype=np.int64)  # N * num_trees * num_leaves
# One-hot encode the leaf indices: tree t's leaf j maps to column t * num_leaf + j.
for i in range(0, len(y_pred)):
    temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])
    transformed_training_matrix[i][temp] += 1
y_pred = gbm.predict(x_test, pred_leaf=True)
print('Writing transformed testing data')
transformed_testing_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf],
                                      dtype=np.int64)
for i in range(0, len(y_pred)):
    temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])
    transformed_testing_matrix[i][temp] += 1
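# Alternative sketch: the two loops above can be replaced with sklearn's
# OneHotEncoder, treating each tree's leaf index as one categorical feature
# (handle_unknown='ignore' covers leaves never reached by the training rows):
# from sklearn.preprocessing import OneHotEncoder
# enc = OneHotEncoder(handle_unknown='ignore')
# X_lr_train = enc.fit_transform(gbm.predict(x_train, pred_leaf=True))
# X_lr_test = enc.transform(gbm.predict(x_test, pred_leaf=True))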
lm = LogisticRegression(penalty='l2', C=0.05)  # logistic regression on the one-hot leaf features
lm.fit(transformed_training_matrix, y_train)   # fit on the transformed training data
y_pred_test = lm.predict_proba(transformed_testing_matrix)  # probability for each class
print(y_pred_test)
# Average cross-entropy (log loss) on the test set. Labels here are in {0, 1};
# the (1+y)/2, (1-y)/2 form used in the Facebook GBDT+LR paper assumes labels
# in {-1, +1}, and its Normalized Entropy additionally divides by the entropy
# of the base rate.
NE = (-1) / len(y_pred_test) * sum(y_test * np.log(y_pred_test[:, 1]) +
                                   (1 - y_test) * np.log(1 - y_pred_test[:, 1]))
print("Normalized Cross Entropy " + str(NE))
```