使用者流失預測
資料詳細情況請檢視:使用者流失分析
資料預處理
轉換資料類型和處理缺失值
# Convert data types: TotalCharges and MonthlyCharges should both be
# numeric. TotalCharges arrives as text, so coerce it — entries that
# cannot be parsed become NaN.
total = pd.to_numeric(data['TotalCharges'], errors='coerce')
# Back-fill those NaNs with the customer's MonthlyCharges value.
data['TotalCharges'] = total.where(total.notna(), data['MonthlyCharges'])

特征工程
資料歸一化并删除不需要标簽
# Save the customer IDs BEFORE dropping the column: they are required
# later to label the prediction output (referenced as data_id there);
# the original code never defined data_id, which would raise NameError.
data_id = data.iloc[:, 0]
# Drop the customerID column — it carries no predictive signal.
data = data.iloc[:, 1:]

def uni(label):
    """Print the distinct values of the given column of `data`."""
    print(label, '-----', data[label].unique())

# List the distinct values of every non-numeric (object-dtype) column.
data_object = data.select_dtypes(['object'])
for col in data_object.columns:
    uni(col)
根據之前的分析,No phone service,No internet service 可以等同于No
# 'No phone service' / 'No internet service' mean the same as a plain
# 'No' for these features, so collapse them before binarization.
data['MultipleLines'] = data['MultipleLines'].replace('No phone service', 'No')
data['MultipleLines'].value_counts()
internet_cols = ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
                 'TechSupport', 'StreamingTV', 'StreamingMovies']
# Vectorized replace over the whole column subset (same effect as a
# per-column loop).
data[internet_cols] = data[internet_cols].replace('No internet service', 'No')
對隻有兩類值的特征标簽,進行二值化處理,用1, 0來代替
# Binary-encode two-valued categorical columns: Male/Yes -> 1, Female/No -> 0.
data['gender'] = data['gender'].replace({'Male': 1, 'Female': 0})
yes_no_cols = ['Partner', 'Dependents', 'PhoneService', 'MultipleLines',
               'OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
               'TechSupport', 'StreamingTV', 'StreamingMovies',
               'PaperlessBilling', 'Churn']
# One subset-wide replace instead of a per-column loop — identical result.
data[yes_no_cols] = data[yes_no_cols].replace({'Yes': 1, 'No': 0})
數值型資料标準化處理
# Standardize the continuous features (zero mean, unit variance).
# StandardScaler operates column-wise, so one fit over all three
# columns is equivalent to scaling each column with its own scaler.
numeric_cols = ['tenure', 'MonthlyCharges', 'TotalCharges']
data[numeric_cols] = StandardScaler().fit_transform(data[numeric_cols])
對 InternetService,Contract,PaymentMethod 這三列進行獨熱編碼處理
# One-hot encode the multi-valued categorical columns. Calling
# get_dummies per column (without a prefix) keeps the bare category
# names (e.g. 'Fiber optic', 'Mailed check') that are referenced later.
onehot_cols = ['InternetService', 'Contract', 'PaymentMethod']
dummies = [pd.get_dummies(data[col]) for col in onehot_cols]
# Append the dummy columns, then drop the original categorical ones.
train = pd.concat([data] + dummies, axis=1).drop(onehot_cols, axis=1)
根據相關性系數,删除對預測結果影響較小的特征值
# The data set is small, so computing the full Pearson correlation
# matrix with corr() is cheap. Rank features by the absolute strength
# of their correlation with the Churn target.
corr_matrix = train.corr()
corr_matrix['Churn'].abs().sort_values(ascending=False)
根據情況來取舍相關性低的标簽列
本次案例選擇删除小于0.1的标簽
# Drop features whose |correlation| with Churn fell below the 0.1
# threshold chosen above.
weak_features = ['gender', 'PhoneService', 'MultipleLines', 'StreamingMovies',
                 'StreamingTV', 'DeviceProtection', 'OnlineBackup',
                 'Mailed check']
train = train.drop(columns=weak_features)
提取目标标簽,劃分資料
# Separate the feature matrix from the target label. drop() returns a
# copy, so `train` itself still contains the Churn column afterwards.
train_x = train.drop(columns=['Churn'])
train_y = train['Churn']
模型預測
導入模型包
from sklearn.linear_model import LogisticRegression #邏輯回歸
from sklearn.ensemble import RandomForestClassifier #随機森林
from xgboost import XGBClassifier #xgboost
#導入評分子產品
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
訓練模型并選擇最優模型
# 定義一個函數,使其能輸入訓練資料和模型
#然後傳回模型分數
def model_fit(x, y, model):
    """Fit *model* on (x, y) and print recall, precision and F1.

    Args:
        x: feature matrix used for both fitting and evaluation.
        y: binary target labels (1 = churn, 0 = retained).
        model: an unfitted sklearn-compatible classifier.

    Returns:
        The fitted model (backward compatible: previous callers
        ignored the implicit None return).

    NOTE(review): the model is scored on the same data it was trained
    on, so these metrics overstate real-world performance — the
    closing remarks in this file acknowledge the overfitting.
    """
    model.fit(x, y)
    predict = model.predict(x)
    recall = recall_score(y, predict)
    precision = precision_score(y, predict)
    f1 = f1_score(y, predict)
    print(model)
    print('recall :{:.3f}'.format(recall))
    print('precision:{:.3f}'.format(precision))
    print('f-1 :{:.3f}'.format(f1))
    return model
訓練模型
# Instantiate the three candidate models.
LR = LogisticRegression()
RF = RandomForestClassifier()
# Fix: the original passed objective='reg:squarederror', a REGRESSION
# loss, to XGBClassifier on a binary target; 'binary:logistic' is the
# correct classification objective.
XG = XGBClassifier(eval_metric=['logloss', 'auc', 'error'],
                   objective='binary:logistic')
# Fit and score each candidate on the training set.
model_fit(train_x, train_y, LR)
model_fit(train_x, train_y, RF)
model_fit(train_x, train_y, XG)
模型評估
LogisticRegression
recall :0.547
precision:0.658
f-1 :0.597
RandomForestClassifier
recall :0.994
precision:0.995
f-1 :0.994
XGBClassifier
recall :0.819
precision:0.935
f-1 :0.873
根據精确率,召回率,F1值 綜合比較選擇最優模型
本次測試選擇模型為RandomForestClassifier
使用網格搜尋尋找最優參數
from sklearn.model_selection import GridSearchCV

# Candidate hyper-parameter grids for the random forest.
param_grid = [
    {'n_estimators': [3, 5, 10, 15, 25, 30], 'max_features': [2, 4, 6, 8]},
    {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
]
# Fix: 'neg_mean_squared_error' is a regression metric and the wrong
# criterion for selecting a classifier; use F1, consistent with the
# metrics reported by model_fit above.
grid_search = GridSearchCV(RF,
                           param_grid, cv=5,
                           scoring='f1',
                           return_train_score=True)
grid_search.fit(train_x, train_y)
# Show the best parameter combination found.
grid_search.best_params_
預測結果展示
# Take the best estimator found by the grid search (already refitted
# on the full training data by GridSearchCV).
final_model = grid_search.best_estimator_
# Churn probabilities for the first 20 customers; column 1 is P(churn).
final_predictions = final_model.predict_proba(train_x[:20])
# NOTE(review): data_id is not defined anywhere in this file — the
# customerID column must be saved before it is dropped during
# preprocessing (e.g. data_id = data['customerID']); confirm upstream.
output = pd.DataFrame({'customerID':data_id[:20],'ratio':final_predictions[:,1]})
# Rank customers by predicted churn probability, highest risk first.
output = output.sort_values('ratio',ascending=False)
output
建議:預測值越大的使用者流失率就越高,對于公司來說,可以根據實際情況設定流失率門檻值,達到門檻值的群體進行挽留措施,值越大優先級越高。
結果僅供參考
由于資料有限,測試和訓練使用的都是一樣的資料,是以結果的過拟合問題較嚴重,方法思路是差不多的。