主成分分析
PCA顧名思義,就是找出資料裡最主要的方面,用資料裡最主要的方面來代替原始資料。具體地,假如我們的資料集是 n 維的,共有 m 個資料 (x_{1},x_{2},…,x_{m})。我們希望將這 m 個資料的維度從 n 維降到 n' 維,希望這 m 個 n' 維的資料集盡可能地代表原始資料集。我們知道資料從 n 維降到 n' 維肯定會有損失,但是我們希望損失盡可能地小。
成人收入分析
資料集
# Column names for the UCI "Adult" census dataset (the raw file has no header row).
ADULT_COLUMNS = ["Age", "Work-Class", "fnlwgt", "Education",
                 "Education-Num", "Marital-Status", "Occupation",
                 "Relationship", "Race", "Sex", "Capital-gain",
                 "Capital-loss", "Hours-per-week", "Native-Country",
                 "Earnings-Raw"]
file = "adult/adult.data"
data = pd.read_csv(file, header=None, names=ADULT_COLUMNS)
# Drop the blank line(s) in the raw file.
data.dropna(inplace=True)
data.iloc[:5]
data["Hours-per-week"].describe()
data["Education-Num"].median()
data["Work-Class"].unique()
# New boolean feature: works more than a standard 40-hour week.
data["LongHours"] = data["Hours-per-week"] > 40
data[:5]
選取好的特徵
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

# Univariate selection: keep the 3 features with the highest chi-squared
# statistic with respect to the target y.
transformer = SelectKBest(score_func=chi2, k=3)
Xt_chi2 = transformer.fit_transform(X, y)
print(Xt_chi2[:5])
# The three highest-scoring features are Age, Capital-gain and Capital-loss.
print(transformer.scores_)
from scipy.stats import pearsonr
def multivariate_pearsonr(X, y):
    """Score every column of X by its Pearson correlation with y.

    Returns a tuple (scores, p_values) of 1-D numpy arrays, one entry per
    column of X, in the (scores, pvalues) shape expected by SelectKBest.
    """
    results = [pearsonr(X[:, col], y) for col in range(X.shape[1])]
    r_values = np.array([r for r, _ in results])
    p_values = np.array([p for _, p in results])
    return (r_values, p_values)
# Repeat the selection, now scoring features by Pearson correlation.
transformer = SelectKBest(score_func=multivariate_pearsonr, k=3)
Xt_pearson = transformer.fit_transform(X, y)
print(Xt_pearson[:5])
# Best three here: Age, Education-Num and Hours-per-week.
print(transformer.scores_)
模型建立及評估
from sklearn.tree import DecisionTreeClassifier
# sklearn.cross_validation was removed in scikit-learn 0.20; cross_val_score
# now lives in sklearn.model_selection.
from sklearn.model_selection import cross_val_score

# Compare the two feature-selection strategies with the same classifier.
clf = DecisionTreeClassifier(random_state=14)
scores_chi2 = cross_val_score(clf, Xt_chi2, y, scoring="accuracy")
scores_pearson = cross_val_score(clf, Xt_pearson, y, scoring="accuracy")
print("Chi2 performance {0:.1f}%".format(np.mean(scores_chi2) * 100))
# Fixed typo: the original format string ended with '&' instead of '%'.
print("Pearson performance {0:.1f}%".format(np.mean(scores_pearson) * 100))
最終chi2結果為0.85,而pearson的結果為0.75。
網站廣告分析
資料集
import numpy as np
import pandas as pd
# Path to the UCI "Internet Advertisements" dataset (1558 feature columns
# plus one final "ad."/"nonad." label column).
file = "ad-dataset/ad.data"
def convert_num(x):
    """Parse x as a float; unparseable entries (e.g. '?') become NaN."""
    try:
        value = float(x)
    except ValueError:
        value = np.nan
    return value
from collections import defaultdict

# Converter table for pandas: columns 0..1557 are numeric features, the last
# column (1558) is the "ad." / "nonad." class label.
# NOTE: the original code used defaultdict(convert_num), which is a latent
# bug -- a defaultdict factory is called with *no* arguments, so any probe of
# a missing key would raise TypeError (convert_num requires one argument).
# Every column index is assigned explicitly, so a plain dict is correct.
converters = {column: convert_num for column in range(1558)}
# Map the label column to 1.0 for "ad." and 0.0 for anything else.
converters[1558] = lambda x: 1. if x.strip() == "ad." else 0.
ads = pd.read_csv(file, header=None, converters=converters)
print(ads.shape)
# Unparseable entries became NaN above; drop those rows before modelling.
ads.dropna(inplace=True)
print(ads.shape)
print(ads[:5])
PCA選取特徵
# Feature matrix: every column except the label column (1558).
X = ads.drop(1558, axis=1).values
# Target: 1.0 for "ad.", 0.0 for "nonad." (kept as a pandas Series).
y = ads[1558]
print(X.shape, y.shape)

from sklearn.decomposition import PCA

# Project the 1558-dimensional feature space onto its 5 leading principal
# components.
pca = PCA(n_components=5)
Xd = pca.fit_transform(X)
print(Xd[:5])
np.set_printoptions(precision=3, suppress=True)
# Fraction of the total variance captured by each of the 5 components.
pca.explained_variance_ratio_
模型建立及評估
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import cross_val_score
clf = DecisionTreeClassifier(random_state=14)
scores_pca = cross_val_score(clf,Xd,y,scoring='accuracy')
print("The accuracy is {0:.4f}".format(np.mean(scores_pca)))
%matplotlib inline
from matplotlib import pyplot as plt
# only two:is ad or not
classes = set(y)
colors = ["red","green"]
for cur_class,color in zip(classes,colors):
mask = (y == cur_class).values
plt.scatter(Xd[mask,0],Xd[mask,1],marker='o',color=color,label=int(cur_class))
plt.legend()
plt.show()
最終結果準確率為:0.9326