
Study notes on "Machine Learning in Action" (《機器學習實戰》): KNN

This post explains the code for Chapter 2 (KNN) of "Machine Learning in Action".

The code follows this video: https://www.bilibili.com/video/BV16t411Q7TM

I mainly worked through the video and looked things up on my own as I went.

KNN most commonly uses Euclidean distance. It has no training phase; classification is done directly from the stored samples.

Commonly used vector distance metrics:

Euclidean distance, Manhattan distance, Chebyshev distance, Mahalanobis distance, Bhattacharyya distance, Hamming distance, Pearson correlation coefficient, and information entropy. Some of the related formulas and Python code can be found at:

https://blog.csdn.net/weixin_43330946/article/details/105032182
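
Not from the book, but as a minimal NumPy sketch of the first three metrics (assuming the samples are 1-D feature vectors):

import numpy as np

def euclidean(a, b):
    # square root of the sum of squared differences
    return np.sqrt(np.sum((a - b) ** 2))

def manhattan(a, b):
    # sum of absolute differences
    return np.sum(np.abs(a - b))

def chebyshev(a, b):
    # largest absolute difference over all dimensions
    return np.max(np.abs(a - b))

a = np.array([1.0, 101.0])
b = np.array([101.0, 20.0])
print(euclidean(a, b), manhattan(a, b), chebyshev(a, b))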

Pros: high accuracy, insensitive to outliers, no assumptions about the input data (naive Bayes, by contrast, assumes the features are independent and, e.g., Gaussian-distributed).

Cons: high computational complexity (the distance to every training sample must be computed) and high space complexity (all samples must be stored).

Applicable data types: numeric and nominal values.

Code 1:

Given 4 samples with known classes, classify a newly input sample:

import numpy as np
import operator

def createDataSet():
    group = np.array([[1, 101], [5, 89], [100, 5], [115, 8]])
    labels = ['romance', 'romance', 'action', 'action']
    return group, labels


def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]  # shape[0] is the number of rows (samples)
    # np.tile copies inX: once along the column axis, dataSetSize times along the row axis
    diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2  # square the feature differences
    sqDistances = sqDiffMat.sum(axis=1)  # axis=0 sums down the columns, axis=1 sums across each row
    distances = sqDistances ** 0.5
    sortedDistIndices = distances.argsort()  # indices that sort distances in ascending order
    # dictionary counting how many times each class appears
    classCount = {}
    for i in range(k):
        # take the class label of the i-th nearest sample
        voteIlabel = labels[sortedDistIndices[i]]
        # count the votes for that class
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # sort the classes by vote count
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    # key=operator.itemgetter(1) sorts by the dictionary values,
    # key=operator.itemgetter(0) sorts by the dictionary keys,
    # reverse=True sorts in descending order
    return sortedClassCount[0][0]


if __name__ == '__main__':
    group, labels = createDataSet()
    test = [101, 20]
    test_class = classify0(test, group, labels, 3)
    print(test_class)
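
With test = [101, 20] and k = 3, the three nearest training samples are [100, 5] and [115, 8] (both 'action') and [5, 89] ('romance'), so the script prints 'action'.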
           

Code 2:

Predicting match quality for a dating site

import operator
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.font_manager import FontProperties

def file2matrix(filename):
    fr = open(filename)
    arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)
    returnMat = np.zeros((numberOfLines, 3))  # 3 features per sample
    classLabelVector = []  # class label vector to return
    index = 0  # row index

    for line in arrayOLines:
        line = line.strip()  # strips leading/trailing whitespace ('\n', '\r', '\t', ' ') by default
        listFromLine = line.split('\t')  # split the string on the '\t' separator
        returnMat[index, :] = listFromLine[0:3]  # put the first three columns into returnMat
        # map the "degree of liking" label in the text file to a class number
        if listFromLine[-1] == 'didntLike':
            classLabelVector.append(1)
        elif listFromLine[-1] == 'smallDoses':
            classLabelVector.append(2)
        elif listFromLine[-1] == 'largeDoses':
            classLabelVector.append(3)
        index += 1
    return returnMat, classLabelVector


def showdatas(datingDataMat, datingLabels):
    # font for the axis labels; 'simsun.ttc' (SimSun) is only needed if the labels contain Chinese characters
    font = FontProperties(fname=r'simsun.ttc', size=14)
    fig, axs = plt.subplots(nrows=2, ncols=2, sharex=False, sharey=False, figsize=(13, 8))

    LabelsColors = []
    for i in datingLabels:
        if i == 1:
            LabelsColors.append('black')
        if i == 2:
            LabelsColors.append('orange')
        if i == 3:
            LabelsColors.append('red')

    # scatter plot of the first and second columns of datingDataMat (flight miles vs. game time)
    axs[0][0].scatter(x=datingDataMat[:, 0], y=datingDataMat[:, 1], color=LabelsColors, s=15, alpha=.5)
    axs0_title_text = axs[0][0].set_title('Frequent flyer miles earned per year vs. percentage of time spent playing video games', fontproperties=font)
    axs0_xlabel_text = axs[0][0].set_xlabel('Frequent flyer miles earned per year', fontproperties=font)
    axs0_ylabel_text = axs[0][0].set_ylabel('Percentage of time spent playing video games', fontproperties=font)
    plt.setp(axs0_title_text, size=9, weight='bold', color='red')
    plt.setp(axs0_xlabel_text, size=7, weight='bold', color='black')
    plt.setp(axs0_ylabel_text, size=7, weight='bold', color='black')

    # scatter plot of the first and third columns of datingDataMat (flight miles vs. ice cream)
    axs[0][1].scatter(x=datingDataMat[:, 0], y=datingDataMat[:, 2], color=LabelsColors, s=15, alpha=.5)
    axs1_title_text = axs[0][1].set_title('Frequent flyer miles earned per year vs. liters of ice cream consumed per week', fontproperties=font)
    axs1_xlabel_text = axs[0][1].set_xlabel('Frequent flyer miles earned per year', fontproperties=font)
    axs1_ylabel_text = axs[0][1].set_ylabel('Liters of ice cream consumed per week', fontproperties=font)
    plt.setp(axs1_title_text, size=9, weight='bold', color='red')
    plt.setp(axs1_xlabel_text, size=7, weight='bold', color='black')
    plt.setp(axs1_ylabel_text, size=7, weight='bold', color='black')

    # scatter plot of the second and third columns of datingDataMat (game time vs. ice cream)
    axs[1][0].scatter(x=datingDataMat[:, 1], y=datingDataMat[:, 2], color=LabelsColors, s=15, alpha=.5)
    axs2_title_text = axs[1][0].set_title('Percentage of time spent playing video games vs. liters of ice cream consumed per week', fontproperties=font)
    axs2_xlabel_text = axs[1][0].set_xlabel('Percentage of time spent playing video games', fontproperties=font)
    axs2_ylabel_text = axs[1][0].set_ylabel('Liters of ice cream consumed per week', fontproperties=font)
    plt.setp(axs2_title_text, size=9, weight='bold', color='red')
    plt.setp(axs2_xlabel_text, size=7, weight='bold', color='black')
    plt.setp(axs2_ylabel_text, size=7, weight='bold', color='black')

    # legend handles
    didntLike = mlines.Line2D([], [], color='black', marker='.', markersize=6, label='didntLike')
    smallDoses = mlines.Line2D([], [], color='orange', marker='.', markersize=6, label='smallDoses')
    largeDoses = mlines.Line2D([], [], color='red', marker='.', markersize=6, label='largeDoses')

    # add the legends
    axs[0][0].legend(handles=[didntLike, smallDoses, largeDoses])
    axs[0][1].legend(handles=[didntLike, smallDoses, largeDoses])
    axs[1][0].legend(handles=[didntLike, smallDoses, largeDoses])

    plt.show()


def autoNorm(dataSet):
    # min-max normalization: newValue = (oldValue - min) / (max - min)
    minVals = dataSet.min(0)  # column-wise minimum
    maxVals = dataSet.max(0)  # column-wise maximum
    ranges = maxVals - minVals
    normDataSet = np.zeros(np.shape(dataSet))
    m = dataSet.shape[0]
    normDataSet = dataSet - np.tile(minVals, (m, 1))
    normDataSet = normDataSet / np.tile(ranges, (m, 1))
    return normDataSet, ranges, minVals


def datingClassTest():
    filename = 'datingTestSet.txt'
    datingDataMat, datingLabels = file2matrix(filename)
    hoRatio = 0.10  # hold out 10% of the data as the test set
    normMat, ranges, minVals = autoNorm(datingDataMat)  # normalize; returns the normalized matrix, the ranges and the minimums
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0

    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 4)
        print('predicted class: %d\ttrue class: %d' % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print('error rate: %f%%' % (errorCount / float(numTestVecs) * 100))
        


def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    sortedDistIndices = distances.argsort()
    classCount = {}

    for i in range(k):
        voteIlabel = labels[sortedDistIndices[i]]  # class label of the i-th nearest sample
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)

    return sortedClassCount[0][0]


def classifyPerson():
    resultList = ['dislike', 'somewhat like', 'really like']
    percentTats = float(input('percentage of time spent playing video games: '))
    ffMiles = float(input('frequent flyer miles earned per year: '))
    iceCream = float(input('liters of ice cream consumed per week: '))
    filename = 'datingTestSet.txt'
    datingDataMat, datingLabels = file2matrix(filename)
    normMat, ranges, minVals = autoNorm(datingDataMat)
    # the feature order must match the columns of the data file: miles, game time, ice cream
    inArr = np.array([ffMiles, percentTats, iceCream])
    norminArr = (inArr - minVals) / ranges
    classifierResult = classify0(norminArr, normMat, datingLabels, 3)
    print('You will probably %s this person' % (resultList[classifierResult - 1]))


if __name__ == '__main__':
    filename = 'datingTestSet.txt'
    datingDataMat, datingLabels = file2matrix(filename)
    showdatas(datingDataMat, datingLabels)
    datingClassTest()
    classifyPerson()
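
Code 4 below switches to sklearn's KNeighborsClassifier. For comparison, the same 10% hold-out test as datingClassTest can also be written with sklearn. This is only a sketch: it assumes the file2matrix and autoNorm functions and datingTestSet.txt from above, and the function name datingClassTestSklearn is mine.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def datingClassTestSklearn():
    datingDataMat, datingLabels = file2matrix('datingTestSet.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * 0.10)  # first 10% of the rows are the test set, the rest is training data
    neigh = KNeighborsClassifier(n_neighbors=4)
    neigh.fit(normMat[numTestVecs:m, :], datingLabels[numTestVecs:m])
    predictions = neigh.predict(normMat[0:numTestVecs, :])
    errorCount = np.sum(predictions != np.array(datingLabels[0:numTestVecs]))
    print('error rate: %f%%' % (errorCount / float(numTestVecs) * 100))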
           

Code 3:

Similar to Code 4, so it is omitted here.

Code 4:

Handwritten digit recognition

http://archive.ics.uci.edu/ml (the UCI Machine Learning Repository at the University of California, Irvine) hosts many datasets for validating machine learning algorithms.
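
Each sample in the digits data is a 32x32 text "image" of 0s and 1s, one file per digit, and the filename encodes the true label as the number before the underscore, which is what split('_')[0] in the code below recovers.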

from os import listdir
import numpy as np
from sklearn.neighbors import KNeighborsClassifier as KNN

def img2vector(filename):
    returnVect = np.zeros((1, 1024))
    # this KNN works on flat feature vectors and keeps no positional information,
    # so the 32x32 image is flattened into a 1x1024 vector (one reason CNNs are more powerful for images)
    fr = open(filename)
    for i in range(32):
        lineStr = fr.readline()
        for j in range(32):
            returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect


def handwritingClassTest():
    hwLabels = []
    trainingFileList = listdir('trainingDigits')
    m = len(trainingFileList)
    trainingMat = np.zeros((m, 1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        classNumber = int(fileNameStr.split('_')[0])  # the class is the digit before the underscore
        hwLabels.append(classNumber)
        trainingMat[i, :] = img2vector('trainingDigits/%s' % (fileNameStr))

    neigh = KNN(n_neighbors=3, algorithm='auto')
    neigh.fit(trainingMat, hwLabels)
    testFileList = listdir('testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        classNumber = int(fileNameStr.split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % (fileNameStr))
        classifierResult = neigh.predict(vectorUnderTest)
        print('predicted class: %d\ttrue class: %d' % (classifierResult, classNumber))
        if classifierResult != classNumber:
            errorCount += 1.0
    print('misclassified %d samples in total\nerror rate: %f%%' % (errorCount, errorCount / mTest * 100))


if __name__ == '__main__':
    handwritingClassTest()
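
A note on the sklearn calls: KNeighborsClassifier(n_neighbors=3, algorithm='auto') lets sklearn choose automatically between brute-force search, a k-d tree and a ball tree, and neigh.predict returns a NumPy array (of length 1 here), so classifierResult is an array rather than a plain int.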
           
