
Python Good/Bad Quality Inspection Classification Example


Good/bad quality inspection classification, hands-on task:

 

1. Based on data_class_raw.csv, use the Gaussian distribution probability density function to find and remove anomaly points.
2. Based on data_class_processed.csv, perform PCA to determine the important data dimensions and components.
3. Split the data with random_state=4 and test_size=0.4.
4. Build a KNN model with n_neighbors=10, compute the classification accuracy, and visualize the decision boundary.
5. Compute the confusion matrix on the test set, then the accuracy, recall, specificity, precision, and F1 score.
6. Try different n_neighbors values (1-20), compute the accuracy on the training and test sets, and plot the results.

 

#load the data
import pandas as pd
import numpy as np
data = pd.read_csv('data_class_raw.csv')
data.head()

 

     x1    x2  y
0  0.77  3.97  0
1  1.71  2.81  0
2  2.18  1.31  0
3  3.80  0.69  0
4  5.21  1.14  0

 

#define X and y
X = data.drop(['y'],axis=1)
y = data.loc[:,'y']

 

type(X)  #check the data type of X

pandas.core.frame.DataFrame

 

#visualize the raw data
%matplotlib inline
from matplotlib import pyplot as plt
fig1 = plt.figure(figsize=(5,5))
bad = plt.scatter(X.loc[:,'x1'][y==0],X.loc[:,'x2'][y==0])
good = plt.scatter(X.loc[:,'x1'][y==1],X.loc[:,'x2'][y==1])
plt.legend((good,bad),('good','bad'))
plt.title('raw data')
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()

 

 

#anomaly detection
from sklearn.covariance import EllipticEnvelope
ad_model = EllipticEnvelope(contamination=0.02)
ad_model.fit(X[y==0])#fit the model on the bad-class samples only
y_predict_bad = ad_model.predict(X[y==0])
print(y_predict_bad)

 

[ 1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1  1 -1]
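Step 1 of the task mentions the Gaussian probability density function; EllipticEnvelope is one way to apply that idea, since it fits a (robust) Gaussian to the data and flags the lowest-density points. Purely as an illustration of the density view, the sketch below fits a plain multivariate normal to the bad-class points and flags the lowest-density 2%; it assumes scipy is available, uses a non-robust mean/covariance estimate, and the variable names are only illustrative.

from scipy.stats import multivariate_normal

X_bad = X[y==0].values                                      #bad-class samples only
mu = X_bad.mean(axis=0)                                     #mean vector of the fitted Gaussian
cov = np.cov(X_bad, rowvar=False)                           #covariance matrix of the fitted Gaussian
density = multivariate_normal(mean=mu, cov=cov).pdf(X_bad)  #density of each bad-class point
threshold = np.percentile(density, 2)                       #~2% contamination, matching EllipticEnvelope above
print(np.where(density < threshold)[0])                     #indices of the flagged low-density points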

 

fig2 = plt.figure(figsize=(5,5))
bad = plt.scatter(X.loc[:,'x1'][y==0],X.loc[:,'x2'][y==0])
good = plt.scatter(X.loc[:,'x1'][y==1],X.loc[:,'x2'][y==1])
#replot the data and mark the detected anomaly points so they can be removed
plt.scatter(X.loc[:,'x1'][y==0][y_predict_bad==-1],X.loc[:,'x2'][y==0][y_predict_bad==-1],marker='x',s=150)
plt.legend((good,bad),('good','bad'))
plt.title('anomaly detection result')
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
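The next step switches to data_class_processed.csv, which is loaded ready-made below. Purely as an illustration, a cleaned dataset could be produced from the detection above along these lines (here anomalies are only removed from the bad class, matching the fit above, and the output filename is hypothetical):

anomaly_index = X[y==0].index[y_predict_bad==-1]                  #row labels of the flagged bad-class points
data_clean = data.drop(index=anomaly_index)                       #drop them from the raw data
data_clean.to_csv('data_class_processed_demo.csv', index=False)   #hypothetical output file
print(data_clean.shape)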

 

 

#based on data_class_processed.csv, perform PCA to determine the important dimensions and components
data = pd.read_csv('data_class_processed.csv')
data.head()
#define X and y
X = data.drop(['y'],axis=1)
y = data.loc[:,'y']

 

#PCA
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
X_norm = StandardScaler().fit_transform(X) #standardize the features
pca = PCA(n_components=2) #create a PCA instance with 2 components
X_reduced = pca.fit_transform(X_norm) #project the standardized data onto the principal components
var_ratio = pca.explained_variance_ratio_ #proportion of variance explained by each principal component
print(var_ratio)
fig4 = plt.figure(figsize=(5,5))
plt.bar([1,2],var_ratio)
plt.show() #both components explain a large share of the variance, so keep both dimensions

 

[0.51664723 0.48335277]
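To make the "keep both dimensions" decision more explicit, a common check is the cumulative explained variance. A minimal sketch reusing X_norm from above; the 95% threshold is an illustrative choice, not part of the original task:

pca_full = PCA()                                           #fit with all components kept
pca_full.fit(X_norm)
cum_ratio = np.cumsum(pca_full.explained_variance_ratio_)  #cumulative explained variance
print(cum_ratio)
n_keep = int(np.argmax(cum_ratio >= 0.95)) + 1             #smallest number of components reaching 95%
print(n_keep)                                              #here both components are needed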

 

 

#split the data, with random_state=4 and test_size=0.4
from sklearn.model_selection import train_test_split
#random_state is the random seed: leaving it unset gives a different split every run, while a fixed value such as 4 makes the split reproducible; test_size: a float in (0,1) is the proportion of test samples, an integer is an absolute count
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=4,test_size=0.4)
print(X_train.shape,X_test.shape,X.shape)
#the data is split into 21 training samples and 15 test samples

 

(21, 2) (15, 2) (36, 2)
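A note on the split sizes: with 36 samples and test_size=0.4, scikit-learn uses ceil(36 * 0.4) = ceil(14.4) = 15 samples for the test set, which leaves 21 for training and matches the shapes printed above.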

 

#build a KNN model with n_neighbors=10, compute the classification accuracy, and visualize the decision boundary
from sklearn.neighbors import KNeighborsClassifier
knn_10 = KNeighborsClassifier(n_neighbors=10)
knn_10.fit(X_train,y_train)
y_train_predict = knn_10.predict(X_train)
y_test_predict = knn_10.predict(X_test)
#calculate the accuracy
from sklearn.metrics import accuracy_score
accuracy_train = accuracy_score(y_train,y_train_predict)
accuracy_test = accuracy_score(y_test,y_test_predict)
print("trianing accuracy:",accuracy_train)
print('testing accuracy:',accuracy_test)

 

training accuracy: 0.9047619047619048
testing accuracy: 0.5333333333333333

 

#visualize the knn result and decision boundary: build a mesh grid over the feature space
xx, yy = np.meshgrid(np.arange(0,10,0.05),np.arange(0,10,0.05))
print(yy.shape)

 

(200, 200)

 

x_range = np.c_[xx.ravel(),yy.ravel()]#flatten the grid into a list of (x1,x2) points
print(x_range.shape)

 

(40000, 2)

 

y_range_predict = knn_10.predict(x_range)

 

fig4 = plt.figure(figsize=(10,10))
knn_bad = plt.scatter(x_range[:,0][y_range_predict==0],x_range[:,1][y_range_predict==0])
knn_good = plt.scatter(x_range[:,0][y_range_predict==1],x_range[:,1][y_range_predict==1])
bad = plt.scatter(X.loc[:,'x1'][y==0],X.loc[:,'x2'][y==0])
good = plt.scatter(X.loc[:,'x1'][y==1],X.loc[:,'x2'][y==1])
plt.legend((good,bad,knn_good,knn_bad),('good','bad','knn_good','knn_bad'))
plt.title('prediction result')
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
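The boundary above is drawn by scatter-plotting every point of the mesh grid; an alternative that is often cleaner is to reshape the grid predictions and draw filled contours. A minimal sketch reusing xx, yy and knn_10 from above (the figure name and alpha value are arbitrary choices):

Z = knn_10.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)  #predicted class for each grid point
fig_boundary = plt.figure(figsize=(10,10))
plt.contourf(xx, yy, Z, alpha=0.3)                                   #shaded prediction regions
plt.scatter(X.loc[:,'x1'][y==0], X.loc[:,'x2'][y==0], label='bad')
plt.scatter(X.loc[:,'x1'][y==1], X.loc[:,'x2'][y==1], label='good')
plt.legend()
plt.title('prediction result (contour view)')
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()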

 

 

#compute the confusion matrix on the test set, then the accuracy, recall, specificity, precision and F1 score
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test,y_test_predict)
print(cm)

 

[[4 3]
 [4 4]]

 

#first-level metrics: TP, FN, FP, TN
TP = cm[1,1]
TN = cm[0,0]
FP = cm[0,1]
FN = cm[1,0]
print(TP,TN,FP,FN)

 

4 4 3 4
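Indexing the matrix by hand works because sklearn's confusion_matrix puts true labels on the rows and predictions on the columns, with class 0 first and class 1 second. An equivalent, slightly less error-prone idiom uses ravel():

TN, FP, FN, TP = confusion_matrix(y_test,y_test_predict).ravel()  #for a 2x2 matrix the order is TN, FP, FN, TP
print(TP,TN,FP,FN)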

 

#second-level metrics: Accuracy, Precision, Recall, Specificity
#these four metrics convert the raw counts in the confusion matrix into ratios between 0 and 1, which makes the evaluation easier to standardize and compare

 

Accuracy: the proportion of all samples that are predicted correctly

Accuracy = (TP + TN)/(TP + TN + FP + FN)

accuracy = (TP + TN)/(TP + TN + FP + FN)
print(accuracy)

 

0.5333333333333333

 

Sensitivity (Recall): the proportion of positive samples that are predicted correctly

Sensitivity = Recall = TP/(TP + FN)

recall = TP/(TP + FN)
print(recall)

 

0.5

 

Specificity: the proportion of negative samples that are predicted correctly

Specificity = TN/(TN + FP)

specificity = TN/(TN + FP)
print(specificity)

 

0.5714285714285714

 

Precision: among the samples predicted as positive, the proportion that are actually positive

Precision = TP/(TP + FP)

precision = TP/(TP + FP)
print(precision)

 

0.5714285714285714

 

#third-level metric: the F1-Score ranges from 0 to 1, where 1 is the best possible output and 0 the worst

 

F1 score: a single metric that combines Precision and Recall

F1 Score = 2 * Precision * Recall / (Precision + Recall)

f1 = 2*precision*recall/(precision+recall)
print(f1)

 

0.5333333333333333
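The hand-computed metrics can be cross-checked against sklearn's built-in helpers; specificity has no dedicated function, but it equals the recall of the negative class. A minimal sketch reusing y_test and y_test_predict:

from sklearn.metrics import precision_score, recall_score, f1_score
print(accuracy_score(y_test,y_test_predict))              #accuracy
print(recall_score(y_test,y_test_predict))                #recall / sensitivity of the positive class
print(recall_score(y_test,y_test_predict,pos_label=0))    #specificity = recall of the negative class
print(precision_score(y_test,y_test_predict))             #precision
print(f1_score(y_test,y_test_predict))                    #F1 score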

 

#try different n_neighbors values (1-20), compute the accuracy on the training and test sets, and plot the results
n = [i for i in range(1,21)]#n_neighbors values from 1 to 20
accuracy_train = []
accuracy_test = []
for i in n:
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train,y_train)
    y_train_predict = knn.predict(X_train)
    y_test_predict = knn.predict(X_test)
    accuracy_train_i = accuracy_score(y_train,y_train_predict)
    accuracy_test_i = accuracy_score(y_test,y_test_predict)
    accuracy_train.append(accuracy_train_i)
    accuracy_test.append(accuracy_test_i)

 

fig5 = plt.figure(figsize=(12,5))
plt.subplot(121)
plt.plot(n,accuracy_train,marker='o')
plt.title('training accuracy vs n_neighbors')
plt.xlabel('n_neighbors')
plt.ylabel('accuracy')
plt.subplot(122)
plt.plot(n,accuracy_test,marker='o')
plt.title('testing accuracy vs n_neighbors')
plt.xlabel('n_neighbors')
plt.ylabel('accuracy')
plt.show()
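Because the test set here has only 15 samples, the single-split accuracies above are fairly noisy; cross-validation is a common, more stable way to pick n_neighbors. A minimal sketch with 5-fold CV on the training data (the fold count is an illustrative choice, and the range stops at 15 because each cross-validation training fold only contains about 16-17 of the 21 training samples):

from sklearn.model_selection import cross_val_score
k_values = range(1,16)                                    #keep n_neighbors below the smallest training-fold size
cv_scores = []
for i in k_values:
    knn = KNeighborsClassifier(n_neighbors=i)
    scores = cross_val_score(knn, X_train, y_train, cv=5, scoring='accuracy')
    cv_scores.append(scores.mean())
best_k = list(k_values)[int(np.argmax(cv_scores))]        #n_neighbors with the best mean CV accuracy
print(best_k, max(cv_scores))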

 

 

Good/bad quality inspection classification, summary:

 

1. Anomaly detection helped find the potential outlier points in the data.
2. PCA analysis showed that both data dimensions should be kept.
3. The data was split into training and test sets, and the model's prediction accuracy on the test data was computed.
4. The confusion matrix was computed, giving a more complete evaluation of the model.
5. A new method was used to visualize the classification decision boundary.
6. By tuning the core parameter n_neighbors and computing the corresponding accuracies, we can better decide which model to use.
