```import numpy as np
import matplotlib.pyplot as plt
x = [0,0,1,1]
y = [0,1,0,1]
plt.scatter(x[0],y[0], color="red",label="negative")
plt.scatter(x[1:],y[1:], color="green",label="positive")
plt.legend(loc="best")
plt.show()```

```def decide(data,label,w,b):
result = w[0] * data[0] + w[1] * data[1] - b
print("result = ",result)
if np.sign(result) * label <= 0:
w[0] += 1 * (label - result) * data[0]
w[1] += 1 * (label - result) * data[1]
b += 1 * (label - result)*(-1)
return w,b```

```def run(data, label):
w,b = [0,0],0
for epoch in range(10):
for item in zip(data, label):
dataset,labelset = item[0],item[1]
w,b = decide(dataset, labelset, w, b)
print("dataset = ",dataset, ",", "w = ",w,",","b = ",b)
print(w,b)```

```data = [(0,0),(0,1),(1,0),(1,1)]
# OR truth table: only (0, 0) carries label 0 (negative); the other three are positive.
label = [0,1,1,1]```

`run(data,label)`

```result =  0
dataset =  (0, 0) , w =  [0, 0] , b =  0
result =  0
dataset =  (0, 1) , w =  [0, 1] , b =  -1
result =  1
dataset =  (1, 0) , w =  [0, 1] , b =  -1
result =  2
dataset =  (1, 1) , w =  [0, 1] , b =  -1
result =  1
dataset =  (0, 0) , w =  [0, 1] , b =  0
result =  1
dataset =  (0, 1) , w =  [0, 1] , b =  0
result =  0
dataset =  (1, 0) , w =  [1, 1] , b =  -1
result =  3
dataset =  (1, 1) , w =  [1, 1] , b =  -1
result =  1
dataset =  (0, 0) , w =  [1, 1] , b =  0
result =  1
dataset =  (0, 1) , w =  [1, 1] , b =  0
result =  1
```

```import os
import pylab as pl
import numpy as np
import pandas as pd```

`os.chdir(r"DataSets\pima-indians-diabetes-database")  # enter the dataset folder (Windows-style relative path)`

`pima = np.loadtxt("pima.txt", delimiter=",", skiprows=1)  # skip the header row; cols 0-7 are features, col 8 the class label`

`pima.shape`

`(768, 9)`

```# Row indices of the two classes (column 8 is the 0/1 diabetes label).
indices0 = np.where(pima[:,8]==0)
indices1 = np.where(pima[:,8]==1)```

```# Scatter the first two feature columns: green circles = class 0, red crosses = class 1.
pl.ion()
pl.plot(pima[indices0,0],pima[indices0,1],"go")
pl.plot(pima[indices1,0],pima[indices1,1],"rx")
pl.show()```

1.将年龄离散化

```# Discretise age (column 7) into the codes 1-5.
# NOTE: line order matters — each np.where re-reads the already modified
# column, and the codes 1-5 are themselves <= 30, so the <=30 rule must
# run first or a reordered rule would re-bin the codes.
pima[np.where(pima[:,7]<=30),7] = 1
pima[np.where((pima[:,7]>30) & (pima[:,7]<=40)),7] = 2
pima[np.where((pima[:,7]>40) & (pima[:,7]<=50)),7] = 3
pima[np.where((pima[:,7]>50) & (pima[:,7]<=60)),7] = 4
pima[np.where(pima[:,7]>60),7] = 5```

2.将女性的怀孕次数大于8次的统一用8次代替

`pima[np.where(pima[:,0]>8),0] = 8  # replace pregnancy counts greater than 8 with 8`

3.将数据标准化处理

```pima[:,:8] = pima[:,:8]-pima[:,:8].mean(axis=0)
pima[:,:8] = pima[:,:8]/pima[:,:8].var(axis=0)```

4.切分训练集和测试集

```# Even rows -> training set, odd rows -> test set (a simple 50/50 split);
# column 8 kept 2-D (shape (n, 1)) via the 8:9 slice.
trainin = pima[::2,:8]
testin = pima[1::2,:8]
traintgt = pima[::2,8:9]
testtgt = pima[1::2,8:9]```

```class Perceptron:
def __init__(self, inputs, targets):
# 设置网络规模
# 记录输入向量的维度，神经元的维度要和它相等
if np.ndim(inputs) > 1:
self.nIn = np.shape(inputs)[1]
else:
self.nIn = 1

# 记录目标向量的维度，神经元的个数要和它相等
if np.ndim(targets) > 1:
self.nOut = np.shape(targets)[1]
else:
self.nOut = 1

# 记录输入向量的样本个数
self.nData = np.shape(inputs)[0]

# 初始化网络，这里加1是为了包含偏置项
self.weights = np.random.rand(self.nIn + 1, self.nOut) * 0.1 - 0.05

def train(self, inputs, targets, eta, epoch):
"""训练环节"""
# 和前面处理偏置项同步地，这里对输入样本加一项-1，与W0相匹配
inputs = np.concatenate((inputs, -np.ones((self.nData,1))),axis=1)

for n in range(epoch):
self.activations = self.forward(inputs)
self.weights -= eta * np.dot(np.transpose(inputs), self.activations - targets)
return self.weights

def forward(self, inputs):
"""神经网路前向传播环节"""
# 计算
activations = np.dot(inputs, self.weights)
# 判断是否激活
return np.where(activations>0, 1, 0)

def confusion_matrix(self, inputs, targets):
# 计算混淆矩阵
inputs = np.concatenate((inputs, -np.ones((self.nData,1))),axis=1)
outputs = np.dot(inputs, self.weights)
nClasses = np.shape(targets)[1]

if nClasses == 1:
nClasses = 2
outputs = np.where(outputs<0, 1, 0)
else:
outputs = np.argmax(outputs, 1)
targets = np.argmax(targets, 1)

cm = np.zeros((nClasses, nClasses))
for i in range(nClasses):
for j in range(nClasses):
cm[i,j] = np.sum(np.where(outputs==i, 1,0) * np.where(targets==j, 1, 0))
print(cm)
print(np.trace(cm)/np.sum(cm))```

```print("Output after preprocessing of data")
p = Perceptron(trainin,traintgt)
# NOTE(review): the ~0.302 accuracy printed below is 1 - 0.698, which is
# consistent with confusion_matrix thresholding via np.where(outputs<0, 1, 0),
# i.e. predictions inverted relative to forward() — verify that comparison.
p.train(trainin,traintgt,0.15,10000)
p.confusion_matrix(testin,testtgt)```

```Output after preprocessing of data
[[ 69.  86.]
[182.  47.]]
0.3020833333333333```

### step 1：首先导入所需的包，并且设置好数据所在路径

```import numpy as np
import pandas as pd
import matplotlib.pyplot as plt```

```train = pd.read_csv(r"DataSets\Digit_Recognizer\train.csv", engine="python")

```# Report the dimensions; the training frame has one extra column (785 vs
# 784) — the label — as the printed output below shows.
print("Training set has {0[0]} rows and {0[1]} columns".format(train.shape))
print("Test set has {0[0]} rows and {0[1]} columns".format(test.shape))```

```Training set has 42000 rows and 785 columns
Test set has 28000 rows and 784 columns```

### step 2：数据预处理

```# The label column: one digit (0-9) per training row.
trainlabels = train.label
trainlabels.shape```

`(42000,)`

```# Keep only the pixel columns (pixel0..pixel783); np.asmatrix keeps row
# indexing 2-D, so traindata[i:j] stays a (rows, 784) matrix.
traindata = np.asmatrix(train.loc[:,"pixel0":])
traindata.shape```

`(42000, 784)`

```# One weight vector per digit class: 10 classes x 784 pixel weights, zero-initialised.
weights = np.zeros((10,784))
weights.shape```

`(10, 784)`

```# Take an arbitrary row from the matrix
samplerow = traindata[123:124]
# Reshape it back to 28x28
samplerow = np.reshape(samplerow, (28,28))
plt.imshow(samplerow, cmap="hot")```

### step 3：训练

```# 先创建一个列表，用来记录每一轮训练的错误率
errors = []
epoch = 20
for epoch in range(epoch):
err = 0
# 对每一个样本（亦矩阵中的每一行）
for i, data in enumerate(traindata):
# 创建一个列表，用来记录每个神经元输出的值
output = []
# 对每个神经元都做点乘操作，并记录下输出值
for w in weights:
output.append(np.dot(data, w))
# 这里简单的取输出值最大者为最有可能的
guess = np.argmax(output)
# 实际的值为标签列表中对应项
actual = trainlabels[i]

# 如果估计值和实际值不同，则分类错误，需要更新权重向量
if guess != actual:
weights[guess] = weights[guess] - data
weights[actual] = weights[actual] + data
err += 1
# 计算迭代完42000个样本之后，错误率 = 错误次数/样本个数
errors.append(err/42000)```

```x = list(range(20))
plt.plot(x, errors)```

`[<matplotlib.lines.Line2D at 0x5955c50>]`