Press "Enter" to skip to content

Implementing CAPTCHA Recognition with TensorFlow 1.x and TensorFlow 2.x

I spent most of a recent holiday falling into the deep learning rabbit hole, starting with something tangible: CAPTCHA recognition. After a few days on Bilibili watching the 黑马程序员 (Heima) course "Master Python Deep Learning in 3 Days", I did two things: first, I typed out every piece of code mentioned in the tutorial; second, targeting a different TensorFlow version and drawing on other articles online, I reimplemented the same functionality with TensorFlow 2.x's Keras. Having written the code twice over, I came away with a healthy respect for just how formidable deep learning is.

 

I. Installing Anaconda

 

1. To save yourself some unnecessary trouble, install Anaconda first. Download it from: https://mirrors.tuna.tsinghua.edu.cn/anaconda/archive/

 

 

(This version supports Python 3.8.3.)

 

2. Once the download finishes, just run the installer. Enable whatever options it offers, especially the ones that set environment variables.

 

3. After installation, open the "Anaconda Prompt" and install TensorFlow with the following command:

 

pip install tensorflow-cpu==2.2.0 -i https://pypi.doubanio.com/simple/

 

(Note: this machine has no NVIDIA GPU, so only the CPU build is usable. Also, some posts suggest downloading an AVX build from GitHub to avoid the "AVX2" warning; go there and you will find... there is nothing usable to download, so just stick with this version.)

 

4. With TensorFlow installed as above, you can give it a quick test:

 

import tensorflow as tf
print(tf.__version__)

 

If you get the warning "Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2", you can simply suppress it as follows:

 

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # the value must be a string, not the integer 2
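Note that this environment variable only takes effect if it is set before TensorFlow is imported, so a complete version of the quick test looks roughly like this (a minimal sketch):

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # set before importing tensorflow so the C++ log filtering applies

import tensorflow as tf
print(tf.__version__)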

 

II. Because our code needs to run on different TensorFlow versions, and TensorFlow 1.x and 2.x are practically two different generations, we need to configure a second, lower-version environment in Anaconda.

 

1. First point conda at a domestic mirror: find the .condarc file in your user folder and edit it as follows:

 

channels:
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
show_channel_urls: true
ssl_verify: true
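To double-check that conda has picked up the mirror, you can read the channel configuration back (this simply prints what is now in .condarc):

conda config --show channels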

 

2. Open the Anaconda Prompt, create a new Python 3.5 environment in conda, activate it, and then install TensorFlow 1.8 and pandas:

 

conda create -n python3.5 python=3.5
conda activate python3.5
pip install tensorflow==1.8 -i https://pypi.doubanio.com/simple/
pip install pandas -i https://pypi.doubanio.com/simple/
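With the environment still activated, a quick way to confirm the right TensorFlow landed in it is to print the version from the command line (it should report 1.8.0):

python -c "import tensorflow as tf; print(tf.__version__)"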

 

III. The CAPTCHA recognition code

 

1. TensorFlow 1.8 version:

 

import tensorflow as tf
import glob, os,io,sys
import pandas as pd
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"]='2'
def read_pic():
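    # TF1 input pipeline: queue every 28x96 RGB PNG under ./code_imgs, decode it, and return batches of 40 (filename, image) pairs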
    file_list=glob.glob("./code_imgs/*.png")
    file_queue=tf.train.string_input_producer(file_list)
    reader=tf.WholeFileReader()
    filename,image=reader.read(file_queue)
    decoded=tf.image.decode_png(image,channels=3)
    decoded.set_shape([28,96,3])
    image_cast=tf.cast(decoded,tf.float32)
    filename_batch,image_batch=tf.train.batch([filename,image_cast],batch_size=40,num_threads=2,capacity=40)
    return filename_batch,image_batch
def parse_csv():
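    # load labels.csv (file number -> 4-letter text) and add a 'labels' column with each letter converted to an index 0-25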
    csv_data=pd.read_csv('labels.csv',names=['file_num','chars'],index_col='file_num')
    labels=[]
    for label in csv_data["chars"]:
        letter=[]
        for word in label:
            letter.append(ord(word)-ord('a'))
        labels.append(letter)
    csv_data['labels']=labels
    return csv_data
def filename2label(filenames,csv_data):
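    # look up the label indices for every filename in a batch, using the number embedded in the filename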
    labels=[]
    for filename in filenames:
        file_num="".join(filter(str.isdigit,str(filename)))
        target=csv_data.loc[int(file_num),"labels"]
        labels.append(target)
    return np.array(labels)
def create_weights(shape):
    return tf.Variable(initial_value=tf.random_normal(shape=shape,stddev=0.01))
def create_model(x):
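    # CNN: two conv + ReLU + max-pool blocks, then a fully connected layer producing 4*26 = 104 logits (4 letters, 26 classes each)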
    with tf.variable_scope('conv1'):
        conv1_weights=create_weights(shape=[5,5,3,32])
        conv1_bias=create_weights(shape=[32])
        conv1_x=tf.nn.conv2d(input=x,filter=conv1_weights,strides=[1,1,1,1],padding='SAME')+conv1_bias
        relu1_x=tf.nn.relu(conv1_x)
        pool1_x=tf.nn.max_pool(value=relu1_x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
   
    with tf.variable_scope('conv2'):
        conv2_weights=create_weights(shape=[5,5,32,64])
        conv2_bias=create_weights(shape=[64])
        conv2_x=tf.nn.conv2d(input=pool1_x,filter=conv2_weights,strides=[1,1,1,1],padding='SAME')+conv2_bias
        relu2_x=tf.nn.relu(conv2_x)
        pool2_x=tf.nn.max_pool(value=relu2_x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
    
    with tf.variable_scope('full_connection'):
        x_fc=tf.reshape(pool2_x,shape=[-1,7*24*64])
        weights_fc=create_weights(shape=[7*24*64,104])
        bias_fc=create_weights(shape=[104])
        y_predict=tf.matmul(x_fc,weights_fc)+bias_fc
    return y_predict
def list2text(textlist):
    tm=[]
    for i in textlist:
        tm.append(chr(97+i))
    return "".join(tm)
def train():
    filename,image=read_pic()
    csv_data=parse_csv()
    x=tf.placeholder(tf.float32,shape=[None,28,96,3])
    y_true=tf.placeholder(tf.float32,shape=[None,104])
    y_predict=create_model(x)
    loss_list=tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true,logits=y_predict)
    loss=tf.reduce_mean(loss_list)
    optimizer=tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)  # minimize the loss with Adam
    equal_list=tf.reduce_all(tf.equal(tf.argmax(tf.reshape(y_predict,shape=[-1,4,26]),axis=2), tf.argmax(tf.reshape(y_true,shape=[-1,4,26]),axis=2)),axis=1)
    accuracy=tf.reduce_mean(tf.cast(equal_list,tf.float32))
    init=tf.global_variables_initializer()
    saver=tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        coord=tf.train.Coordinator()
        threads=tf.train.start_queue_runners(sess=sess,coord=coord)
        try:
            for i in range(10000):
                filename_val,image_val=sess.run([filename,image])
                labels=filename2label(filename_val,csv_data)
                labels_value=tf.reshape(tf.one_hot(labels,depth=26),[-1,104]).eval()
                _,error,accuracy_value=sess.run([optimizer,loss,accuracy],feed_dict={x:image_val,y_true:labels_value})
                print("Train step %d --- loss: %f, accuracy: %f" % (i+1,error,accuracy_value))
                if accuracy_value>0.99:
                    saver.save(sess,'model/crack_captcha.model99',global_step=i)
                    break
        except tf.errors.OutOfRangeError:
            print("done ,now let's kill all threads")
        finally:
            coord.request_stop()
            print("all threads ask stop")
        coord.join(threads)
        print("all thread stopped")
def crackcaptcha():
    truetext=[]
    with open('a.txt','r') as f:
        for filename in f.readlines():
            truetext.append(filename.strip('\r\n'))
    dis='False'
    goodnum=0
    x=tf.placeholder(tf.float32,shape=[None,28,96,3])
    y_true=tf.placeholder(tf.float32,shape=[None,104])
    keep_prob = tf.placeholder(tf.float32)
    y_predict=create_model(x)
    saver=tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        checkpoint=tf.train.get_checkpoint_state('model')
        if checkpoint and checkpoint.model_checkpoint_path:
            saver.restore(sess,checkpoint.model_checkpoint_path)
            print("successfully loaded:",checkpoint.model_checkpoint_path)
        else:
            print("Could not found model files")
        
        for i in range(1,201):
            image=tf.read_file('crackimgs/'+str(i)+'.png')
            decoded=tf.image.decode_png(image,channels=3)
            decoded.set_shape([28,96,3])
            decoded_val=sess.run(decoded)
            image=np.array(decoded_val)
            predict=tf.argmax(tf.reshape(y_predict,[-1,4,26]),2)
            outtext=sess.run(predict,feed_dict={x:[image],keep_prob:1})
            cracktext=list2text(outtext[0].tolist())
            if cracktext==truetext[i-1]:
                goodnum+=1
                dis='True'
            else:
                dis='False'
            print('Image {}: true text: {}, predicted: {}, correct: {}'.format(i,truetext[i-1],cracktext,dis))
        print('Final accuracy: {}%'.format(goodnum/200*100))
if __name__=='__main__':
    train()
    crackcaptcha()
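For reference, each 4-letter label ends up as a flattened 4x26 one-hot vector, which is where the 104 units of the fully connected layer (and the [-1,104] reshapes) come from. A minimal NumPy sketch of that encoding, independent of the code above:

import numpy as np

def label_to_onehot(text):
    # 'abcd' -> indices [0, 1, 2, 3] -> one-hot matrix of shape (4, 26) -> flat vector of length 104
    indices = [ord(c) - ord('a') for c in text]
    return np.eye(26)[indices].reshape(-1)

print(label_to_onehot('abcd').shape)   # (104,)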

 

When running the code above, if you see warnings such as "dtypes.py:521: FutureWarning: Passing (type, 1) or '1type' as a synonym", open that dtypes.py file and change the line

 

np_resource = np.dtype([("resource", np.ubyte, 1)])

to:

np_resource = np.dtype([("resource", np.ubyte, (1,))])
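If you would rather not edit a library file, an alternative (just a sketch of the same idea) is to silence FutureWarnings before TensorFlow is imported:

import warnings
warnings.filterwarnings('ignore', category=FutureWarning)  # hides the NumPy-related FutureWarnings raised while importing TF 1.x

import tensorflow as tf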

 

2. TensorFlow 2.3 version:

 

import tensorflow as tf
import pandas as pd
import glob,random,os
import numpy as np
from PIL import Image
os.environ["TF_CPP_MIN_LOG_LEVEL"]='2'
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
            'v', 'w', 'x', 'y', 'z']
def text2vec(text):
    vector = np.zeros([4, 26])
    for i, c in enumerate(text):
        idx = alphabet.index(c)
        vector[i][idx] = 1.0
    return vector
def vec2text(vec):
    text = []
    for i, c in enumerate(vec):
        text.append(alphabet[c])
    return "".join(text)

def read_pic(batch_size):
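    # randomly sample batch_size images from code_imgs2; the file name (without path or .png) is the label text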
    batch_x = np.zeros([batch_size, 28, 96,3])
    batch_y = np.zeros([batch_size, 4, 26])
    file_list=glob.glob('code_imgs2/*.png')
    batchfile=np.random.choice(file_list,batch_size)  # randomly pick batch_size image files
    for i,filename in enumerate(batchfile):
        text=filename.replace('code_imgs2\\','').replace('.png','')
        image=tf.io.read_file(filename)
        image_ar=tf.io.decode_png(image)
        image_ar=tf.cast(image_ar,tf.float32)
        batch_x[i,:]=image_ar
        batch_y[i,:]=text2vec(text)
    return batch_x,batch_y
        
def crack_captcha_cnn():
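    # Keras CNN: three Conv2D + PReLU + MaxPool blocks, then Flatten -> Dense(4*26) -> Reshape(4, 26) -> per-character Softmax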
    model=tf.keras.Sequential()
    model.add(tf.keras.layers.Conv2D(filters=32,kernel_size=(3,3),activation="relu",input_shape=(28,96,3),padding="same"))
    model.add(tf.keras.layers.PReLU())
    model.add(tf.keras.layers.MaxPool2D((2,2),strides=2))
    model.add(tf.keras.layers.Conv2D(filters=64,kernel_size=(5,5),activation="relu",input_shape=(28,96,3),padding="same"))
    model.add(tf.keras.layers.PReLU())
    model.add(tf.keras.layers.MaxPool2D((2,2),strides=2))
    model.add(tf.keras.layers.Conv2D(filters=128,kernel_size=(5,5),activation="relu",input_shape=(28,96,3),padding="same"))
    model.add(tf.keras.layers.PReLU())
    model.add(tf.keras.layers.MaxPool2D((2,2),strides=2))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(4*26))
    model.add(tf.keras.layers.Reshape([4,26]))
    model.add(tf.keras.layers.Softmax())
    return model
def train():
    model=crack_captcha_cnn()
    model.compile(optimizer='Adam',metrics=['accuracy'],loss='categorical_crossentropy')
    for i in range(200):
        batch_x,batch_y=read_pic(256)
        model.fit(batch_x,batch_y,epochs=4)
        if i%20==0 and i>0:
            model.save('slj_tf2_model')
def predict():
    model=tf.keras.models.load_model('slj_tf2_model')
    file_list=glob.glob('crackimgs2/*.png')
    true_count=0
    for filename in file_list:
        data_x = np.zeros([1, 28, 96,3])
        image=tf.io.read_file(filename)
        image_ar=tf.io.decode_png(image)
        image_ar=tf.cast(image_ar,tf.float32)
        data_x[0,:]=image_ar
        prediction_value = model.predict(data_x)
        predict=tf.argmax(tf.reshape(prediction_value,[-1,4,26]),2)
        index_ar=predict.numpy().tolist()
        crack_text=vec2text(index_ar[0])
        true_text=filename.replace('crackimgs2\\','').replace('.png','')
        if crack_text==true_text:
            true_count+=1
        print('True text: {}; predicted: {}'.format(true_text,crack_text))
    print('{} images in total, {} correct, accuracy: {}%'.format(len(file_list),true_count,true_count/len(file_list)*100))
if __name__=='__main__':
#    train()   # uncomment to train and save the model before running predict()
    predict()
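To use it, run train() first (uncomment it in the __main__ block) so that the model gets saved to slj_tf2_model, then run predict(). As a quick sanity check before training, you can also instantiate the untrained network defined above and print its structure; the final layers should report a (4, 26) output per image:

model = crack_captcha_cnn()
model.summary()   # prints each layer's output shape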

 

IV. The image files and the CSV file the program needs can be downloaded here:

 

Link: https://pan.baidu.com/s/15npPVXnUEmRCNo1KfqeLOQ  Access code: kpr1
