Optimize model handling: switch to TF-based loading

pull/726/head^2
xianping.wen 2019-09-25 10:49:24 +08:00
parent bc337041d5
commit d7f1172272
3 changed files with 92 additions and 41 deletions

View File

@@ -1,4 +1,6 @@
# coding=utf-8
import base64
import threading
import unittest
from collections import OrderedDict
@@ -63,6 +65,19 @@ class testAll(unittest.TestCase):
        ua = UserAgent(verify_ssl=False)
        print(ua.random)

    def testVerfyImage(self):
        """
        Test loading the local model and recognizing a captcha image.
        :return:
        """
        from verify.localVerifyCode import Verify
        v = Verify()
        with open('../tkcode.png', 'rb') as f:
            base64Image = base64.b64encode(f.read())
        for i in range(5):
            t = threading.Thread(target=v.verify, args=(base64Image,))
            t.start()


if __name__ == '__main__':
    unittest.main()
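
The new test above fires five threads at one shared Verify instance but never joins them, so an exception raised inside a worker thread would go unnoticed. A stricter variant is sketched below; it assumes the same Verify API and ../tkcode.png sample image from the diff, and the worker/lock bookkeeping is added here for illustration only, it is not part of the commit.

# Sketch (not part of the commit): join the threads and collect the results.
import base64
import threading

from verify.localVerifyCode import Verify

v = Verify()
with open('../tkcode.png', 'rb') as f:
    base64Image = base64.b64encode(f.read())

results = []
lock = threading.Lock()


def worker():
    # verify() returns the clickable positions as a list of strings
    r = v.verify(base64Image)
    with lock:
        results.append(r)


threads = [threading.Thread(target=worker) for _ in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(results)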

View File

@@ -3,7 +3,9 @@ from PIL import Image
from config.urlConf import urls
from myUrllib.httpUtils import HTTPClient
from verify.localVerifyCode import verify
from verify.localVerifyCode import Verify
v = Verify()
def getRandCode(is_auto_code, auto_code_type, result):
@@ -17,7 +19,7 @@ def getRandCode(is_auto_code, auto_code_type, result):
print(u"打码兔已关闭, 如需使用自动识别,请使用如果平台 auto_code_type == 2")
return
elif auto_code_type == 2:
Result = verify(result)
Result = v.verify(result)
return codexy(Ofset=Result, is_raw_input=False)
elif auto_code_type == 3:
print("您已设置使用云打码,但是服务器资源有限,请尽快改为本地打码")

View File

@@ -1,19 +1,27 @@
# coding: utf-8
import TickerConfig

if TickerConfig.AUTO_CODE_TYPE == 2:
    import base64
    import os

    import cv2
    import numpy as np
    from keras import models, backend
    import tensorflow as tf

    from verify import pretreatment
    from verify.mlearn_for_image import preprocess_input

    graph = tf.get_default_graph()

    PATH = lambda p: os.path.abspath(
        os.path.join(os.path.dirname(__file__), p)
    )

    TEXT_MODEL = ""
    IMG_MODEL = ""


def get_text(img, offset=0):
    text = pretreatment.get_text(img, offset)
    text = cv2.cvtColor(text, cv2.COLOR_BGR2GRAY)
@@ -34,48 +42,74 @@ def base64_to_image(base64_code):
    return img


def verify(fn):
    backend.clear_session()
    verify_titles = ['打字机', '调色板', '跑步机', '毛线', '老虎', '安全帽', '沙包', '盘子', '本子', '药片', '双面胶', '龙舟', '红酒', '拖把', '卷尺', '海苔', '红豆', '黑板', '热水袋', '烛台', '钟表', '路灯', '沙拉', '海报', '公交卡', '樱桃', '创可贴', '牌坊', '苍蝇拍', '高压锅', '电线', '网球拍', '海鸥', '风铃', '订书机', '冰箱', '话梅', '排风机', '锅铲', '绿豆', '航母', '电子秤', '红枣', '金字塔', '鞭炮', '菠萝', '开瓶器', '电饭煲', '仪表盘', '棉棒', '篮球', '狮子', '蚂蚁', '蜡烛', '茶盅', '印章', '茶几', '啤酒', '档案袋', '挂钟', '刺绣', '铃铛', '护腕', '手掌印', '锦旗', '文具盒', '辣椒酱', '耳塞', '中国结', '蜥蜴', '剪纸', '漏斗', '', '蒸笼', '珊瑚', '雨靴', '薯条', '蜜蜂', '日历', '口哨']
    # read and preprocess the captcha image
    img = base64_to_image(fn)
    text = get_text(img)
    imgs = np.array(list(pretreatment._get_imgs(img)))
    imgs = preprocess_input(imgs)
    text_list = []
    # recognize the question text
    model = models.load_model(PATH('../model.v2.0.h5'))
    label = model.predict(text)
    label = label.argmax()
    text = verify_titles[label]
    text_list.append(text)
    # get the next keyword
    # locate the second keyword using the length of the first one
    if len(text) == 1:
        offset = 27
    elif len(text) == 2:
        offset = 47
    else:
        offset = 60
    text = get_text(img, offset=offset)
    if text.mean() < 0.95:
        label = model.predict(text)
class Verify:
    def __init__(self):
        self.textModel = ""
        self.imgModel = ""
        self.loadImgModel()
        self.loadTextModel()

    def loadTextModel(self):
        if not self.textModel:
            self.textModel = models.load_model(PATH('../model.v2.0.h5'))

    def loadImgModel(self):
        if not self.imgModel:
            self.imgModel = models.load_model(PATH('../12306.image.model.h5'))

    def verify(self, fn):
        verify_titles = ['打字机', '调色板', '跑步机', '毛线', '老虎', '安全帽', '沙包', '盘子', '本子', '药片', '双面胶', '龙舟', '红酒', '拖把', '卷尺',
                         '海苔', '红豆', '黑板', '热水袋', '烛台', '钟表', '路灯', '沙拉', '海报', '公交卡', '樱桃', '创可贴', '牌坊', '苍蝇拍', '高压锅',
                         '电线', '网球拍', '海鸥', '风铃', '订书机', '冰箱', '话梅', '排风机', '锅铲', '绿豆', '航母', '电子秤', '红枣', '金字塔', '鞭炮',
                         '菠萝', '开瓶器', '电饭煲', '仪表盘', '棉棒', '篮球', '狮子', '蚂蚁', '蜡烛', '茶盅', '印章', '茶几', '啤酒', '档案袋', '挂钟', '刺绣',
                         '铃铛', '护腕', '手掌印', '锦旗', '文具盒', '辣椒酱', '耳塞', '中国结', '蜥蜴', '剪纸', '漏斗', '', '蒸笼', '珊瑚', '雨靴', '薯条',
                         '蜜蜂', '日历', '口哨']
        # read and preprocess the captcha image
        img = base64_to_image(fn)
        text = get_text(img)
        imgs = np.array(list(pretreatment._get_imgs(img)))
        imgs = preprocess_input(imgs)
        text_list = []
        # recognize the question text
        self.loadTextModel()
        global graph
        with graph.as_default():
            label = self.textModel.predict(text)
        label = label.argmax()
        text = verify_titles[label]
        text_list.append(text)
print("题目为{}".format(text_list))
# 加载图片分类器
model = models.load_model(PATH('../12306.image.model.h5'))
labels = model.predict(imgs)
labels = labels.argmax(axis=1)
results = []
for pos, label in enumerate(labels):
l = verify_titles[label]
print(pos+1, l)
if l in text_list:
results.append(str(pos+1))
return results
        # get the next keyword
        # locate the second keyword using the length of the first one
        if len(text) == 1:
            offset = 27
        elif len(text) == 2:
            offset = 47
        else:
            offset = 60
        text = get_text(img, offset=offset)
        if text.mean() < 0.95:
            with graph.as_default():
                label = self.textModel.predict(text)
            label = label.argmax()
            text = verify_titles[label]
            text_list.append(text)
        print("Question keywords: {}".format(text_list))
        # load the image classifier
        self.loadImgModel()
        with graph.as_default():
            labels = self.imgModel.predict(imgs)
        labels = labels.argmax(axis=1)
        results = []
        for pos, label in enumerate(labels):
            l = verify_titles[label]
            print(pos + 1, l)
            if l in text_list:
                results.append(str(pos + 1))
        return results
if __name__ == '__main__':
verify("verify-img1.jpeg")
pass
# verify("verify-img1.jpeg")
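
The core of this commit is the TF1 default-graph handling: `graph = tf.get_default_graph()` captures the graph in which the Keras models are loaded, and every `predict()` call is wrapped in `with graph.as_default():` so worker threads (like those in the new test) resolve tensors against the graph that actually holds the weights rather than an empty per-thread default graph. A stripped-down sketch of the same pattern follows; the tiny Dense model is a stand-in for the repository's .h5 models, not code from this project.

# Sketch of the thread-safe prediction pattern used above (TF1-style Keras).
import threading

import numpy as np
import tensorflow as tf
from keras import layers, models

graph = tf.get_default_graph()  # graph created in the main thread

# a tiny stand-in model; the real code loads model.v2.0.h5 / 12306.image.model.h5
model = models.Sequential([layers.Dense(2, input_shape=(4,), activation='softmax')])


def predict_in_worker(batch):
    # without as_default(), a worker thread sees its own empty default graph
    # and predict() typically fails with "Tensor ... is not an element of this graph"
    with graph.as_default():
        print(model.predict(batch).argmax(axis=1))


t = threading.Thread(target=predict_in_worker, args=(np.zeros((1, 4)),))
t.start()
t.join()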