# -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 21:21:46 2018
@author: yy
"""
import numpy as np
from keras.models import Model
from keras.layers import (Input, Lambda, Conv2D, MaxPooling2D, Reshape,
                          Concatenate, Dense, Dropout, GRU, Add)
import keras.backend as K

# 62 recognisable characters; with label_count = 63 the last index (62) is the CTC blank
char_set = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
image_size = (128, 32)  # (width, height) of the input images
IMAGE_HEIGHT = image_size[1]
IMAGE_WIDTH = image_size[0]


def get_gru_ctc_model(image_size=image_size,
                      seq_len=8,         # maximum number of characters per image
                      label_count=63):   # number of classes (62 characters + 1 CTC blank)
    # image_size is (width, height); width comes first in the input tensor so that
    # the time axis fed to the CTC runs along the image width
    img_width, img_height = image_size
    input_tensor = Input((img_width, img_height, 1))
    x = input_tensor
    # three Conv -> MaxPool blocks (32, 64, 128 filters), each halving both spatial dims
    for i in range(3):
        x = Conv2D(32 * 2 ** i, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    # feature map is (batch, width/8, height/8, 128); fold height and channels together
    # so that every position along the width becomes one time step
    conv_shape = x.get_shape()
    x = Reshape(target_shape=(int(conv_shape[1]), int(conv_shape[2] * conv_shape[3])))(x)
    x = Dense(32, activation='relu')(x)
    # two pairs of forward/backward GRUs: the first pair is summed,
    # the second pair is concatenated before the softmax
    gru_1 = GRU(32, return_sequences=True, kernel_initializer='he_normal', name='gru1')(x)
    gru_1b = GRU(32, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(x)
    gru1_merged = Add()([gru_1, gru_1b])
    gru_2 = GRU(32, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(32, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
    x = Concatenate()([gru_2, gru_2b])
    x = Dropout(0.25)(x)
    # per-time-step softmax over all labels (including the CTC blank)
    x = Dense(label_count, kernel_initializer='he_normal', activation='softmax')(x)
    # base_model: image in, per-time-step softmax out (used for prediction)
    base_model = Model(inputs=input_tensor, outputs=x)

    # the extra inputs are only needed to compute the CTC loss during training
    labels = Input(name='the_labels', shape=[seq_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([x, labels, input_length, label_length])

    # the Lambda layer already returns the loss, so the compiled loss just passes y_pred through
    ctc_model = Model(inputs=[input_tensor, labels, input_length, label_length], outputs=[loss_out])
    ctc_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adadelta')
    ctc_model.summary()
    return conv_shape, base_model, ctc_model


def ctc_lambda_func(args):
    # y_pred: (batch, time_steps, label_count) softmax output of the network
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
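

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal illustration of how
# the two returned models fit together, assuming the defaults above
# (128x32 images, seq_len = 8, label_count = 63).  The random arrays below are
# placeholders for a real dataset and exist only to show the expected shapes.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    conv_shape, base_model, ctc_model = get_gru_ctc_model()

    batch_size = 4
    time_steps = int(conv_shape[1])  # width // 8 = 16 time steps per image

    # dummy batch: images are (width, height, 1); labels hold character indices
    images = np.random.rand(batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, 1)
    labels = np.random.randint(0, len(char_set), size=(batch_size, 8)).astype('float32')
    input_length = np.full((batch_size, 1), time_steps, dtype='int64')
    label_length = np.full((batch_size, 1), 8, dtype='int64')

    # training: the CTC loss is computed inside the graph, so the target is a dummy array
    ctc_model.fit([images, labels, input_length, label_length],
                  np.zeros(batch_size), batch_size=batch_size, epochs=1)

    # inference: predict with base_model and greedy-decode the softmax sequence
    y_pred = base_model.predict(images)
    decoded, _ = K.ctc_decode(y_pred, input_length=np.full(batch_size, time_steps))
    print(K.get_value(decoded[0]))  # decoded label indices, padded with -1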