import tensorflow as tf
from csdn import csdn
from IEC_extrc import edge_chan, intensive_chan, color_chan

def end_to_CNN(x, batch_prob):
    with tf.name_scope("convolutional_base"):
        x_image = x
        I = intensive_chan(x_image)
        C = color_chan(x_image)
        E = edge_chan(x_image)

        i_C = csdn(I)  # CSDN Block 0
        e_C = csdn(E)  # CSDN Block 1
        c_C = csdn(C)  # CSDN Block 2

        # Fuse the three CSDN feature maps along the channel axis (NHWC).
        Feature_IE = tf.concat([i_C, e_C], 3)
        Feature_IEC = tf.concat([Feature_IE, c_C], 3)

        # A 1x1 convolution collapses the fused features into a single channel.
        SM = tf.layers.conv2d(Feature_IEC, 1, [1, 1], strides=(1, 1), padding='SAME')
        SM = tf.nn.relu(tf.layers.batch_normalization(SM, training=batch_prob))  # Saliency Map

        # Append the saliency map to the input as an extra channel.
        out_channel = tf.concat([x_image, SM], 3)
        return out_channel

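# A minimal wiring sketch for end_to_CNN (an illustration added here, not part
# of the original pipeline). The tensors returned by the IEC_extrc helpers and
# csdn are defined elsewhere in this repo, so only the call pattern is shown;
# the NHWC input layout is an assumption.
#
# x_in = tf.placeholder(tf.float32, [None, 32, 32, 3])
# is_train = tf.placeholder(tf.bool)
# fused = end_to_CNN(x_in, is_train)  # input channels + appended saliency map
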
def CNN(x, batch_prob):
    gamma = 0.01
    # batch_prob = tf.placeholder(tf.bool)

    x_image = tf.reshape(x, [-1, 32, 32, 1])
    with tf.name_scope("convolutional_base"):

        # block 1
        W_conv1 = tf.Variable(tf.truncated_normal(shape=[7, 7, 1, 128], stddev=5e-2), name='w1')
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[128]), name='b1')
        h_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1
        h_conv1 = tf.layers.batch_normalization(h_conv1, center=True, scale=True, training=batch_prob, name='bn1')
        h_relu1 = tf.nn.relu(h_conv1)

        # Accumulate the L2 weight penalty layer by layer.
        l2_loss_W = gamma * tf.nn.l2_loss(W_conv1)

        W_conv2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 128, 128], stddev=5e-2), name='w2')
        b_conv2 = tf.Variable(tf.constant(0.1, shape=[128]), name='b2')
        h_conv2 = tf.nn.conv2d(h_relu1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2
        h_conv2 = tf.layers.batch_normalization(h_conv2, center=True, scale=True, training=batch_prob, name='bn2')
        h_relu2 = tf.nn.relu(h_conv2)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv2)

        W_conv3 = tf.Variable(tf.truncated_normal(shape=[1, 1, 128, 64], stddev=5e-2), name='w3')
        b_conv3 = tf.Variable(tf.constant(0.1, shape=[64]), name='b3')
        h_conv3 = tf.nn.conv2d(h_relu2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3
        h_conv3 = tf.layers.batch_normalization(h_conv3, center=True, scale=True, training=batch_prob, name='bn3')
        h_relu3 = tf.nn.relu(h_conv3)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv3)

        h_pool1 = tf.nn.max_pool(h_relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        h_drop1 = tf.nn.dropout(h_pool1, rate=0.4)

        # block 2
        W_conv4 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 64], stddev=5e-2), name='w4')
        b_conv4 = tf.Variable(tf.constant(0.1, shape=[64]), name='b4')
        h_conv4 = tf.nn.conv2d(h_drop1, W_conv4, strides=[1, 1, 1, 1], padding='SAME') + b_conv4
        h_conv4 = tf.layers.batch_normalization(h_conv4, center=True, scale=True, training=batch_prob, name='bn4')
        h_relu4 = tf.nn.relu(h_conv4)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv4)

        W_conv5 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 64], stddev=5e-2), name='w5')
        b_conv5 = tf.Variable(tf.constant(0.1, shape=[64]), name='b5')
        h_conv5 = tf.nn.conv2d(h_relu4, W_conv5, strides=[1, 1, 1, 1], padding='SAME') + b_conv5
        h_conv5 = tf.layers.batch_normalization(h_conv5, center=True, scale=True, training=batch_prob, name='bn5')
        h_relu5 = tf.nn.relu(h_conv5)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv5)

        W_conv6 = tf.Variable(tf.truncated_normal(shape=[1, 1, 64, 32], stddev=5e-2), name='w6')
        b_conv6 = tf.Variable(tf.constant(0.1, shape=[32]), name='b6')
        h_conv6 = tf.nn.conv2d(h_relu5, W_conv6, strides=[1, 1, 1, 1], padding='SAME') + b_conv6
        h_conv6 = tf.layers.batch_normalization(h_conv6, center=True, scale=True, training=batch_prob, name='bn6')
        h_relu6 = tf.nn.relu(h_conv6)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv6)

        h_pool2 = tf.nn.max_pool(h_relu6, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        h_drop2 = tf.nn.dropout(h_pool2, rate=0.4)

        # block 3
        W_conv7 = tf.Variable(tf.truncated_normal(shape=[3, 3, 32, 32], stddev=5e-2), name='w7')
        b_conv7 = tf.Variable(tf.constant(0.1, shape=[32]), name='b7')
        h_conv7 = tf.nn.conv2d(h_drop2, W_conv7, strides=[1, 1, 1, 1], padding='SAME') + b_conv7
        h_conv7 = tf.layers.batch_normalization(h_conv7, center=True, scale=True, training=batch_prob, name='bn7')
        h_relu7 = tf.nn.relu(h_conv7)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv7)

        W_conv8 = tf.Variable(tf.truncated_normal(shape=[3, 3, 32, 32], stddev=5e-2), name='w8')
        b_conv8 = tf.Variable(tf.constant(0.1, shape=[32]), name='b8')
        h_conv8 = tf.nn.conv2d(h_relu7, W_conv8, strides=[1, 1, 1, 1], padding='SAME') + b_conv8
        h_conv8 = tf.layers.batch_normalization(h_conv8, center=True, scale=True, training=batch_prob, name='bn8')
        h_relu8 = tf.nn.relu(h_conv8)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv8)

        W_conv9 = tf.Variable(tf.truncated_normal(shape=[1, 1, 32, 32], stddev=5e-2), name='w9')
        b_conv9 = tf.Variable(tf.constant(0.1, shape=[32]), name='b9')
        h_conv9 = tf.nn.conv2d(h_relu8, W_conv9, strides=[1, 1, 1, 1], padding='SAME') + b_conv9
        h_conv9 = tf.layers.batch_normalization(h_conv9, center=True, scale=True, training=batch_prob, name='bn9')
        h_relu9 = tf.nn.relu(h_conv9)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv9)

        h_pool3 = tf.nn.max_pool(h_relu9, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        h_drop3 = tf.nn.dropout(h_pool3, rate=0.4)

        # Expose the first two conv layers' tensors through named collections.
        train_vars = {'w1': W_conv1, 'b1': b_conv1, 'bn1': h_conv1,
                      'w2': W_conv2, 'b2': b_conv2, 'bn2': h_conv2}
        for key, var in train_vars.items():
            tf.add_to_collection(key, var)

    with tf.name_scope("classifier"):
        # Three 2x2 max-pools reduce the 32x32 input to 4x4 with 32 channels.
        W_fc1 = tf.Variable(tf.truncated_normal(shape=[4 * 4 * 32, 256], stddev=5e-2))
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[256]))
        h_pool3_flat = tf.reshape(h_drop3, [-1, 4 * 4 * 32])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)

        l2_loss_W += gamma * tf.nn.l2_loss(W_fc1)

        # Output layer (the original mixed 100- and 2-way shapes; 100 classes
        # is assumed here, matching the logits width it declared).
        num_classes = 100
        W_output = tf.Variable(tf.truncated_normal(shape=[256, num_classes], stddev=5e-2))
        b_output = tf.Variable(tf.constant(0.1, shape=[num_classes]))
        logits = tf.matmul(h_fc1, W_output) + b_output
        y_pred = tf.nn.softmax(logits)

        l2_loss_W += gamma * tf.nn.l2_loss(W_output)

    return y_pred, logits, l2_loss_W, h_pool3, train_vars
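
# Minimal smoke test (a sketch added here, not part of the original file):
# builds the CNN graph and runs one forward pass on random data. The flat
# 32*32 input shape and the False training flag are assumptions consistent
# with the reshape inside CNN.
if __name__ == "__main__":
    import numpy as np

    x_ph = tf.placeholder(tf.float32, [None, 32 * 32], name="x")
    train_ph = tf.placeholder(tf.bool, name="batch_prob")
    y_pred, logits, l2_loss, feats, _ = CNN(x_ph, train_ph)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.random.rand(8, 32 * 32).astype(np.float32)
        probs = sess.run(y_pred, feed_dict={x_ph: batch, train_ph: False})
        print("y_pred shape:", probs.shape)  # expected: (8, num_classes)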