Skip to content

Commit 6582ed6

Browse files
committed
initial commit
0 parents  commit 6582ed6

File tree

5 files changed

+560
-0
lines changed

5 files changed

+560
-0
lines changed

IEC_extra.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
import tensorflow as tf
2+
3+
def edge_cha(x_image):
    """Return the Sobel edge channel of *x_image* (via ``tf.image.sobel_edges``)."""
    edge_map = tf.image.sobel_edges(x_image)
    return edge_map
6+
7+
def intensity_cha(x_image):
    """Return the intensity channel of *x_image* (RGB converted to grayscale)."""
    gray = tf.image.rgb_to_grayscale(x_image)
    return gray
10+
11+
def color_cha(x_image):
    """Return a color-opponency channel |R-G| + |B-Y| for an RGB image batch.

    Extracts the R, G and B planes with 1x1 convolutions, forms the
    yellow opponent Y = (R+G)/2 - B, and sums the red-green and
    blue-yellow opponency magnitudes.

    Args:
        x_image: float RGB image tensor of shape [batch, H, W, 3]
            (assumed — TODO confirm against callers).

    Returns:
        Tensor of shape [batch, H, W, 1] with the combined opponency map.
    """
    # tf.nn.conv2d needs a rank-4 float kernel [h, w, in_ch, out_ch];
    # the original used rank-3 integer constants, which would not run.
    R_filter = tf.constant([[[[1.0], [0.0], [0.0]]]])  # shape (1, 1, 3, 1)
    G_filter = tf.constant([[[[0.0], [1.0], [0.0]]]])
    B_filter = tf.constant([[[[0.0], [0.0], [1.0]]]])
    R = tf.nn.conv2d(x_image, R_filter, strides=[1, 1, 1, 1], padding='SAME')
    G = tf.nn.conv2d(x_image, G_filter, strides=[1, 1, 1, 1], padding='SAME')
    # BUG FIX: the original extracted B with R_filter (copy-paste error),
    # making B identical to R and zeroing the RG/BY opponency.
    B = tf.nn.conv2d(x_image, B_filter, strides=[1, 1, 1, 1], padding='SAME')
    Y = ((R + G) / 2) - B  # yellow opponent channel
    # r-g and b-y opponency magnitudes
    RG = tf.abs(R - G)
    BY = tf.abs(B - Y)
    C_out = tf.add(RG, BY)
    return C_out
24+

csdn.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
import tensorflow as tf
2+
3+
def CSDN(x_image, is_training):
    """Cross-scale difference block: up-sampling pyramid + cross-scale subtraction.

    Builds four single-channel pyramid levels (1x, 2x, 3x, 4x spatial
    scale via transposed convolutions), pools the larger levels back
    down, and sums the absolute cross-scale differences at the base
    resolution.

    Args:
        x_image: input feature map [batch, H, W, C]
            (assumed NHWC — TODO confirm).
        is_training: bool tensor/flag for batch normalization.

    Returns:
        Single-channel tensor at the input resolution.
    """
    # BUG FIX: the original called tf.nn.conv2d with the tf.layers.conv2d
    # signature (integer filter count, kernel list, `stride=` keyword),
    # which raises a TypeError. tf.layers.conv2d matches those arguments.

    # Level 0: base resolution (comments note the original's 28-px scale).
    c_pyramid0 = tf.layers.conv2d(x_image, 56, [3, 3], strides=(1, 1), padding='SAME')  # 28
    c_pyramid0 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid0, training=is_training))
    c_pyramid0 = tf.layers.conv2d(c_pyramid0, 1, [1, 1], strides=(1, 1), padding='SAME')  # 28
    c_pyramid0 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid0, training=is_training))

    # Level 1: 2x upsample.
    c_pyramid1 = tf.layers.conv2d_transpose(x_image, 56, [2, 2], strides=(2, 2), padding='SAME')  # 56
    c_pyramid1 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid1, training=is_training))
    c_pyramid1 = tf.layers.conv2d(c_pyramid1, 1, [2, 2], strides=(1, 1), padding='SAME')
    c_pyramid1 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid1, training=is_training))

    # Level 2: 3x upsample.
    c_pyramid2 = tf.layers.conv2d_transpose(x_image, 56, [3, 3], strides=(3, 3), padding='SAME')  # 84
    c_pyramid2 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid2, training=is_training))
    c_pyramid2 = tf.layers.conv2d(c_pyramid2, 1, [3, 3], strides=(1, 1), padding='SAME')
    c_pyramid2 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid2, training=is_training))

    # Level 3: 4x upsample.
    c_pyramid3 = tf.layers.conv2d_transpose(x_image, 56, [4, 4], strides=(4, 4), padding='SAME')  # 112
    c_pyramid3 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid3, training=is_training))
    c_pyramid3 = tf.layers.conv2d(c_pyramid3, 1, [4, 4], strides=(1, 1), padding='SAME')
    c_pyramid3 = tf.nn.relu(tf.layers.batch_normalization(c_pyramid3, training=is_training))

    # Pool the larger levels back toward the base resolution.
    c_pool0 = tf.nn.max_pool(c_pyramid3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 56
    c_pool1 = tf.nn.max_pool(c_pyramid2, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding='SAME')  # 28
    # (removed leftover debug `print(c_pool0)`)

    # Absolute cross-scale differences, reduced to the base resolution.
    c_sub0 = tf.abs(tf.subtract(c_pyramid0, c_pool1))  # |28-28|
    c_sub1 = tf.abs(tf.subtract(c_pyramid1, c_pool0))  # |56-56|
    c_sub1 = tf.nn.max_pool(c_sub1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # |28-28|
    c_0 = tf.add(c_sub0, c_sub1)

    return c_0

data_arg.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import glob, os

# Data-augmentation script: writes ~21 randomly transformed copies of each
# JPEG in the working directory into the "preview" folder.
datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode="nearest")

os.chdir("dir")  # NOTE(review): "dir" looks like a placeholder path — confirm
# BUG FIX: glob.glob("jpg") only matches a file literally named "jpg";
# "*.jpg" matches every JPEG in the directory.
for file in glob.glob("*.jpg"):
    # BUG FIX: the original ignored `file` and always loaded "./D41.jpg",
    # so every iteration augmented the same single image.
    img = load_img(file)
    x = img_to_array(img)
    x = x.reshape((1,) + x.shape)  # add batch dimension for datagen.flow

    # datagen.flow loops forever, so cap it at 21 generated images per input.
    i = 0
    for batch in datagen.flow(x, batch_size=1,
                              save_to_dir="preview", save_prefix="1", save_format="jpeg"):
        i += 1
        if i > 20:
            break  # generate image
24+

end to CNN.py

Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
import tensorflow as tf
2+
from csdn import csdn
3+
from IEC_extrc import edge_chan,intensive_chan,color_chan
4+
5+
def end_to_CNN(x, batch_prob):
    """Build the saliency front-end: IEC channels -> CSDN blocks -> saliency map.

    Extracts intensity, color and edge channels from the input image,
    runs each through a CSDN block, fuses them into a saliency map, and
    concatenates that map onto the original image as an extra channel.

    Args:
        x: RGB image batch [batch, H, W, 3] (assumed — TODO confirm).
        batch_prob: bool tensor/flag, True during training (drives CSDN's
            batch normalization).

    Returns:
        The input image with the saliency map appended on the channel axis.
    """
    # Local imports with the CORRECT module/function names; the original
    # file-level imports referenced nonexistent modules and functions
    # (`from csdn import csdn`, `IEC_extrc`, `edge_chan`, `intensive_chan`).
    from csdn import CSDN
    from IEC_extra import edge_cha, intensity_cha, color_cha

    with tf.name_scope("convolutional_base"):
        x_image = x
        I = intensity_cha(x_image)
        C = color_cha(x_image)   # BUG FIX: was undefined `ximage`
        E = edge_cha(x_image)    # BUG FIX: was undefined `image`

        # BUG FIX: CSDN requires an `is_training` argument; pass batch_prob.
        i_C = CSDN(I, batch_prob)  # CSDN Block 0
        e_C = CSDN(E, batch_prob)  # CSDN Block 1
        c_C = CSDN(C, batch_prob)  # CSDN Block 2

        # BUG FIX: tf.concat takes a LIST of tensors plus an axis; the
        # original passed tensors positionally. Fused in one call.
        Feature_IEC = tf.concat([i_C, e_C, c_C], 2)

        # BUG FIX: tf.nn.conv2d was called with the tf.layers signature;
        # use tf.layers.conv2d, and normalize SM itself (the original
        # normalized the undefined `c_pyramid2` with undefined `is_training`).
        SM = tf.layers.conv2d(Feature_IEC, 1, [1, 1], strides=(1, 1), padding='SAME')
        SM = tf.nn.relu(tf.layers.batch_normalization(SM, training=batch_prob))  # Saliency Map

        # Append the saliency map as an extra channel.
        # NOTE(review): axis 3 = channel axis for NHWC — confirm intended axis.
        out_channel = tf.concat([x_image, SM], 3)
    return out_channel
24+
25+
def CNN(x, batch_prob):
    """Three-block convolutional classifier over 32x32 single-channel input.

    Each block is conv -> batch-norm -> relu (x3) followed by 2x2 max-pool
    and dropout; a fully-connected layer and a softmax output head follow.
    L2 penalties of all conv/fc weights are accumulated into `l2_loss_W`.

    Args:
        x: flattened input batch; reshaped internally to [-1, 32, 32, 1].
        batch_prob: bool tensor/flag, True during training (drives
            batch-normalization statistics).

    Returns:
        (y_pred, logits, l2_loss_W, h_pool3, train_vars)
    """
    gamma = 0.01  # L2 regularization weight
    # batch_prob = tf.placeholder(tf.bool)

    x_image = tf.reshape(x, [-1, 32, 32, 1])
    with tf.name_scope("convolutional_base"):

        # ---- block 1: 7x7/5x5/1x1 convs, 128->64 channels, pool to 16x16 ----
        W_conv1 = tf.Variable(tf.truncated_normal(shape=[7, 7, 1, 128], stddev=5e-2), name='w1')
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[128]), name='b1')
        h_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1
        h_conv1 = tf.layers.batch_normalization(h_conv1, center=True, scale=True, training=batch_prob, name='bn1')
        h_relu1 = tf.nn.relu(h_conv1)

        l2_loss_W = gamma * tf.nn.l2_loss(W_conv1)

        W_conv2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 128, 128], stddev=5e-2), name='w2')
        b_conv2 = tf.Variable(tf.constant(0.1, shape=[128]), name='b2')
        # BUG FIX: every conv below added b_conv1 instead of its own bias.
        h_conv2 = tf.nn.conv2d(h_relu1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2
        h_conv2 = tf.layers.batch_normalization(h_conv2, center=True, scale=True, training=batch_prob, name='bn2')
        h_relu2 = tf.nn.relu(h_conv2)

        # BUG FIX: `l2_loss_W = + gamma * ...` overwrote the running sum;
        # accumulate with += (applies to every l2 update below).
        l2_loss_W += gamma * tf.nn.l2_loss(W_conv2)

        W_conv3 = tf.Variable(tf.truncated_normal(shape=[1, 1, 128, 64], stddev=5e-2), name='w3')
        # BUG FIX: variable and BN names duplicated layer 2's ('b2'/'bn2').
        b_conv3 = tf.Variable(tf.constant(0.1, shape=[64]), name='b3')
        h_conv3 = tf.nn.conv2d(h_relu2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3
        h_conv3 = tf.layers.batch_normalization(h_conv3, center=True, scale=True, training=batch_prob, name='bn3')
        h_relu3 = tf.nn.relu(h_conv3)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv3)

        h_pool1 = tf.nn.max_pool(h_relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        h_drop1 = tf.nn.dropout(h_pool1, rate=0.4)

        # ---- block 2: 64->32 channels, pool to 8x8 ----
        W_conv4 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 64], stddev=5e-2), name='w4')
        b_conv4 = tf.Variable(tf.constant(0.1, shape=[64]), name='b4')
        h_conv4 = tf.nn.conv2d(h_drop1, W_conv4, strides=[1, 1, 1, 1], padding='SAME') + b_conv4
        h_conv4 = tf.layers.batch_normalization(h_conv4, center=True, scale=True, training=batch_prob, name='bn4')
        h_relu4 = tf.nn.relu(h_conv4)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv4)

        W_conv5 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 64], stddev=5e-2), name='w5')
        b_conv5 = tf.Variable(tf.constant(0.1, shape=[64]), name='b5')
        h_conv5 = tf.nn.conv2d(h_relu4, W_conv5, strides=[1, 1, 1, 1], padding='SAME') + b_conv5
        h_conv5 = tf.layers.batch_normalization(h_conv5, center=True, scale=True, training=batch_prob, name='bn5')
        h_relu5 = tf.nn.relu(h_conv5)
        l2_loss_W += gamma * tf.nn.l2_loss(W_conv5)

        W_conv6 = tf.Variable(tf.truncated_normal(shape=[1, 1, 64, 32], stddev=5e-2), name='w6')
        b_conv6 = tf.Variable(tf.constant(0.1, shape=[32]), name='b6')
        h_conv6 = tf.nn.conv2d(h_relu5, W_conv6, strides=[1, 1, 1, 1], padding='SAME') + b_conv6
        h_conv6 = tf.layers.batch_normalization(h_conv6, center=True, scale=True, training=batch_prob, name='bn6')
        h_relu6 = tf.nn.relu(h_conv6)
        l2_loss_W += gamma * tf.nn.l2_loss(W_conv6)

        h_pool2 = tf.nn.max_pool(h_relu6, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        h_drop2 = tf.nn.dropout(h_pool2, rate=0.4)

        # ---- block 3: 32 channels, pool to 4x4 ----
        W_conv7 = tf.Variable(tf.truncated_normal(shape=[3, 3, 32, 32], stddev=5e-2), name='w7')
        b_conv7 = tf.Variable(tf.constant(0.1, shape=[32]), name='b7')
        h_conv7 = tf.nn.conv2d(h_drop2, W_conv7, strides=[1, 1, 1, 1], padding='SAME') + b_conv7
        h_conv7 = tf.layers.batch_normalization(h_conv7, center=True, scale=True, training=batch_prob, name='bn7')
        h_relu7 = tf.nn.relu(h_conv7)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv7)

        W_conv8 = tf.Variable(tf.truncated_normal(shape=[3, 3, 32, 32], stddev=5e-2), name='w8')
        b_conv8 = tf.Variable(tf.constant(0.1, shape=[32]), name='b8')
        h_conv8 = tf.nn.conv2d(h_relu7, W_conv8, strides=[1, 1, 1, 1], padding='SAME') + b_conv8
        h_conv8 = tf.layers.batch_normalization(h_conv8, center=True, scale=True, training=batch_prob, name='bn8')
        h_relu8 = tf.nn.relu(h_conv8)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv8)

        W_conv9 = tf.Variable(tf.truncated_normal(shape=[1, 1, 32, 32], stddev=5e-2), name='w9')
        b_conv9 = tf.Variable(tf.constant(0.1, shape=[32]), name='b9')
        h_conv9 = tf.nn.conv2d(h_relu8, W_conv9, strides=[1, 1, 1, 1], padding='SAME') + b_conv9
        h_conv9 = tf.layers.batch_normalization(h_conv9, center=True, scale=True, training=batch_prob, name='bn9')
        # BUG FIX: relu was applied to h_conv8, silently skipping layer 9.
        h_relu9 = tf.nn.relu(h_conv9)

        l2_loss_W += gamma * tf.nn.l2_loss(W_conv9)

        h_pool3 = tf.nn.max_pool(h_relu9, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        h_drop3 = tf.nn.dropout(h_pool3, rate=0.4)
        # BUG FIX: the dict used 'bn1' twice, so h_conv2 overwrote h_conv1.
        train_vars = {'w1': W_conv1, 'b1': b_conv1, 'bn1': h_conv1,
                      'w2': W_conv2, 'b2': b_conv2, 'bn2': h_conv2}
        for key, var in train_vars.items():
            tf.add_to_collection(key, var)

    with tf.name_scope("classifier"):

        # After three 2x2 pools: 32 -> 16 -> 8 -> 4 spatial, 32 channels.
        W_fc1 = tf.Variable(tf.truncated_normal(shape=[4 * 4 * 32, 256], stddev=5e-2))
        # BUG FIX: bias was shape [6096], mismatching W_fc1's 256 outputs.
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[256]))
        # BUG FIX: flatten size was 28*28*128, mismatching h_drop3 (4*4*32).
        h_pool3_flat = tf.reshape(h_drop3, [-1, 4 * 4 * 32])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)

        l2_loss_W += gamma * tf.nn.l2_loss(W_fc1)

        # Output Layer
        # BUG FIX: W_output was [1024, 100] against a 256-wide h_fc1 and a
        # 2-wide bias; 2 output classes per b_output's original shape.
        W_output = tf.Variable(tf.truncated_normal(shape=[256, 2], stddev=5e-2))
        b_output = tf.Variable(tf.constant(0.1, shape=[2]))
        logits = tf.matmul(h_fc1, W_output) + b_output
        y_pred = tf.nn.softmax(logits)

        l2_loss_W += gamma * tf.nn.l2_loss(W_output)

    return y_pred, logits, l2_loss_W, h_pool3, train_vars

0 commit comments

Comments
 (0)