# quantization_op_pyfunc.py
# Custom stochastic quantization op for TensorFlow 1.x, implemented via
# tf.py_func with a straight-through gradient override.
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.client import timeline
MyFuncID = 55103664
# define common custom quant function
def my_quant_def(x, threshold=0.05):
    """Stochastically round a scalar to one of its two neighboring integers.

    Draws u ~ Uniform[0, 1) and returns floor(x) + 1 with probability equal
    to the fractional part of x, otherwise floor(x).  The expectation of the
    result equals x, which is why the matching gradient (my_quant_grad_def)
    is the straight-through constant 1.

    Args:
        x: scalar to quantize (applied elementwise via np.vectorize below).
        threshold: unused; kept for signature compatibility with callers.

    Returns:
        A float equal to floor(x) or floor(x) + 1.
    """
    quant_noise = np.random.random()
    if quant_noise < x - np.floor(x):
        return np.floor(x) + 1
    else:
        return np.floor(x)
def my_quant_grad_def(x, threshold=0.05):
    """Straight-through estimator: the quantizer's gradient is taken as 1.

    Args:
        x: ignored; present so the signature mirrors my_quant_def.
        threshold: unused; kept for signature compatibility.

    Returns:
        The constant 1.0.
    """
    return 1.0
# Lift the scalar quantizer and its gradient to elementwise numpy functions.
my_quant_np = np.vectorize(my_quant_def)
my_quant_grad_np = np.vectorize(my_quant_grad_def)


# PEP 8 (E731): named defs instead of lambda assignments.
def my_quant_np_32(x):
    """Elementwise stochastic quantization, cast to float32.

    numpy defaults to float64 while the surrounding TF graph uses float32.
    """
    return my_quant_np(x).astype(np.float32)


def my_quant_grad_np_32(x):
    """Elementwise straight-through gradient (ones), cast to float32."""
    return my_quant_grad_np(x).astype(np.float32)
def my_quant_grad_tf(x, name=None):
    """Wrap the numpy straight-through gradient as a TensorFlow op.

    Args:
        x: input tensor.
        name: optional name scope.

    Returns:
        A float32 tensor produced by the py_func (ones, per my_quant_grad_np_32).
    """
    with ops.name_scope(name, "my_quant_grad_tf", [x]) as scope:
        outputs = tf.py_func(my_quant_grad_np_32,
                             [x],
                             [tf.float32],
                             name=scope,
                             stateful=False)
        return outputs[0]
def my_py_func(func, inp, Tout, stateful=False, name=None, my_grad_func=None):
    """tf.py_func with a custom Python gradient.

    Registers my_grad_func under a unique gradient name and, inside a
    gradient_override_map, creates the py_func so that backprop routes
    through my_grad_func instead of leaving the op gradient-less.

    Args:
        func: numpy function for the forward pass.
        inp: list of input tensors.
        Tout: list of output dtypes.
        stateful: forwarded to tf.py_func; must be True when func is
            nondeterministic (e.g. draws random numbers), otherwise the
            runtime may fold or deduplicate calls.
        name: optional op name.
        my_grad_func: gradient function of the form (op, grad) -> grad
            (see _my_quant_grad for an example).

    Returns:
        The tf.py_func output tensor(s).
    """
    # uuid4 guarantees a unique registration name; the original random
    # integer suffix could (rarely) collide and raise on re-registration.
    import uuid
    grad_name = 'PyFuncGrad' + uuid.uuid4().hex
    tf.RegisterGradient(grad_name)(my_grad_func)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": grad_name, "PyFuncStateless": grad_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
# The grad function we need to pass to the above my_py_func function takes a special form:
# It needs to take in (an operation, the previous gradients before the operation)
# and propagate (i.e., return) the gradients backward through the operation.
def _my_quant_grad(op, pre_grad):
return pre_grad
def my_quant_tf(x, name=None):
    """Stochastic quantization op with a straight-through gradient.

    Forward: elementwise stochastic rounding of x (my_quant_np_32).
    Backward: identity (_my_quant_grad), so gradients flow through unchanged.

    Args:
        x: float32 tensor; static shape beyond the batch axis must be known,
            since it is used to restore the py_func output's shape.
        name: optional name scope.

    Returns:
        A float32 tensor named 'quant' with shape [-1] + x.shape[1:].
    """
    with ops.name_scope(name, "quantization", [x]) as name:
        # BUGFIX: the forward pass draws random numbers, so the op must be
        # stateful=True — a stateless py_func may be constant-folded or
        # deduplicated by the runtime, breaking the stochastic rounding.
        # The gradient override in my_py_func maps both "PyFunc" and
        # "PyFuncStateless", so the custom gradient still applies.
        y = my_py_func(my_quant_np_32,
                       [x],
                       [tf.float32],
                       stateful=True,
                       name=name,
                       my_grad_func=_my_quant_grad)  # <-- here's the call to the gradient
        # py_func drops static shape info; restore it from x's known shape.
        return tf.reshape(y[0], [-1] + x.get_shape().as_list()[1:], 'quant')
quantization_one_pass = tf.make_template('quantization_one_pass', my_quant_tf)
if __name__ == '__main__':
    # Smoke test: quantize a 2x4 batch, write a histogram summary, and dump
    # a Chrome-trace timeline of the run for profiling.
    with tf.Session() as sess:
        x = tf.placeholder(name="x", shape=[None, 4], dtype=tf.float32)
        y = my_quant_tf(x)
        tf.summary.histogram('y', y)
        merged = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter('log/build_model/quantization/', sess.graph)
        # FULL_TRACE so per-op timings are captured into run_metadata.
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary, oy = sess.run([merged, y], feed_dict={x: [[-0.8, -0.3, 0.3, 0.9], [0.1, 0.9, -0.8, -0.1]]},
                               options=run_options, run_metadata=run_metadata)
        summary_writer.add_run_metadata(run_metadata, "this")
        # Export step stats as chrome://tracing-compatible JSON.
        tl = timeline.Timeline(step_stats=run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        with open('quantization_timeline.json', 'w') as f:
            f.write(ctf)
        summary_writer.add_summary(summary)
        print(oy)
        # Dead eager-style variant kept for reference (string literal, not executed):
        '''x = tf.constant([[-0.8, -0.3, 0.3, 0.9],[0.1,0.9,-0.8,-0.1]])
y = my_quant_tf(x)
print(x)
print(y)
tf.global_variables_initializer().run()
print(x.eval())
print(y.eval())
print(tf.gradients(y, [x])[0].eval()) '''
# [-0.30000001 0.005 0.08 0.12 ]
# [ 0. 0. 0.08 0.12]
# [ 0. 0. 1. 1.] s