utils.py
import tensorflow as tf

# ----------------------------- cal attention -------------------------------
# input_q, input_a: hidden states shaped (batch_size, rnn_size, seq_len).
# U is an (rnn_size, rnn_size) interaction matrix; G[b, i, j] scores how well
# question position i matches answer position j.
def cal_attention(input_q, input_a, U):
    batch_size = int(input_q.get_shape()[0])
    U = tf.tile(tf.expand_dims(U, 0), [batch_size, 1, 1])
    # G = Q^T U A, shape (batch_size, q_len, a_len)
    G = tf.matmul(tf.matmul(input_q, U, transpose_a=True), input_a)
    # the max over the answer axis scores each question position, and vice versa
    delta_q = tf.nn.softmax(tf.reduce_max(G, 2), axis=1)  # (batch_size, q_len)
    delta_a = tf.nn.softmax(tf.reduce_max(G, 1), axis=1)  # (batch_size, a_len)
    return delta_q, delta_a
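
# Usage sketch (illustrative, not part of the original module): the weights
# returned above would typically re-weight each time step of the hidden
# states before pooling, e.g.
#
#   delta_q, delta_a = cal_attention(q_states, a_states, U)
#   q_attn = q_states * tf.expand_dims(delta_q, 1)  # broadcast over rnn_size
#   a_attn = a_states * tf.expand_dims(delta_a, 1)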

# Cosine similarity between two feature batches of shape (batch_size, feat_dim);
# returns one score per example, shape (batch_size,).
def feature2cos_sim(feat_q, feat_a):
    norm_q = tf.sqrt(tf.reduce_sum(tf.multiply(feat_q, feat_q), 1))
    norm_a = tf.sqrt(tf.reduce_sum(tf.multiply(feat_a, feat_a), 1))
    mul_q_a = tf.reduce_sum(tf.multiply(feat_q, feat_a), 1)
    cos_sim_q_a = tf.divide(mul_q_a, tf.multiply(norm_q, norm_a))
    return cos_sim_q_a
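
# Usage sketch (illustrative): one cosine score per example, in [-1, 1]:
#
#   sim = feature2cos_sim(feat_q, feat_a)  # shape (batch_size,)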

# Max-pool the LSTM outputs over the time axis; lstm_out has shape
# (batch, step, rnn_size * 2) and the result has shape (batch, rnn_size * 2).
def max_pooling(lstm_out):
    # height = step, width = size of the input at one step
    height, width = int(lstm_out.get_shape()[1]), int(lstm_out.get_shape()[2])
    # max-pool over the full sequence length, collapsing it to length 1
    lstm_out = tf.expand_dims(lstm_out, -1)
    output = tf.nn.max_pool(
        lstm_out,
        ksize=[1, height, 1, 1],
        strides=[1, 1, 1, 1],
        padding='VALID')
    output = tf.reshape(output, [-1, width])
    return output
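
# Usage sketch (illustrative, hypothetical shapes): pooling a biLSTM output
# of shape (batch=32, step=20, rnn_size * 2 = 200) to one vector per example:
#
#   pooled = max_pooling(lstm_out)  # (32, 20, 200) -> (32, 200)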

# Pairwise hinge loss (the target function): push cos(q, a+) above cos(q, a-)
# by at least the margin. ori_cand and ori_neg are the positive and negative
# cosine scores, each of shape (batch,).
def cal_loss_and_acc(ori_cand, ori_neg):
    zero = tf.fill(tf.shape(ori_cand), 0.0)
    margin = tf.fill(tf.shape(ori_cand), 0.1)
    with tf.name_scope("loss"):
        losses = tf.maximum(zero, tf.subtract(margin, tf.subtract(ori_cand, ori_neg)))
        loss = tf.reduce_sum(losses)
    # accuracy: the fraction of pairs already separated by the full margin
    with tf.name_scope("acc"):
        correct = tf.equal(zero, losses)
        acc = tf.reduce_mean(tf.cast(correct, "float"), name="acc")
    return loss, acc
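
# End-to-end sketch (hypothetical wiring; lstm_q / lstm_pos / lstm_neg are
# assumed biLSTM outputs shaped (batch, step, rnn_size * 2)):
#
#   feat_q   = max_pooling(lstm_q)
#   feat_pos = max_pooling(lstm_pos)
#   feat_neg = max_pooling(lstm_neg)
#   ori_cand = feature2cos_sim(feat_q, feat_pos)  # cos(q, a+)
#   ori_neg  = feature2cos_sim(feat_q, feat_neg)  # cos(q, a-)
#   loss, acc = cal_loss_and_acc(ori_cand, ori_neg)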