Skip to content

Commit b94e33e

Browse files
committed
updated to tf-1.10, removed unnecessary folders and files
1 parent 70f0cb4 commit b94e33e

File tree

7 files changed

+36
-370
lines changed

7 files changed

+36
-370
lines changed

attention_reader/attention_reader.py

-150
This file was deleted.

(Another file in this commit was renamed without content changes.)

key_value_memory/joint.py renamed to joint.py

+7-5
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,8 @@
44
from __future__ import print_function
55

66
from data_utils import load_task, vectorize_data
7-
from sklearn import cross_validation, metrics
7+
from sklearn import metrics
8+
from sklearn.model_selection import train_test_split
89
from memn2n_kv import MemN2N_KV
910
from itertools import chain
1011
from six.moves import range, reduce
@@ -36,7 +37,6 @@
3637
tf.flags.DEFINE_string("reader", "bow", "Reader for the model")
3738
FLAGS = tf.flags.FLAGS
3839

39-
FLAGS._parse_flags()
4040
print("\nParameters:")
4141
with open(FLAGS.param_output_file, 'w') as f:
4242
for attr, value in sorted(FLAGS.__flags.items()):
@@ -80,7 +80,7 @@
8080
valA = []
8181
for task in train:
8282
S, Q, A = vectorize_data(task, word_idx, sentence_size, memory_size)
83-
ts, vs, tq, vq, ta, va = cross_validation.train_test_split(S, Q, A, test_size=0.1, random_state=FLAGS.random_state)
83+
ts, vs, tq, vq, ta, va = train_test_split(S, Q, A, test_size=0.1, random_state=FLAGS.random_state)
8484
trainS.append(ts)
8585
trainQ.append(tq)
8686
trainA.append(ta)
@@ -130,7 +130,9 @@
130130

131131
model = MemN2N_KV(batch_size=batch_size, vocab_size=vocab_size,
132132
query_size=sentence_size, story_size=sentence_size, memory_key_size=memory_size,
133-
feature_size=FLAGS.feature_size, memory_value_size=memory_size, embedding_size=FLAGS.embedding_size, hops=FLAGS.hops, reader=FLAGS.reader, l2_lambda=FLAGS.l2_lambda)
133+
feature_size=FLAGS.feature_size, memory_value_size=memory_size,
134+
embedding_size=FLAGS.embedding_size, hops=FLAGS.hops, reader=FLAGS.reader,
135+
l2_lambda=FLAGS.l2_lambda)
134136
grads_and_vars = optimizer.compute_gradients(model.loss_op)
135137

136138
grads_and_vars = [(tf.clip_by_norm(g, FLAGS.max_grad_norm), v)
@@ -144,7 +146,7 @@
144146
nil_grads_and_vars.append((g, v))
145147

146148
train_op = optimizer.apply_gradients(nil_grads_and_vars, name="train_op", global_step=global_step)
147-
sess.run(tf.initialize_all_variables())
149+
sess.run(tf.global_variables_initializer())
148150

149151
def train_step(s, q, a):
150152
feed_dict = {

key_value_memory/eval.py

-190
This file was deleted.

0 commit comments

Comments (0)