Skip to content
This repository was archived by the owner on Jan 1, 2021. It is now read-only.

Commit ecde8d0

Browse files
committed
fixed bugs in chatbot
1 parent 89772fa commit ecde8d0

File tree

3 files changed

+2
-29
lines changed

3 files changed

+2
-29
lines changed

assignments/chatbot/chatbot.py

+2-2
Original file line number | Diff line number | Diff line change
@@ -169,7 +169,8 @@ def _get_user_input():
169169
""" Get user's input, which will be transformed into encoder input later """
170170
print("> ", end="")
171171
sys.stdout.flush()
172-
return sys.stdin.readline()
172+
text = sys.stdin.readline()
173+
return data.tokenize_helper(text)
173174

174175
def _find_right_bucket(length):
175176
""" Find the proper bucket for an encoder input based on its length """
@@ -183,7 +184,6 @@ def _construct_response(output_logits, inv_dec_vocab):
183184
184185
This is a greedy decoder - outputs are just argmaxes of output_logits.
185186
"""
186-
print(output_logits[0])
187187
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
188188
# If there is an EOS symbol in outputs, cut them at that point.
189189
if config.EOS_ID in outputs:

assignments/chatbot/config.py

-2
Original file line number | Diff line number | Diff line change
@@ -48,5 +48,3 @@
4848
MAX_GRAD_NORM = 5.0
4949

5050
NUM_SAMPLES = 512
51-
ENC_VOCAB = 24133
52-
DEC_VOCAB = 22879

assignments/chatbot/model.py

-25
Original file line number | Diff line number | Diff line change
@@ -86,31 +86,6 @@ def _seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
8686
config.BUCKETS,
8787
lambda x, y: _seq2seq_f(x, y, False),
8888
softmax_loss_function=self.softmax_loss_function)
89-
90-
# if self.fw_only:
91-
# self.outputs, self.losses = tf.contrib.training.bucket_by_sequence_length(
92-
# self.encoder_inputs,
93-
# self.decoder_inputs,
94-
# self.targets,
95-
# self.decoder_masks,
96-
# config.BUCKETS,
97-
# lambda x, y: _seq2seq_f(x, y, True),
98-
# softmax_loss_function=self.softmax_loss_function)
99-
# # If we use output projection, we need to project outputs for decoding.
100-
# if self.output_projection:
101-
# for bucket in range(len(config.BUCKETS)):
102-
# self.outputs[bucket] = [tf.matmul(output,
103-
# self.output_projection[0]) + self.output_projection[1]
104-
# for output in self.outputs[bucket]]
105-
# else:
106-
# self.outputs, self.losses = tf.contrib.training.bucket_by_sequence_length(
107-
# self.encoder_inputs,
108-
# self.decoder_inputs,
109-
# self.targets,
110-
# self.decoder_masks,
111-
# config.BUCKETS,
112-
# lambda x, y: _seq2seq_f(x, y, False),
113-
# softmax_loss_function=self.softmax_loss_function)
11489
print('Time:', time.time() - start)
11590

11691
def _creat_optimizer(self):

0 commit comments

Comments (0)