BUG: no activation pre-softmax
awni committed Apr 4, 2017
1 parent 6bdc6ba commit 32d247e
Showing 1 changed file with 7 additions and 5 deletions.
network.py (12 changes: 7 additions & 5 deletions)
@@ -52,7 +52,9 @@ def init_inference(self, config):
         # Reduce the time-dimension to make a single prediction
         acts = tf.reduce_mean(acts, axis=1)

-        self.logits = tf.contrib.layers.fully_connected(acts, self.output_dim)
+        self.logits = tf.contrib.layers.fully_connected(acts,
+                                                        self.output_dim,
+                                                        activation_fn=None)
         self.probs = tf.nn.softmax(self.logits)

     def init_loss(self):
@@ -110,9 +112,9 @@ def set_momentum(self, session):
     #TODO: write a builder nicely later
     def get_optimizer(self, config):
         logger.debug("Config " + str(config))

         optimizer_name = config.get('name')

         if optimizer_name.lower() == 'momentum':
             return tf.train.MomentumOptimizer(config.get('learning_rate'), self.mom_var)
         elif optimizer_name.lower() == 'adam':
@@ -125,10 +127,10 @@ def get_optimizer(self, config):
                 beta_2 = config.get('beta_2')
             if config.get('epsilon') != None:
                 t_epsilon = config.get('epsilon')

             return tf.train.AdamOptimizer(config.get('learning_rate'), beta1=beta_1, beta2=beta_2, epsilon=t_epsilon)
         return tf.train.GradientDescentOptimizer(config.get('learning_rate'))


     def feed_dict(self, inputs, labels=None):
         """
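Why the fix matters: tf.contrib.layers.fully_connected applies tf.nn.relu by default, so the original code clipped every negative pre-activation to zero before the softmax; passing activation_fn=None makes the output layer a plain linear projection and lets tf.nn.softmax see unconstrained logits. Below is a minimal, self-contained sketch of the corrected output layer under TensorFlow 1.x; the placeholder shape and class count are hypothetical, not taken from the repository.

import tensorflow as tf  # TensorFlow 1.x, matching the tf.contrib API used in network.py

# Hypothetical (batch, time, features) activations standing in for the `acts`
# tensor built earlier in init_inference.
acts = tf.placeholder(tf.float32, [None, 100, 64])
output_dim = 4  # hypothetical number of output classes

# Reduce the time-dimension to make a single prediction per example.
acts = tf.reduce_mean(acts, axis=1)

# Without activation_fn=None, fully_connected would default to tf.nn.relu,
# so the softmax would only ever receive non-negative logits.
logits = tf.contrib.layers.fully_connected(acts,
                                           output_dim,
                                           activation_fn=None)
probs = tf.nn.softmax(logits)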

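For the get_optimizer context shown above, config appears to be a plain dict keyed by 'name' and 'learning_rate', with optional Adam hyperparameters. A hedged example of such a config follows; only the keys are inferred from the code in the diff, and the values are illustrative rather than the repository's actual settings.

# Hypothetical optimizer configs; any unrecognized name falls through to
# plain gradient descent in get_optimizer.
adam_config = {
    'name': 'adam',
    'learning_rate': 1e-3,
    'beta_1': 0.9,      # optional overrides for the Adam hyperparameters
    'beta_2': 0.999,
    'epsilon': 1e-8,
}

momentum_config = {
    'name': 'momentum',
    'learning_rate': 1e-2,
}

# optimizer = model.get_optimizer(adam_config)  # `model` is a hypothetical instance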