Switch to the Adam optimizer instead of SGD+momentum

Converges faster, and doesn't get stuck indefinitely the way momentum optimization can.
Niklas Haas
2017-07-11 03:00:49 +02:00
parent fad61f249b
commit bbbaabe2c0
2 changed files with 1 addition and 6 deletions
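
For reference, a minimal NumPy sketch of the two update rules being swapped here: classic momentum SGD versus Adam (Kingma & Ba), using the default hyper-parameters that tf.train.AdamOptimizer falls back to. The function names and step interface are illustrative only, not code from this repository.

import numpy as np

def momentum_step(theta, velocity, grad, lr=1e-3, momentum=0.9):
    # Classic momentum SGD: one global step size for every parameter.
    velocity = momentum * velocity - lr * grad
    return theta + velocity, velocity

def adam_step(theta, m, v, grad, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # Adam: per-parameter step sizes, scaled by bias-corrected running
    # estimates of the gradient's first and second moments.
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    m_hat = m / (1 - beta1 ** t)          # t is the 1-based step count
    v_hat = v / (1 - beta2 ** t)
    return theta - lr * m_hat / (np.sqrt(v_hat) + eps), m, v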


@@ -10,8 +10,6 @@ flags = tf.app.flags
 flags.DEFINE_boolean("fast", False, "Use the fast model (FSRCNN-s) [False]")
 flags.DEFINE_integer("epoch", 10, "Number of epochs [10]")
 flags.DEFINE_integer("batch_size", 128, "The size of batch images [128]")
-flags.DEFINE_float("learning_rate", 1e-3, "The learning rate of gradient descent algorithm [1e-3]")
-flags.DEFINE_float("momentum", 0.9, "The momentum value for the momentum SGD [0.9]")
 flags.DEFINE_integer("c_dim", 1, "Dimension of image color [1]")
 flags.DEFINE_integer("scale", 3, "The size of scale factor for preprocessing input image [3]")
 flags.DEFINE_integer("stride", 4, "The size of stride to apply to input image [4]")


@@ -32,8 +32,6 @@ class FSRCNN(object):
 self.scale = config.scale
 self.stride = config.stride
 self.batch_size = config.batch_size
-self.learning_rate = config.learning_rate
-self.momentum = config.momentum
 self.threads = config.threads
 self.params = config.params
@@ -96,8 +94,7 @@ class FSRCNN(object):
 self.saver = tf.train.Saver()
 def run(self):
-# SGD with momentum
-self.train_op = tf.train.MomentumOptimizer(self.learning_rate, self.momentum).minimize(self.loss)
+self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
 tf.initialize_all_variables().run()
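
As a self-contained sanity check of the new training op, here is a sketch assuming TensorFlow 1.x (the tf.train.* API used in this diff); the toy regression data and step count are illustrative, not part of the repository.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None])
w = tf.Variable(0.0)
b = tf.Variable(0.0)
loss = tf.reduce_mean(tf.square(w * x + b - y))

# Before: tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss)
# After:  Adam with its built-in default learning rate of 1e-3.
train_op = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    xs = np.linspace(-1.0, 1.0, 64).astype(np.float32)
    ys = 0.7 * xs + 0.3
    for _ in range(2000):
        _, cur = sess.run([train_op, loss], feed_dict={x: xs, y: ys})
    print(cur)  # the loss should be close to zero by the end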