Mirror of https://github.com/igv/FSRCNN-TensorFlow.git
Switch to the Adam optimizer instead of SGD+momentum

Converges faster and doesn't get stuck indefinitely the way momentum optimization does.
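For background (not part of the commit): Adam keeps exponential moving averages of each parameter's gradient and squared gradient and scales every step by their ratio, which is what lets it keep making progress where plain momentum SGD can stall on a plateau. A minimal NumPy sketch of one Adam step, using the same defaults tf.train.AdamOptimizer() falls back to once the flags below are removed (lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):

import numpy as np

def adam_step(w, g, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # Exponential moving averages of the gradient and its square
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * g * g
    # Bias correction for the zero-initialized accumulators (t starts at 1)
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    # Per-parameter step size shrinks where the squared gradient is large
    return w - lr * m_hat / (np.sqrt(v_hat) + eps), m, v

The division by sqrt(v_hat) is the part momentum SGD lacks: it normalizes the step per parameter, so flat, low-gradient directions still receive usefully sized updates.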
main.py | 2 --
@@ -10,8 +10,6 @@ flags = tf.app.flags
 flags.DEFINE_boolean("fast", False, "Use the fast model (FSRCNN-s) [False]")
 flags.DEFINE_integer("epoch", 10, "Number of epochs [10]")
 flags.DEFINE_integer("batch_size", 128, "The size of batch images [128]")
-flags.DEFINE_float("learning_rate", 1e-3, "The learning rate of gradient descent algorithm [1e-3]")
-flags.DEFINE_float("momentum", 0.9, "The momentum value for the momentum SGD [0.9]")
 flags.DEFINE_integer("c_dim", 1, "Dimension of image color [1]")
 flags.DEFINE_integer("scale", 3, "The size of scale factor for preprocessing input image [3]")
 flags.DEFINE_integer("stride", 4, "The size of stride to apply to input image [4]")
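With those two flags deleted, the learning rate is no longer tunable from the command line; AdamOptimizer() simply uses its defaults. If that knob were still wanted, one alternative (not what this commit does) would be to keep the flag and pass it through, since tf.train.AdamOptimizer accepts learning_rate as its first argument:

import tensorflow as tf  # TF 1.x, matching the repo's tf.app.flags usage

flags = tf.app.flags
flags.DEFINE_float("learning_rate", 1e-3, "Learning rate for Adam [1e-3]")
FLAGS = flags.FLAGS

# later, inside the model's run():
# self.train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(self.loss)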
model.py | 5 +----
@@ -32,8 +32,6 @@ class FSRCNN(object):
         self.scale = config.scale
         self.stride = config.stride
         self.batch_size = config.batch_size
-        self.learning_rate = config.learning_rate
-        self.momentum = config.momentum
         self.threads = config.threads
         self.params = config.params
 
@@ -96,8 +94,7 @@ class FSRCNN(object):
         self.saver = tf.train.Saver()
 
     def run(self):
-        # SGD with momentum
-        self.train_op = tf.train.MomentumOptimizer(self.learning_rate, self.momentum).minimize(self.loss)
+        self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
 
         tf.initialize_all_variables().run()
 
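An ordering detail the new code keeps right: AdamOptimizer.minimize() creates extra slot variables (the per-parameter moment accumulators), so it has to run before the initializer, otherwise those slots are never initialized. A minimal sketch under TF 1.x, using tf.global_variables_initializer(), the non-deprecated successor to the tf.initialize_all_variables() call shown above:

import tensorflow as tf  # TF 1.x

w = tf.Variable(3.0)
loss = tf.square(w)  # stand-in for the model's real loss

# minimize() first: this is what creates Adam's moment slot variables
train_op = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    # Initializes w *and* the Adam slots created above
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_op)
    print(sess.run(w))  # w has moved toward the minimum at 0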