Mirror of https://github.com/igv/FSRCNN-TensorFlow.git
Switch to the Adam optimizer instead of SGD+momentum
Converges faster, and doesn't stall indefinitely the way momentum optimization can.
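For context, the change below boils down to one TF 1.x API call. Here is a minimal, self-contained sketch on a toy quadratic loss (the variable w and the loss are illustrative, not from model.py; only the two optimizer constructors mirror the diff):

import tensorflow as tf  # TF 1.x API, matching the code in this repo

# Toy convex problem: minimize (w - 2)^2, whose minimum is at w = 2.
w = tf.Variable(5.0)
loss = tf.square(w - 2.0)

# Before: SGD with momentum, hyperparameters supplied from config.
momentum_op = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9).minimize(loss)

# After: Adam, which adapts per-parameter step sizes from running
# estimates of the gradient's first and second moments.
adam_op = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(adam_op)  # swap in momentum_op to compare convergence
    print(sess.run([w, loss]))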
model.py · 5 changed lines (1 addition, 4 deletions)
@@ -32,8 +32,6 @@ class FSRCNN(object):
     self.scale = config.scale
     self.stride = config.stride
     self.batch_size = config.batch_size
-    self.learning_rate = config.learning_rate
-    self.momentum = config.momentum
     self.threads = config.threads
     self.params = config.params
 
@@ -96,8 +94,7 @@ class FSRCNN(object):
     self.saver = tf.train.Saver()
 
   def run(self):
-    # SGD with momentum
-    self.train_op = tf.train.MomentumOptimizer(self.learning_rate, self.momentum).minimize(self.loss)
+    self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
 
     tf.initialize_all_variables().run()
 
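Note that AdamOptimizer() is constructed with no arguments, which is why the first hunk can drop config.learning_rate and config.momentum: TF 1.x then falls back to Adam's built-in defaults, so the new line behaves like

    self.train_op = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08).minimize(self.loss)

(tf.initialize_all_variables() was deprecated in favor of tf.global_variables_initializer(), but this commit leaves that line as-is.)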