author | Logan Bauman <logan_bauman@brown.edu> | 2022-05-07 14:16:42 -0400
---|---|---
committer | Logan Bauman <logan_bauman@brown.edu> | 2022-05-07 14:16:42 -0400
commit | 1b54bfa9de44f29ca2046d1eeb0ab174ae0dadbd (patch) |
tree | 5b9738733d10bd36e00e6d5f7a6463174f52059b |
parent | 95be7aba1fa85881253a0a09752204863f31bcb8 (diff) |
hi
-rw-r--r-- | hyperparameters.py | 4 | ++--
-rw-r--r-- | losses.py | 2 | +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/hyperparameters.py b/hyperparameters.py
index 80141fcf..6c82a745 100644
--- a/hyperparameters.py
+++ b/hyperparameters.py
@@ -9,14 +9,14 @@
 Number of epochs. If you experiment with more complex networks you might
 need to increase this. Likewise if you add regularization that slows
 training.
 """
-num_epochs = 20000
+num_epochs = 7000
 
 """
 A critical parameter that can dramatically affect whether training
 succeeds or fails. The value for this depends significantly on which
 optimizer is used. Refer to the default learning rate parameter
 """
-learning_rate = 1e-2
+learning_rate = 2e-3
 
 momentum = 0.01
diff --git a/losses.py b/losses.py
--- a/losses.py
+++ b/losses.py
@@ -34,7 +34,7 @@ class YourModel(tf.keras.Model):
 
         #print(self.content_image.shape, self.style_image.shape)
 
-        self.optimizer = tf.keras.optimizers.Adam()
+        self.optimizer = tf.keras.optimizers.Adam(hp.learning_rate)
 
         self.vgg16 = [
             # Block 1
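Note on the optimizer change: before this commit, tf.keras.optimizers.Adam() fell back to its built-in default learning rate (1e-3 in TensorFlow 2.x), so the learning_rate defined in hyperparameters.py never reached the optimizer. Passing hp.learning_rate wires the tuned value through. A minimal sketch of the difference, assuming TensorFlow 2.x and that hyperparameters.py is imported as hp (as the diff's hp.learning_rate implies):

    import tensorflow as tf
    import hyperparameters as hp  # assumed import alias, matching "hp." in the diff

    # Before: no argument, so Adam silently uses its default of 1e-3 and the
    # value in hyperparameters.py has no effect on training.
    opt_before = tf.keras.optimizers.Adam()

    # After: the tuned rate from hyperparameters.py (now 2e-3) is applied.
    opt_after = tf.keras.optimizers.Adam(hp.learning_rate)

    print(float(opt_before.learning_rate))  # 0.001
    print(float(opt_after.learning_rate))   # 0.002

The accompanying drop from 1e-2 to 2e-3 is consistent with this fix: once the configured rate is actually honored, Adam typically wants a value close to its 1e-3 default, and 1e-2 would likely destabilize training.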