-rw-r--r--  __pycache__/hyperparameters.cpython-38.pyc  bin  341 -> 341 bytes
-rw-r--r--  __pycache__/losses.cpython-38.pyc           bin  4610 -> 4599 bytes
-rw-r--r--  hyperparameters.py                          6
-rw-r--r--  losses.py                                   2
-rw-r--r--  save.jpg                                    bin  25059 -> 46187 bytes
5 files changed, 4 insertions, 4 deletions
diff --git a/__pycache__/hyperparameters.cpython-38.pyc b/__pycache__/hyperparameters.cpython-38.pyc
index 40cba873..d0ec2a3e 100644
--- a/__pycache__/hyperparameters.cpython-38.pyc
+++ b/__pycache__/hyperparameters.cpython-38.pyc
Binary files differ
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
index ff01b5a7..9cd2e604 100644
--- a/__pycache__/losses.cpython-38.pyc
+++ b/__pycache__/losses.cpython-38.pyc
Binary files differ
diff --git a/hyperparameters.py b/hyperparameters.py
index fedd4dd7..75d58118 100644
--- a/hyperparameters.py
+++ b/hyperparameters.py
@@ -9,7 +9,7 @@ Number of epochs. If you experiment with more complex networks you
 might need to increase this. Likewise if you add regularization that
 slows training.
 """
-num_epochs = 150
+num_epochs = 1000
 """
 A critical parameter that can dramatically affect whether training
@@ -20,6 +20,6 @@ learning_rate = 1e-2
 momentum = 0.01
-alpha = 1e1
+alpha = 1e-3
-beta = 1e2
+beta = 1e-1
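
The alpha/beta pair here reads like the content and style weights of a Gatys-style neural style transfer objective (the VGG16 layer list in losses.py points the same way). A minimal sketch of how such weights typically enter the total loss; the total_loss helper and its arguments are illustrative, not code from this repository:

    import hyperparameters as hp

    def total_loss(content_loss, style_loss):
        # Weighted sum in the usual style-transfer convention: alpha scales
        # the content term, beta the style term. After this commit
        # alpha = 1e-3 and beta = 1e-1, so style outweighs content 100:1
        # (up from 10:1 with the old 1e1/1e2 pair), and both terms are
        # scaled down by several orders of magnitude overall.
        return hp.alpha * content_loss + hp.beta * style_loss
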
diff --git a/losses.py b/losses.py
index 4c7b5750..eba94c36 100644
--- a/losses.py
+++ b/losses.py
@@ -34,7 +34,7 @@ class YourModel(tf.keras.Model):
         #print(self.content_image.shape, self.style_image.shape)
-        self.optimizer = tf.keras.optimizers.Adam(1e-2)
+        self.optimizer = tf.keras.optimizers.Adam()
         self.vgg16 = [
             # Block 1
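
For reference, tf.keras.optimizers.Adam() with no arguments uses its default learning rate of 1e-3, so dropping the explicit 1e-2 is a tenfold reduction in step size:

    import tensorflow as tf

    # Equivalent to the new line above: Adam's default learning rate is 1e-3.
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
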
diff --git a/save.jpg b/save.jpg
index a77ce43c..a3bc2d80 100644
--- a/save.jpg
+++ b/save.jpg
Binary files differ