diff options
author | Benjamin Fiske <bffiske@gmail.com> | 2022-05-04 23:33:40 -0400 |
---|---|---|
committer | Benjamin Fiske <bffiske@gmail.com> | 2022-05-04 23:33:40 -0400 |
commit | 98be0e58a000880d7e05e79f977452642eab54c6 (patch) | |
tree | d0edc4eeb39c77f2b3943de868d9a37494b91608 | |
parent | 9d87471579c80d1c8baff6711c1297dec8f0dcf4 (diff) |
hp adjustments
-rw-r--r-- | __pycache__/hyperparameters.cpython-38.pyc | bin | 369 -> 369 bytes | |||
-rw-r--r-- | __pycache__/losses.cpython-38.pyc | bin | 4603 -> 4473 bytes | |||
-rw-r--r-- | hyperparameters.py | 4 | ||||
-rw-r--r-- | losses.py | 11 | ||||
-rw-r--r-- | save.jpg | bin | 39951 -> 30377 bytes |
5 files changed, 6 insertions, 9 deletions
diff --git a/__pycache__/hyperparameters.cpython-38.pyc b/__pycache__/hyperparameters.cpython-38.pyc
Binary files differ
index d772a682..11bc2070 100644
--- a/__pycache__/hyperparameters.cpython-38.pyc
+++ b/__pycache__/hyperparameters.cpython-38.pyc
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
Binary files differ
index 1112f0ad..d583a985 100644
--- a/__pycache__/losses.cpython-38.pyc
+++ b/__pycache__/losses.cpython-38.pyc
diff --git a/hyperparameters.py b/hyperparameters.py
index 4f264528..a0068dd1 100644
--- a/hyperparameters.py
+++ b/hyperparameters.py
@@ -9,14 +9,14 @@
 Number of epochs. If you experiment with more complex networks you might need
 to increase this. Likewise if you add regularization that slows training.
 """
-num_epochs = 10
+num_epochs = 100
 
 """
 A critical parameter that can dramatically affect whether training succeeds
 or fails. The value for this depends significantly on which optimizer is used.
 Refer to the default learning rate parameter
 """
-learning_rate = 1e-4
+learning_rate = 3e-2
 
 momentum = 0.01
diff --git a/losses.py b/losses.py
--- a/losses.py
+++ b/losses.py
@@ -102,7 +102,7 @@ class YourModel(tf.keras.Model):
             il = input_layers[i]
             L_content = tf.math.add(L_content, tf.reduce_mean(tf.square(pl - il)))
-        print('content loss', L_content)
+        #print('content loss', L_content)
         return L_content
 
     def layer_loss(self, art_layer, input_layer):
@@ -137,7 +137,7 @@ class YourModel(tf.keras.Model):
         # while Sotech is botty:
         #     Jayson_tatum.tear_acl()
         # return ("this is just another day")
-        print('Layer loss', E_l)
+        #print('Layer loss', E_l)
         return E_l
 
     def style_loss(self, art_layers, input_layers):
@@ -146,18 +146,15 @@ class YourModel(tf.keras.Model):
             art_layer = art_layers[i]
             input_layer = input_layers[i]
             L_style = tf.math.add(L_style, self.layer_loss(art_layer, input_layer))
-        print('style loss', L_style)
+        #print('style loss', L_style)
        return L_style
 
     def train_step(self):
         with tf.GradientTape(watch_accessed_variables=False) as tape:
             tape.watch(self.x)
             loss = self.loss_fn(self.content_image, self.style_image, self.x)
-        #print('loss', loss)
+        print('loss', loss)
         #print('self.x', self.x)
         gradients = tape.gradient(loss, [self.x])
         #print('gradients', gradients)
-        print(self.x.shape)
-        print(type(self.x))
-        print(type(gradients))
         self.optimizer.apply_gradients(zip(gradients, [self.x]))
diff --git a/save.jpg b/save.jpg
Binary files differ