path: root/losses.py
author     Benjamin Fiske <bffiske@gmail.com>   2022-05-04 23:33:40 -0400
committer  Benjamin Fiske <bffiske@gmail.com>   2022-05-04 23:33:40 -0400
commit     98be0e58a000880d7e05e79f977452642eab54c6 (patch)
tree       d0edc4eeb39c77f2b3943de868d9a37494b91608 /losses.py
parent     9d87471579c80d1c8baff6711c1297dec8f0dcf4 (diff)
hp adjustments
Diffstat (limited to 'losses.py')
-rw-r--r--  losses.py | 11
1 file changed, 4 insertions, 7 deletions
diff --git a/losses.py b/losses.py
index c0989ed1..407412a1 100644
--- a/losses.py
+++ b/losses.py
@@ -102,7 +102,7 @@ class YourModel(tf.keras.Model):
             il = input_layers[i]
             L_content = tf.math.add(L_content, tf.reduce_mean(tf.square(pl - il)))
-        print('content loss', L_content)
+        #print('content loss', L_content)
         return L_content

     def layer_loss(self, art_layer, input_layer):
@@ -137,7 +137,7 @@ class YourModel(tf.keras.Model):
         # while Sotech is botty:
         # Jayson_tatum.tear_acl()
         # return ("this is just another day")
-        print('Layer loss', E_l)
+        #print('Layer loss', E_l)
         return E_l

     def style_loss(self, art_layers, input_layers):
@@ -146,18 +146,15 @@ class YourModel(tf.keras.Model):
             art_layer = art_layers[i]
             input_layer = input_layers[i]
             L_style = tf.math.add(L_style, self.layer_loss(art_layer, input_layer))
-        print('style loss', L_style)
+        #print('style loss', L_style)
         return L_style

     def train_step(self):
         with tf.GradientTape(watch_accessed_variables=False) as tape:
             tape.watch(self.x)
             loss = self.loss_fn(self.content_image, self.style_image, self.x)
-            #print('loss', loss)
+            print('loss', loss)
             #print('self.x', self.x)
         gradients = tape.gradient(loss, [self.x])
         #print('gradients', gradients)
-        print(self.x.shape)
-        print(type(self.x))
-        print(type(gradients))
         self.optimizer.apply_gradients(zip(gradients, [self.x]))
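
Note: the hunks above toggle debug prints in a Gatys-style optimization loop, where train_step descends on the input image self.x itself rather than on model weights. Below is a minimal standalone sketch of that pattern for reference; the gram/total_loss helpers, the alpha/beta weights, and the extract_features callable are illustrative assumptions, not the repository's actual loss_fn.

import tensorflow as tf

# Hypothetical stand-ins for the losses accumulated above (content MSE + Gram-matrix style term).
def gram(feat):
    # Flatten spatial dims and take the channel-by-channel correlation matrix.
    flat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
    return tf.matmul(flat, flat, transpose_a=True) / tf.cast(tf.shape(flat)[0], tf.float32)

def total_loss(content_feats, style_feats, x_feats, alpha=1.0, beta=1e4):
    # Content term: mean squared error between feature maps, mirroring L_content above.
    l_content = tf.add_n([tf.reduce_mean(tf.square(c - g))
                          for c, g in zip(content_feats, x_feats)])
    # Style term: squared Gram-matrix differences summed over layers, mirroring layer_loss/L_style.
    l_style = tf.add_n([tf.reduce_mean(tf.square(gram(s) - gram(g)))
                        for s, g in zip(style_feats, x_feats)])
    return alpha * l_content + beta * l_style

optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)

def train_step(x, content_feats, style_feats, extract_features):
    # x must be a tf.Variable holding the image being optimized (the role self.x plays above).
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(x)
        loss = total_loss(content_feats, style_feats, extract_features(x))
    gradients = tape.gradient(loss, [x])
    optimizer.apply_gradients(zip(gradients, [x]))
    return loss

The split into precomputed content/style features plus an explicit extract_features callable is only for the sketch's self-containment; in the diff, loss_fn takes the raw content, style, and generated images and does the feature extraction internally.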