author    David Doan <daviddoan@davids-mbp-3.devices.brown.edu>  2022-05-04 18:22:04 -0400
committer David Doan <daviddoan@davids-mbp-3.devices.brown.edu>  2022-05-04 18:22:04 -0400
commit    e9443420d1297bf9a0f5d6023e4d1117152877de (patch)
tree      44e8b061980f4c5c8181166105a485c168a86d65 /losses.py
parent    53cef1b18f12287f187776aecf1c8f5ba7c04b87 (diff)
tried testing (unsuccessfully)
Diffstat (limited to 'losses.py')
-rw-r--r--  losses.py  10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/losses.py b/losses.py
index 16ca278e..297bd2f1 100644
--- a/losses.py
+++ b/losses.py
@@ -65,8 +65,8 @@ class YourModel(tf.keras.Model):
activation="relu", name="block5_conv3"),
AveragePooling2D(2, name="block5_pool"),
]
- # for layer in self.vgg16:
- # layer.trainable = False
+ for layer in self.vgg16:
+ layer.trainable = False
self.indexed_layers = [layer for layer in self.vgg16 if "conv1" in layer.name]
self.desired = [layer.name for layer in self.vgg16 if "conv1" in layer.name]
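
This hunk un-comments the freeze loop so the hand-built VGG16 stack stays fixed and only the generated image is optimized, which is the standard setup for neural style transfer. A minimal sketch of the same idea using a stock pretrained backbone (tf.keras.applications.VGG16 here is an assumption for illustration, not the repository's hand-built layer list):

# Sketch only: freeze a pretrained VGG16 backbone so its weights never update;
# only the synthesized image variable receives gradients during training.
import tensorflow as tf

vgg = tf.keras.applications.VGG16(include_top=False, weights="imagenet")
vgg.trainable = False          # freezes the whole model at once
for layer in vgg.layers:       # equivalent to the per-layer loop in the diff
    layer.trainable = False
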
@@ -98,7 +98,8 @@ class YourModel(tf.keras.Model):
return (self.alpha * content_l) + (self.beta * style_l)
def content_loss(self, photo_layers, input_layers):
- L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
+ L_content = np.mean(np.square(photo_layers - input_layers))
+ print('content loss', L_content)
return L_content
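
One caveat on this hunk: np.mean returns a plain NumPy scalar, so the computation leaves the TensorFlow graph and tf.GradientTape in train_step can no longer differentiate the loss with respect to self.x (a likely cause of the unsuccessful test). A hedged sketch of a content loss that stays in TF ops (argument names follow the diff; the function itself is illustrative):

import tensorflow as tf

def content_loss(photo_layers, input_layers):
    # Stay in TensorFlow ops so gradients can flow back to the generated image.
    diff = tf.cast(photo_layers, tf.float32) - tf.cast(input_layers, tf.float32)
    return tf.reduce_mean(tf.square(diff))
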
def layer_loss(self, art_layers, input_layers, layer):
@@ -126,17 +127,20 @@ class YourModel(tf.keras.Model):
# while Sotech is botty:
# Jayson_tatum.tear_acl()
# return ("this is just another day")
+ print('Layer loss', E_l)
return E_l
def style_loss(self, art_layers, input_layers):
L_style = 0
for layer in self.indexed_layers:
L_style += self.layer_loss(art_layers, input_layers, layer)
+ print('style loss', L_style)
return L_style
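
style_loss sums a per-layer term E_l over the selected "conv1" layers. The body of layer_loss is not shown in this diff; in the usual Gatys-style formulation each E_l compares Gram matrices of the style (art) and generated feature maps. A sketch under that assumption (gram_matrix and the shape handling are illustrative, not the repository's code):

import tensorflow as tf

def gram_matrix(features):
    # features: (height, width, channels) activations from one conv layer.
    h, w, c = features.shape
    flat = tf.reshape(features, (h * w, c))
    return tf.matmul(flat, flat, transpose_a=True) / tf.cast(h * w, tf.float32)

def layer_loss(art_features, input_features):
    # E_l: mean squared difference between the two Gram matrices.
    return tf.reduce_mean(
        tf.square(gram_matrix(art_features) - gram_matrix(input_features)))
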
def train_step(self):
with tf.GradientTape() as tape:
loss = self.loss_fn(self.content_image, self.style_image, self.x)
+ print('loss', loss)
gradients = tape.gradient(loss, self.x)
print('gradients', gradients)
self.optimizer.apply_gradients(zip(gradients, self.x))
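
A note on this hunk: optimizer.apply_gradients expects an iterable of (gradient, variable) pairs, and tape.gradient returns a single tensor when the target is a single tf.Variable, so zip(gradients, self.x) would pair per-row slices rather than the whole image (and the gradient will be None if the loss left the TF graph, as with the np.mean change above). A minimal sketch of a working step, assuming self.x is one tf.Variable holding the generated image:

import tensorflow as tf

def train_step(self):
    with tf.GradientTape() as tape:
        loss = self.loss_fn(self.content_image, self.style_image, self.x)
    grad = tape.gradient(loss, self.x)                 # same shape as self.x
    self.optimizer.apply_gradients([(grad, self.x)])   # one (gradient, variable) pair
    return loss
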