aboutsummaryrefslogtreecommitdiff
path: root/losses.py
diff options
context:
space:
mode:
Diffstat (limited to 'losses.py')
-rw-r--r--losses.py25
1 file changed, 16 insertions, 9 deletions
diff --git a/losses.py b/losses.py
index 2cb51e15..bf6f6973 100644
--- a/losses.py
+++ b/losses.py
@@ -68,9 +68,12 @@ class YourModel(tf.keras.Model):
# for layer in self.vgg16:
# layer.trainable = False
- self.indexed_layers = [layer for layer in self.vgg16 if layer.name == "conv1"]
+ self.indexed_layers = [layer for layer in self.vgg16 if "conv1" in layer.name]
print(self.indexed_layers)
- self.desired = [layer.name for layer in self.vgg16 if layer.name == "conv1"]
+ self.desired = [layer.name for layer in self.vgg16 if "conv1" in layer.name]
+
+ # create a map of the layers to their corresponding number of filters if it is a convolutional layer
+ self.layer_to_filters = {layer.name: layer.filters for layer in self.vgg16 if "conv" in layer.name}
def call(self, x):
layers = []
@@ -99,15 +102,18 @@ class YourModel(tf.keras.Model):
return (self.alpha * content_l) + (self.beta * style_l)
def content_loss(self, photo_layers, input_layers):
+ print(photo_layers, input_layers)
L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
print(L_content)
return L_content
- def layer_loss(art_layers, input_layers, layer):
+ def layer_loss(self, art_layers, input_layers, layer):
+ # vectorize the art_layers
+ art_layers = tf.reshape(art_layers, (-1, art_layers.shape[-1]))
+ # vectorize the input_layers
+ input_layers = tf.reshape(input_layers, (-1, input_layers.shape[-1]))
- #vectorize the inputs
- art_vector = art_layers.reshape(-1, 224**2)
- input_vector = input_layers.reshape(-1, 224**2)
+ print('layer', layer.name, self.layer_to_filters[layer.name])
# get the gram matrix
input_dim = input_layers.shape[0]
@@ -122,18 +128,19 @@ class YourModel(tf.keras.Model):
# N depends on # of filters in the layer, M depends on height and width of feature map
M_l = art_layers.shape[0] * art_layers.shape[1]
- # layer.filteres might not work
- E_l = 1/4 * (layer.filters**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
+ # layer.filters might not work
+ E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
# while Sotech is botty:
# Jayson_tatum.tear_acl()
# return ("this is just another day")
+ return E_l
def style_loss(self, art_layers, input_layers):
L_style = 0
for layer in self.indexed_layers:
L_style += self.layer_loss(art_layers, input_layers, layer)
- print('this is style loss',L_style)
+ print('this is style loss', L_style)
return L_style
def train_step(self):