author | Michael Foiani <sotech117@michaels-mbp-3.devices.brown.edu> | 2022-05-04 17:59:18 -0400 |
---|---|---|
committer | Michael Foiani <sotech117@michaels-mbp-3.devices.brown.edu> | 2022-05-04 17:59:18 -0400 |
commit | 53cef1b18f12287f187776aecf1c8f5ba7c04b87 (patch) | |
tree | b07cf28766f1b169366f070dc46683453e31e1df | |
parent | b83cd8eba837348bc194db8c1dd12e369602e4ac (diff) |
fix return error - gradients is none now
-rw-r--r-- | __pycache__/losses.cpython-38.pyc | bin | 4495 -> 4316 bytes |
-rw-r--r-- | losses.py | 18 |
2 files changed, 4 insertions, 14 deletions
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
Binary files differ
diff --git a/losses.py b/losses.py
index c409cebf..e9a8d331 100644
--- a/losses.py
+++ b/losses.py
@@ -69,26 +69,23 @@ class YourModel(tf.keras.Model):
         #     layer.trainable = False
 
         self.indexed_layers = [layer for layer in self.vgg16 if "conv1" in layer.name]
-        print(self.indexed_layers)
         self.desired = [layer.name for layer in self.vgg16 if "conv1" in layer.name]
 
         # create a map of the layers to their corresponding number of filters if it is a convolutional layer
         self.layer_to_filters = {layer.name: layer.filters for layer in self.vgg16 if "conv" in layer.name}
 
     def call(self, x):
-        layers = []
+        layers = np.empty(0)
         for layer in self.vgg16:
             # pass the x through
-            print('this is', x.shape)
-            print(layer.name)
             x = layer(x)
             # print("Sotech117 is so so sus")
 
             # save the output of each layer if it is in the desired list
             if layer.name in self.desired:
-                layers.append(x)
+                layers = np.append(layers, x)
 
-        return x, np.array(layers)
+        return x, layers
 
     def loss_fn(self, p, a, x):
         _, photo_layers = self.call(p)
@@ -98,13 +95,10 @@ class YourModel(tf.keras.Model):
         content_l = self.content_loss(photo_layers, input_layers)
         style_l = self.style_loss(art_layers, input_layers)
         # Equation 7
-        print(content_l, style_l)
         return (self.alpha * content_l) + (self.beta * style_l)
 
     def content_loss(self, photo_layers, input_layers):
-        print(photo_layers, input_layers)
         L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
-        print(L_content)
         return L_content
 
     def layer_loss(self, art_layers, input_layers, layer):
@@ -113,8 +107,6 @@ class YourModel(tf.keras.Model):
         # vectorize the input_layers
         input_layers = tf.reshape(input_layers, (-1, input_layers.shape[-1]))
 
-        print('layer', layer.name, self.layer_to_filters[layer.name])
-
         # get the gram matrix
         input_dim = input_layers.shape[0]
         G = np.zeros((input_dim, input_dim))
@@ -140,15 +132,13 @@ class YourModel(tf.keras.Model):
         L_style = 0
         for layer in self.indexed_layers:
             L_style += self.layer_loss(art_layers, input_layers, layer)
-        print('this is style loss', L_style)
         return L_style
 
     def train_step(self):
         with tf.GradientTape() as tape:
             loss = self.loss_fn(self.content_image, self.style_image, self.x)
-            print(loss)
 
         gradients = tape.gradient(loss, self.x)
-        print(gradients)
+        print('gradients', gradients)
 
         self.optimizer.apply_gradients(zip(gradients, self.x))
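The `layer_loss` hunk above assembles the Gram matrix into a NumPy array (`G = np.zeros(...)`), which TensorFlow cannot backpropagate through. A minimal sketch of a tape-friendly equivalent, assuming the same `(-1, channels)` reshape used in the diff (the `gram_matrix` name is illustrative, not from this repository):

```python
import tensorflow as tf

def gram_matrix(activations):
    # Flatten spatial positions, keep the channel axis: F has shape (H*W, C).
    F = tf.reshape(activations, (-1, activations.shape[-1]))
    # G[i, j] = sum_k F[k, i] * F[k, j]; built from TF ops only,
    # so gradients can flow back through the style loss.
    return tf.linalg.matmul(F, F, transpose_a=True)  # shape (C, C)
```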
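On the commit subject ("gradients is none now"): `tf.GradientTape.gradient` returns `None` when the loss is not connected to the watched tensor through differentiable TensorFlow ops, which happens as soon as layer activations are copied into NumPy (e.g. via `np.append`) inside the forward pass. A hedged sketch of a `train_step` that keeps the gradient path intact, assuming `self.x` is the image being optimized and is stored as a `tf.Variable` (names follow the diff; this is not the repository's final code):

```python
import tensorflow as tf

def train_step(self):
    # self.x must be a tf.Variable for the optimizer to update it in place.
    with tf.GradientTape() as tape:
        loss = self.loss_fn(self.content_image, self.style_image, self.x)

    gradient = tape.gradient(loss, self.x)
    # gradient is None if loss_fn leaves the TF graph (NumPy conversions, etc.).
    # apply_gradients expects an iterable of (gradient, variable) pairs.
    self.optimizer.apply_gradients([(gradient, self.x)])
```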