author | David Doan <daviddoan@davids-mbp-3.devices.brown.edu> | 2022-05-04 18:28:49 -0400
committer | David Doan <daviddoan@davids-mbp-3.devices.brown.edu> | 2022-05-04 18:28:49 -0400
commit | 0027c1d0258a0751322cfcaed7f0a81a1367b977 (patch)
tree | 807584f45e71d897763e0535bfaece6d78753d6c /losses.py
parent | e9443420d1297bf9a0f5d6023e4d1117152877de (diff)
tried testing (unsuccessfully)
Diffstat (limited to 'losses.py')
-rw-r--r-- | losses.py | 18
1 file changed, 10 insertions(+), 8 deletions(-)
@@ -12,13 +12,13 @@ class YourModel(tf.keras.Model):
     def __init__(self, content_image, style_image):
         #normalize these images to float values
         super(YourModel, self).__init__()
-        self.content_image = transform.resize(content_image, np.shape(style_image), anti_aliasing=True)
-        self.content_image = np.expand_dims(self.content_image, axis=0)
+        self.content_image = transform.resize(content_image, tf.shape(style_image), anti_aliasing=True)
+        self.content_image = tf.expand_dims(self.content_image, axis=0)
         #perhaps consider cropping to avoid distortion
-        self.style_image = transform.resize(style_image, np.shape(style_image), anti_aliasing=True)
-        self.style_image = np.expand_dims(self.style_image, axis=0)
-        self.x = tf.Variable(tf.random.uniform(np.shape(content_image)), trainable=True)
+        self.style_image = transform.resize(style_image, tf.shape(style_image), anti_aliasing=True)
+        self.style_image = tf.expand_dims(self.style_image, axis=0)
+        self.x = tf.Variable(tf.random.uniform(tf.shape(content_image)), trainable=True)
         self.x = tf.expand_dims(self.x, axis=0)
         self.alpha = hp.alpha
         self.beta = hp.beta
@@ -85,7 +85,7 @@ class YourModel(tf.keras.Model):
             if layer.name in self.desired:
                 layers = np.append(layers, x)
-        return x, layers
+        return x, tf.Variable(layers, dtype=np.float32)

     def loss_fn(self, p, a, x):
         _, photo_layers = self.call(p)
@@ -98,7 +98,7 @@ class YourModel(tf.keras.Model):
         return (self.alpha * content_l) + (self.beta * style_l)

     def content_loss(self, photo_layers, input_layers):
-        L_content = np.mean(np.square(photo_layers - input_layers))
+        L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
         print('content loss', L_content)
         return L_content
@@ -116,13 +116,14 @@ class YourModel(tf.keras.Model):
             for j in range(input_dim):
                 k = np.dot(input_layers[i], art_layers[j])
                 G[i,j] = k
+        G = tf.Variable(G, dtype=np.float32)

         # get the loss per each lateral layer
         # N depends on # of filters in the layer, M depends on hight and width of feature map
         M_l = art_layers.shape[0] * art_layers.shape[1]
         # layer.filters might not work
-        E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
+        E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * tf.reduce_sum(tf.square(G - input_layers))

         # while Sotech is botty:
             # Jayson_tatum.tear_acl()
@@ -141,6 +142,7 @@ class YourModel(tf.keras.Model):
         with tf.GradientTape() as tape:
             loss = self.loss_fn(self.content_image, self.style_image, self.x)
             print('loss', loss)
+            print('self.x', self.x)
         gradients = tape.gradient(loss, self.x)
         print('gradients', gradients)
         self.optimizer.apply_gradients(zip(gradients, self.x))
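The changes above mostly swap NumPy calls (`np.mean`, `np.square`, `np.sum`, `np.expand_dims`) for their TensorFlow equivalents so the loss stays inside the autodiff graph and `tf.GradientTape` can differentiate it with respect to the generated image. Below is a minimal sketch of how the same pieces (content loss, Gram-matrix style term, and the training step) are commonly written with pure TensorFlow ops; the standalone function names, the `num_filters` argument, and the `model.x` / `model.loss_fn` attributes are assumptions drawn from the diff, not the repository's actual interface.

```python
import tensorflow as tf

def content_loss(photo_layers, input_layers):
    # Squared-error distance between the photo's feature maps and the
    # generated image's feature maps, kept as TF ops so gradients flow.
    return tf.reduce_mean(tf.square(photo_layers - input_layers))

def gram_matrix(feature_map):
    # feature_map: (height, width, channels) activations from one conv layer.
    channels = tf.shape(feature_map)[-1]
    flat = tf.reshape(feature_map, [-1, channels])   # (H*W, C)
    return tf.matmul(flat, flat, transpose_a=True)   # (C, C) Gram matrix

def style_layer_loss(art_layer, input_layer, num_filters):
    # E_l from Gatys et al.: squared Gram-matrix difference scaled by
    # 1 / (4 * N_l^2 * M_l^2), where N_l is the filter count and M_l the
    # feature-map height * width.
    m_l = tf.cast(tf.shape(art_layer)[0] * tf.shape(art_layer)[1], tf.float32)
    n_l = tf.cast(num_filters, tf.float32)
    g_art = gram_matrix(art_layer)
    g_input = gram_matrix(input_layer)
    return tf.reduce_sum(tf.square(g_art - g_input)) / (4.0 * n_l**2 * m_l**2)

def train_step(model, optimizer):
    # model.x is assumed to be a tf.Variable holding the generated image.
    with tf.GradientTape() as tape:
        loss = model.loss_fn(model.content_image, model.style_image, model.x)
    grad = tape.gradient(loss, model.x)
    # apply_gradients expects an iterable of (gradient, variable) pairs;
    # zipping a single gradient tensor with a single variable iterates
    # over their first dimensions instead.
    optimizer.apply_gradients([(grad, model.x)])
    return loss
```

One thing worth noting from the diff: `self.x = tf.expand_dims(self.x, axis=0)` replaces the `tf.Variable` with an ordinary tensor, and `GradientTape` only tracks variables (or explicitly watched tensors) automatically, which may be why `gradients` comes back as `None` during testing; creating the variable with the batch dimension already included avoids this.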