author     David Doan <daviddoan@davids-mbp-3.devices.brown.edu>  2022-05-04 18:28:49 -0400
committer  David Doan <daviddoan@davids-mbp-3.devices.brown.edu>  2022-05-04 18:28:49 -0400
commit     0027c1d0258a0751322cfcaed7f0a81a1367b977 (patch)
tree       807584f45e71d897763e0535bfaece6d78753d6c
parent     e9443420d1297bf9a0f5d6023e4d1117152877de (diff)
tried testing (unsuccessfully)
-rw-r--r--  __pycache__/losses.cpython-38.pyc  bin  4473 -> 4563 bytes
-rw-r--r--  losses.py  18
2 files changed, 10 insertions, 8 deletions
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
index e157ea9c..67d4f227 100644
--- a/__pycache__/losses.cpython-38.pyc
+++ b/__pycache__/losses.cpython-38.pyc
Binary files differ
diff --git a/losses.py b/losses.py
index 297bd2f1..e39d0f8e 100644
--- a/losses.py
+++ b/losses.py
@@ -12,13 +12,13 @@ class YourModel(tf.keras.Model):
def __init__(self, content_image, style_image): #normalize these images to float values
super(YourModel, self).__init__()
- self.content_image = transform.resize(content_image, np.shape(style_image), anti_aliasing=True)
- self.content_image = np.expand_dims(self.content_image, axis=0)
+ self.content_image = transform.resize(content_image, tf.shape(style_image), anti_aliasing=True)
+ self.content_image = tf.expand_dims(self.content_image, axis=0)
#perhaps consider cropping to avoid distortion
- self.style_image = transform.resize(style_image, np.shape(style_image), anti_aliasing=True)
- self.style_image = np.expand_dims(self.style_image, axis=0)
- self.x = tf.Variable(tf.random.uniform(np.shape(content_image)), trainable=True)
+ self.style_image = transform.resize(style_image, tf.shape(style_image), anti_aliasing=True)
+ self.style_image = tf.expand_dims(self.style_image, axis=0)
+ self.x = tf.Variable(tf.random.uniform(tf.shape(content_image)), trainable=True)
self.x = tf.expand_dims(self.x, axis=0)
self.alpha = hp.alpha
self.beta = hp.beta
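A note on the hunk above: skimage's transform.resize expects a concrete shape sequence, and tf.random.uniform also wants a plain shape, while tf.shape returns a symbolic tensor. A minimal sketch of how the image setup could look, assuming the same skimage/TensorFlow imports as losses.py (prepare_images is an illustrative helper, not part of the commit):

import numpy as np
import tensorflow as tf
from skimage import transform

def prepare_images(content_image, style_image):
    # skimage operates on NumPy arrays, so take the plain shape of the style
    # image rather than the tensor returned by tf.shape.
    target_shape = np.shape(style_image)
    content = transform.resize(content_image, target_shape, anti_aliasing=True)
    style = transform.resize(style_image, target_shape, anti_aliasing=True)

    # Add a batch dimension and cast to float32 for the network.
    content = tf.expand_dims(tf.cast(content, tf.float32), axis=0)
    style = tf.expand_dims(tf.cast(style, tf.float32), axis=0)

    # The generated image is the trainable quantity; give it its final
    # (batched) shape up front so it never has to be re-wrapped afterwards,
    # which would replace the tf.Variable with a plain tensor.
    x = tf.Variable(tf.random.uniform(content.shape), trainable=True)
    return content, style, x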
@@ -85,7 +85,7 @@ class YourModel(tf.keras.Model):
if layer.name in self.desired:
layers = np.append(layers, x)
- return x, layers
+ return x, tf.Variable(layers, dtype=np.float32)
def loss_fn(self, p, a, x):
_, photo_layers = self.call(p)
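On the hunk above: np.append flattens TensorFlow tensors into a 1-D NumPy array, and wrapping the result in a fresh tf.Variable copies the values, which detaches the layer activations from the graph, so no gradient can flow from the loss back to the input. A minimal sketch of collecting the desired activations in a plain Python list instead; the loop over self.vgg16 is an assumption about how the forward pass is structured, since only the inner condition appears in the diff:

def call(self, x):
    # Keep the activations of the desired layers as plain tensors; a Python
    # list (or tf.stack if the shapes match) preserves the gradient path
    # back to x, which a tf.Variable copy would sever.
    layers = []
    for layer in self.vgg16:
        x = layer(x)
        if layer.name in self.desired:
            layers.append(x)
    return x, layers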
@@ -98,7 +98,7 @@ class YourModel(tf.keras.Model):
return (self.alpha * content_l) + (self.beta * style_l)
def content_loss(self, photo_layers, input_layers):
- L_content = np.mean(np.square(photo_layers - input_layers))
+ L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
print('content loss', L_content)
return L_content
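On the content-loss hunk: moving from np.mean/np.square to tf.reduce_mean/tf.square keeps the computation inside TensorFlow and therefore differentiable. For comparison, Gatys et al. define the content loss as half the sum of squared differences over one layer's responses; a mean of squares only rescales that by a constant. A minimal sketch, assuming photo_layers and input_layers are tensors of the same shape:

def content_loss(self, photo_layers, input_layers):
    # 1/2 * sum of squared differences between the feature responses,
    # as in Gatys et al.; tf.reduce_mean differs only by a constant factor.
    return 0.5 * tf.reduce_sum(tf.square(photo_layers - input_layers))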
@@ -116,13 +116,14 @@ class YourModel(tf.keras.Model):
for j in range(input_dim):
k = np.dot(input_layers[i], art_layers[j])
G[i,j] = k
+ G = tf.Variable(G, dtype=np.float32)
# get the loss per each lateral layer
# N depends on # of filters in the layer, M depends on height and width of feature map
M_l = art_layers.shape[0] * art_layers.shape[1]
# layer.filters might not work
- E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
+ E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * tf.reduce_sum(tf.square(G - input_layers))
# while Sotech is botty:
# Jayson_tatum.tear_acl()
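On the style-loss hunk: filling a NumPy array G inside nested Python loops and then wrapping it in tf.Variable leaves TensorFlow with no gradient path through the Gram matrix, and np.dot of two activation maps is not the Gram matrix of Gatys et al., which correlates flattened feature maps of a single image. A minimal sketch of a differentiable Gram matrix and per-layer style term, assuming each layer's activations have shape (height, width, filters); taking N from the last axis stands in for the layer_to_filters lookup used in the file:

def gram_matrix(features):
    # Flatten the spatial dimensions: (H, W, N) -> (H*W, N), then G = F^T @ F,
    # an (N, N) matrix of filter correlations, built entirely from TF ops so
    # gradients can flow through it.
    flat = tf.reshape(features, [-1, features.shape[-1]])
    return tf.matmul(flat, flat, transpose_a=True)

def layer_style_loss(input_layers, art_layers):
    # E_l = 1/(4 N^2 M^2) * sum((G_input - G_art)^2), per Gatys et al.
    n = float(art_layers.shape[-1])                       # number of filters
    m = float(art_layers.shape[0] * art_layers.shape[1])  # height * width
    g_input = gram_matrix(input_layers)
    g_art = gram_matrix(art_layers)
    return tf.reduce_sum(tf.square(g_input - g_art)) / (4.0 * n**2 * m**2)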
@@ -141,6 +142,7 @@ class YourModel(tf.keras.Model):
with tf.GradientTape() as tape:
loss = self.loss_fn(self.content_image, self.style_image, self.x)
print('loss', loss)
+ print('self.x', self.x)
gradients = tape.gradient(loss, self.x)
print('gradients', gradients)
self.optimizer.apply_gradients(zip(gradients, self.x))
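On the train-step hunk: optimizer.apply_gradients expects an iterable of (gradient, variable) pairs, so zip(gradients, self.x), where gradients is a single tensor and self.x a single variable, iterates over their first dimensions instead of pairing them. tape.gradient will also return None for self.x once the earlier tf.expand_dims has replaced the variable with a plain tensor. A minimal sketch of the step, assuming self.x stays a tf.Variable and loss_fn takes (content, style, x) as in the file:

def train_step(self):
    with tf.GradientTape() as tape:
        loss = self.loss_fn(self.content_image, self.style_image, self.x)
    # tape.gradient returns one gradient per listed variable; apply_gradients
    # wants matching (gradient, variable) pairs.
    gradients = tape.gradient(loss, [self.x])
    self.optimizer.apply_gradients(zip(gradients, [self.x]))
    return loss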