aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--__pycache__/hyperparameters.cpython-38.pycbin343 -> 335 bytes
-rw-r--r--__pycache__/losses.cpython-38.pycbin4236 -> 4495 bytes
-rw-r--r--__pycache__/preprocess.cpython-38.pycbin5048 -> 5040 bytes
-rw-r--r--losses.py25
-rw-r--r--main.py2
5 files changed, 18 insertions, 9 deletions
diff --git a/__pycache__/hyperparameters.cpython-38.pyc b/__pycache__/hyperparameters.cpython-38.pyc
index 9b86a7da..8654cf2c 100644
--- a/__pycache__/hyperparameters.cpython-38.pyc
+++ b/__pycache__/hyperparameters.cpython-38.pyc
Binary files differ
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
index ebfd7772..c409cebf 100644
--- a/__pycache__/losses.cpython-38.pyc
+++ b/__pycache__/losses.cpython-38.pyc
Binary files differ
diff --git a/__pycache__/preprocess.cpython-38.pyc b/__pycache__/preprocess.cpython-38.pyc
index e2f42bca..a4fcfb04 100644
--- a/__pycache__/preprocess.cpython-38.pyc
+++ b/__pycache__/preprocess.cpython-38.pyc
Binary files differ
diff --git a/losses.py b/losses.py
index 2cb51e15..bf6f6973 100644
--- a/losses.py
+++ b/losses.py
@@ -68,9 +68,12 @@ class YourModel(tf.keras.Model):
# for layer in self.vgg16:
# layer.trainable = False
- self.indexed_layers = [layer for layer in self.vgg16 if layer.name == "conv1"]
+ self.indexed_layers = [layer for layer in self.vgg16 if "conv1" in layer.name]
print(self.indexed_layers)
- self.desired = [layer.name for layer in self.vgg16 if layer.name == "conv1"]
+ self.desired = [layer.name for layer in self.vgg16 if "conv1" in layer.name]
+
+ # create a map of the layers to their corresponding number of filters if it is a convolutional layer
+ self.layer_to_filters = {layer.name: layer.filters for layer in self.vgg16 if "conv" in layer.name}
def call(self, x):
layers = []
@@ -99,15 +102,18 @@ class YourModel(tf.keras.Model):
return (self.alpha * content_l) + (self.beta * style_l)
def content_loss(self, photo_layers, input_layers):
+ print(photo_layers, input_layers)
L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
print(L_content)
return L_content
- def layer_loss(art_layers, input_layers, layer):
+ def layer_loss(self, art_layers, input_layers, layer):
+ # vectorize the art_layers
+ art_layers = tf.reshape(art_layers, (-1, art_layers.shape[-1]))
+ # vectorize the input_layers
+ input_layers = tf.reshape(input_layers, (-1, input_layers.shape[-1]))
- #vectorize the inputs
- art_vector = art_layers.reshape(-1, 224**2)
- input_vector = input_layers.reshape(-1, 224**2)
+ print('layer', layer.name, self.layer_to_filters[layer.name])
# get the gram matrix
input_dim = input_layers.shape[0]
@@ -122,18 +128,19 @@ class YourModel(tf.keras.Model):
# N depends on # of filters in the layer, M depends on height and width of feature map
M_l = art_layers.shape[0] * art_layers.shape[1]
- # layer.filteres might not work
- E_l = 1/4 * (layer.filters**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
+ # layer.filters might not work
+ E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
# while Sotech is botty:
# Jayson_tatum.tear_acl()
# return ("this is just another day")
+ return E_l
def style_loss(self, art_layers, input_layers):
L_style = 0
for layer in self.indexed_layers:
L_style += self.layer_loss(art_layers, input_layers, layer)
- print('this is style loss',L_style)
+ print('this is style loss', L_style)
return L_style
def train_step(self):
diff --git a/main.py b/main.py
index 063670b8..8363fcef 100644
--- a/main.py
+++ b/main.py
@@ -55,7 +55,9 @@ def main():
print('this is',ARGS.content)
content_image = imread(ARGS.content)
+ content_image = np.resize(content_image, (255, 255, 3))
style_image = imread(ARGS.style)
+ style_image = np.resize(style_image, (255, 255, 3))
my_model = YourModel(content_image=content_image, style_image=style_image)
train(my_model)