author | Michael Foiani <sotech117@michaels-mbp-3.devices.brown.edu> | 2022-05-04 17:37:55 -0400
---|---|---
committer | Michael Foiani <sotech117@michaels-mbp-3.devices.brown.edu> | 2022-05-04 17:37:55 -0400
commit | b83cd8eba837348bc194db8c1dd12e369602e4ac |
tree | 843664e430f3f5d5f32d1e34de5a034ce5c934ae |
parent | 8f9fbd734209dd21a7f9a070200ebd31de207a17 |
got it to run, but it hangs
-rw-r--r-- | __pycache__/hyperparameters.cpython-38.pyc | bin | 343 -> 335 bytes
-rw-r--r-- | __pycache__/losses.cpython-38.pyc | bin | 4236 -> 4495 bytes
-rw-r--r-- | __pycache__/preprocess.cpython-38.pyc | bin | 5048 -> 5040 bytes
-rw-r--r-- | losses.py | 25
-rw-r--r-- | main.py | 2 |
5 files changed, 18 insertions, 9 deletions
```diff
diff --git a/__pycache__/hyperparameters.cpython-38.pyc b/__pycache__/hyperparameters.cpython-38.pyc
index 9b86a7da..8654cf2c 100644
--- a/__pycache__/hyperparameters.cpython-38.pyc
+++ b/__pycache__/hyperparameters.cpython-38.pyc
Binary files differ
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
index ebfd7772..c409cebf 100644
--- a/__pycache__/losses.cpython-38.pyc
+++ b/__pycache__/losses.cpython-38.pyc
Binary files differ
diff --git a/__pycache__/preprocess.cpython-38.pyc b/__pycache__/preprocess.cpython-38.pyc
index e2f42bca..a4fcfb04 100644
--- a/__pycache__/preprocess.cpython-38.pyc
+++ b/__pycache__/preprocess.cpython-38.pyc
Binary files differ
diff --git a/losses.py b/losses.py
--- a/losses.py
+++ b/losses.py
@@ -68,9 +68,12 @@ class YourModel(tf.keras.Model):
         # for layer in self.vgg16:
         #     layer.trainable = False
 
-        self.indexed_layers = [layer for layer in self.vgg16 if layer.name == "conv1"]
+        self.indexed_layers = [layer for layer in self.vgg16 if "conv1" in layer.name]
         print(self.indexed_layers)
-        self.desired = [layer.name for layer in self.vgg16 if layer.name == "conv1"]
+        self.desired = [layer.name for layer in self.vgg16 if "conv1" in layer.name]
+
+        # create a map of the layers to their corresponding number of filters if it is a convolutional layer
+        self.layer_to_filters = {layer.name: layer.filters for layer in self.vgg16 if "conv" in layer.name}
 
     def call(self, x):
         layers = []
@@ -99,15 +102,18 @@ class YourModel(tf.keras.Model):
         return (self.alpha * content_l) + (self.beta * style_l)
 
     def content_loss(self, photo_layers, input_layers):
+        print(photo_layers, input_layers)
         L_content = tf.reduce_mean(tf.square(photo_layers - input_layers))
         print(L_content)
         return L_content
 
-    def layer_loss(art_layers, input_layers, layer):
+    def layer_loss(self, art_layers, input_layers, layer):
+        # vectorize the art_layers
+        art_layers = tf.reshape(art_layers, (-1, art_layers.shape[-1]))
+        # vectorize the input_layers
+        input_layers = tf.reshape(input_layers, (-1, input_layers.shape[-1]))
 
-        #vectorize the inputs
-        art_vector = art_layers.reshape(-1, 224**2)
-        input_vector = input_layers.reshape(-1, 224**2)
+        print('layer', layer.name, self.layer_to_filters[layer.name])
 
         # get the gram matrix
         input_dim = input_layers.shape[0]
@@ -122,18 +128,19 @@ class YourModel(tf.keras.Model):
         # N depends on # of filters in the layer, M depends on hight and width of feature map
         M_l = art_layers.shape[0] * art_layers.shape[1]
 
-        # layer.filteres might not work
-        E_l = 1/4 * (layer.filters**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
+        # layer.filters might not work
+        E_l = 1/4 * (self.layer_to_filters[layer.name]**(-2)) * (M_l**(-2)) * np.sum(np.square(G - input_layers))
         # while Sotech is botty:
         #     Jayson_tatum.tear_acl()
         #     return ("this is just another day")
+
         return E_l
 
     def style_loss(self, art_layers, input_layers):
         L_style = 0
         for layer in self.indexed_layers:
             L_style += self.layer_loss(art_layers, input_layers, layer)
-        print('this is style loss',L_style)
+        print('this is style loss', L_style)
         return L_style
 
     def train_step(self):
diff --git a/main.py b/main.py
--- a/main.py
+++ b/main.py
@@ -55,7 +55,9 @@ def main():
     print('this is',ARGS.content)
 
     content_image = imread(ARGS.content)
+    content_image = np.resize(content_image, (255, 255, 3))
     style_image = imread(ARGS.style)
+    style_image = np.resize(style_image, (255, 255, 3))
 
     my_model = YourModel(content_image=content_image, style_image=style_image)
     train(my_model)
```
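For reference, the style term being assembled in `losses.py` is the Gatys et al. per-layer loss E_l = 1 / (4 * N_l^2 * M_l^2) * sum((G - A)^2), where G and A are the Gram matrices of the generated and style feature maps and N_l and M_l are the layer's filter count and feature-map size. Below is a minimal TensorFlow sketch of that formula, assuming each feature map is a float tensor of shape (H, W, N_l); `gram_matrix` and `layer_style_loss` are illustrative names, not functions from this repository, and unlike the hunk above, which compares G against the raw activations, the sketch compares two Gram matrices as the published formula does.

```python
import tensorflow as tf


def gram_matrix(features):
    """Gram matrix of a single feature map of shape (H, W, N_l)."""
    n_filters = features.shape[-1]                    # N_l
    flat = tf.reshape(features, (-1, n_filters))      # (M_l, N_l), with M_l = H * W
    return tf.matmul(flat, flat, transpose_a=True)    # (N_l, N_l)


def layer_style_loss(art_features, input_features):
    """E_l = 1 / (4 * N_l^2 * M_l^2) * sum((G - A)^2) for one conv layer."""
    n_l = tf.cast(tf.shape(art_features)[-1], tf.float32)                              # filters
    m_l = tf.cast(tf.shape(art_features)[0] * tf.shape(art_features)[1], tf.float32)   # H * W
    g = gram_matrix(input_features)   # Gram matrix of the generated image's features
    a = gram_matrix(art_features)     # Gram matrix of the style image's features
    return tf.reduce_sum(tf.square(g - a)) / (4.0 * n_l ** 2 * m_l ** 2)
```

Summing `layer_style_loss` over the conv layers collected in `self.indexed_layers` gives the total that `style_loss` accumulates above.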