-rw-r--r--  __pycache__/hyperparameters.cpython-38.pyc | bin 440 -> 343 bytes
-rw-r--r--  __pycache__/losses.cpython-38.pyc          | bin 3369 -> 3998 bytes
-rw-r--r--  losses.py                                  | 16
-rw-r--r--  main.py                                    |  3
4 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/__pycache__/hyperparameters.cpython-38.pyc b/__pycache__/hyperparameters.cpython-38.pyc
index 637c2796..9b86a7da 100644
Binary files differ
diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc
index 398e9cb3..9d57ce5b 100644
Binary files differ
diff --git a/losses.py b/losses.py
--- a/losses.py
+++ b/losses.py
@@ -1,6 +1,6 @@
 import tensorflow as tf
 from tensorflow.keras.layers import \
-    Conv2D, MaxPool2D, Dropout, Flatten, Dense, AveragePool2D
+    Conv2D, MaxPool2D, Dropout, Flatten, Dense, AveragePooling2D
 import numpy as np
 
 
@@ -28,13 +28,13 @@ class YourModel(tf.keras.Model):
                    activation="relu", name="block1_conv1"),
             Conv2D(64, 3, 1, padding="same",
                    activation="relu", name="block1_conv2"),
-            AveragePool2D(2, name="block1_pool"),
+            AveragePooling2D(2, name="block1_pool"),
             # Block 2
             Conv2D(128, 3, 1, padding="same",
                    activation="relu", name="block2_conv1"),
             Conv2D(128, 3, 1, padding="same",
                    activation="relu", name="block2_conv2"),
-            AveragePool2D(2, name="block2_pool"),
+            AveragePooling2D(2, name="block2_pool"),
             # Block 3
             Conv2D(256, 3, 1, padding="same",
                    activation="relu", name="block3_conv1"),
@@ -42,7 +42,7 @@ class YourModel(tf.keras.Model):
                    activation="relu", name="block3_conv2"),
             Conv2D(256, 3, 1, padding="same",
                    activation="relu", name="block3_conv3"),
-            AveragePool2D(2, name="block3_pool"),
+            AveragePooling2D(2, name="block3_pool"),
             # Block 4
             Conv2D(512, 3, 1, padding="same",
                    activation="relu", name="block4_conv1"),
@@ -50,7 +50,7 @@ class YourModel(tf.keras.Model):
                    activation="relu", name="block4_conv2"),
             Conv2D(512, 3, 1, padding="same",
                    activation="relu", name="block4_conv3"),
-            AveragePool2D(2, name="block4_pool"),
+            AveragePooling2D(2, name="block4_pool"),
             # Block 5
             Conv2D(512, 3, 1, padding="same",
                    activation="relu", name="block5_conv1"),
@@ -58,7 +58,7 @@ class YourModel(tf.keras.Model):
                    activation="relu", name="block5_conv2"),
             Conv2D(512, 3, 1, padding="same",
                    activation="relu", name="block5_conv3"),
-            AveragePool2D(2, name="block5_pool"),
+            AveragePooling2D(2, name="block5_pool"),
         ]
         for layer in self.vgg16:
             layer.trainable = False
@@ -71,7 +71,7 @@ class YourModel(tf.keras.Model):
         for layer in self.vgg16.layers:
             # pass the x through
             x = layer(x)
-            print("Sotech117 is so so sus")
+            # print("Sotech117 is so so sus")
 
             # save the output of each layer if it is in the desired list
             if layer.name in self.desired:
@@ -126,7 +126,7 @@ class YourModel(tf.keras.Model):
         return L_style
 
     def train_step(self):
-        with tf.GradientTape as tape:
+        with tf.GradientTape() as tape:
             loss = self.loss_fn(self.content_image, self.style_image, self.x)
             gradients = tape.gradient(loss, self.x)
             self.optimizer.apply_gradients(zip(gradients, self.x))
diff --git a/main.py b/main.py
--- a/main.py
+++ b/main.py
@@ -42,7 +42,7 @@ def parse_args():
     return parser.parse_args()
 
 def train(model):
-    for _ in hp.num_epochs:
+    for _ in range(hp.num_epochs):
         model.train_step()
 
 def main():
@@ -52,6 +52,7 @@ def main():
     if os.path.exists(ARGS.style):
         ARGS.style = os.path.abspath(ARGS.style)
     os.chdir(sys.path[0])
+    print('this is',ARGS.content)
 
     content_image = imread(ARGS.content)
     style_image = imread(ARGS.style)
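
The two behavioral fixes in this diff are easy to miss next to the AveragePooling2D renames: tf.GradientTape must be instantiated before it can be used as a context manager, and an integer epoch count must be wrapped in range() to be iterable. Below is a minimal, self-contained sketch of the corrected pattern; the variables x, optimizer, num_epochs, and the placeholder loss are hypothetical stand-ins for self.x, self.optimizer, hp.num_epochs, and the repo's loss_fn, not the repository's actual model.

import tensorflow as tf

x = tf.Variable(tf.random.uniform([4]))      # stands in for self.x, the image being optimized
optimizer = tf.keras.optimizers.Adam(0.01)   # stands in for self.optimizer
num_epochs = 10                              # stands in for hp.num_epochs

def train_step():
    # tf.GradientTape (no parentheses) is the class object, not a tape;
    # entering the context requires an instance, hence tf.GradientTape().
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(tf.square(x))   # placeholder loss
    gradients = tape.gradient(loss, x)
    # apply_gradients expects an iterable of (gradient, variable) pairs.
    optimizer.apply_gradients([(gradients, x)])

# range() makes the integer epoch count iterable; `for _ in num_epochs`
# raises TypeError: 'int' object is not iterable.
for _ in range(num_epochs):
    train_step()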