about summary refs log tree commit diff
path: root/model.py
diff options
context:
space:
mode:
Diffstat (limited to 'model.py')
-rw-r--r--  model.py  9
1 files changed, 5 insertions, 4 deletions
diff --git a/model.py b/model.py
index edf2da44..b83fb55f 100644
--- a/model.py
+++ b/model.py
@@ -11,7 +11,7 @@ class YourModel(tf.keras.Model):
super(YourModel, self).__init__()
# --------------------------------------------------------------------------------------------------------------
- # PART 1 : preprocess/init the CONTENT, STYLE, and CREATION IMAGES #
+ # PART 1 : preprocess/init the CONTENT, STYLE, and CREATION IMAGES
# --------------------------------------------------------------------------------------------------------------
# 1) resize the content and style images to be the same size
self.content_image = transform.resize(content_image, tf.shape(style_image), anti_aliasing=True,
@@ -28,7 +28,7 @@ class YourModel(tf.keras.Model):
self.x = tf.Variable([image])
# --------------------------------------------------------------------------------------------------------------
- # PART 2 : load and configure vgg_16 network use (without classification head) #
+ # PART 2 : load and configure vgg_16 network use (without classification head)
# --------------------------------------------------------------------------------------------------------------
# 1) load the pretrained vgg_16 network
self.vgg16 = tf.keras.applications.VGG16(include_top=False, weights='vgg16_imagenet.h5')
@@ -57,7 +57,7 @@ class YourModel(tf.keras.Model):
self.vgg16 = tf.keras.Model([self.vgg16.input], [p_output, G])
# --------------------------------------------------------------------------------------------------------------
- # PART 3 : assign our optimizers, loss weights, and loss/style targets #
+ # PART 3 : assign our optimizers, loss weights, and loss/style targets
# --------------------------------------------------------------------------------------------------------------
# 1) use the adam optimizer with hyperparameters defined in the hyperparamters.py
self.optimizer = tf.keras.optimizers.Adam(learning_rate=hp.learning_rate, beta_1=hp.beta_1, epsilon=hp.epsilon)
@@ -102,7 +102,7 @@ class YourModel(tf.keras.Model):
gradients = tape.gradient(loss, self.x)
# print the progress of the training and loss
- print('\rEpoch {}: Loss: {:.4f}'.format(epoch, loss), end='')
+ print('\rEpoch {}: Loss: {:.4f}'.format(epoch + 1, loss), end='')
# update the optimizer based on the gradients
self.optimizer.apply_gradients([(gradients, self.x)])
@@ -130,6 +130,7 @@ class YourModel(tf.keras.Model):
@staticmethod
def __get_gram(style_output):
style_shape = tf.shape(style_output)
+ # implement the gram matrix using the product of the indices of the equation
output = tf.linalg.einsum('bijc,bijd->bcd', style_output, style_output)
dimensions = style_shape[1] * style_shape[2]
dimensions = tf.cast(dimensions, tf.float32)