From 423c72c1eb93ea7fc6aaeba29e1c9f5e376b9fe6 Mon Sep 17 00:00:00 2001 From: David Doan Date: Sat, 7 May 2022 17:41:40 -0400 Subject: testing --- __pycache__/losses.cpython-38.pyc | Bin 4664 -> 4620 bytes losses.py | 8 ++++---- main.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/__pycache__/losses.cpython-38.pyc b/__pycache__/losses.cpython-38.pyc index d25c70d0..1f2c9bf4 100644 Binary files a/__pycache__/losses.cpython-38.pyc and b/__pycache__/losses.cpython-38.pyc differ diff --git a/losses.py b/losses.py index 51f387a4..99b1eded 100644 --- a/losses.py +++ b/losses.py @@ -11,16 +11,16 @@ class YourModel(tf.keras.Model): def __init__(self, content_image, style_image): #normalize these images to float values super(YourModel, self).__init__() - self.content_image = transform.resize(content_image, tf.shape(style_image), anti_aliasing=True, preserve_range=True).astype('uint8') + self.content_image = transform.resize(content_image, tf.shape(style_image), anti_aliasing=True) self.content_image = tf.expand_dims(self.content_image, axis=0) print(self.content_image) #perhaps consider cropping to avoid distortion - self.style_image = transform.resize(style_image, tf.shape(style_image), anti_aliasing=True, preserve_range=True).astype('uint8') + self.style_image = transform.resize(style_image, tf.shape(style_image), anti_aliasing=True) self.style_image = tf.expand_dims(self.style_image, axis=0) #self.x = tf.Variable(initial_value = self.content_image.numpy().astype(np.float32), trainable=True) self.x = tf.Variable(initial_value = np.random.rand(self.content_image.shape[0], - self.content_image.shape[1], self.content_image.shape[2], self.content_image.shape[3]).astype('uint8'), trainable=True) + self.content_image.shape[1], self.content_image.shape[2], self.content_image.shape[3]).astype(np.float32), trainable=True) self.alpha = hp.alpha self.beta = hp.beta @@ -117,7 +117,7 @@ class YourModel(tf.keras.Model): return (self.alpha * content_l) + 
(self.beta * style_l) def content_loss(self, photo_layers, input_layers): - L_content = tf.constant(0.0).astype('uint8') + L_content = tf.constant(0.0) for i in range(len(photo_layers)): pl = photo_layers[i] il = input_layers[i] diff --git a/main.py b/main.py index 2d8c216a..605ddee7 100644 --- a/main.py +++ b/main.py @@ -54,7 +54,7 @@ def main(): content_image = imread(ARGS.content) style_image = imread(ARGS.style) - style_image = transform.resize(style_image, content_image.shape).astype('uint8') + style_image = transform.resize(style_image, content_image.shape) my_model = YourModel(content_image=content_image, style_image=style_image) my_model.vgg16.build([1, 255, 255, 3]) my_model.vgg16.load_weights('vgg16_imagenet.h5', by_name=True) @@ -62,7 +62,7 @@ def main(): final_image = tf.squeeze(my_model.x) - plt.imshow(final_image) + plt.imshow(final_image) imsave(ARGS.savefile, final_image) -- cgit v1.2.3-70-g09d2