import tensorflow as tf
from tensorflow.keras.layers import \
    Conv2D, MaxPool2D, Dropout, Flatten, Dense

import numpy as np

import hyperparameters as hp


class YourModel(tf.keras.Model):
    """ Your own neural network model. """

    def __init__(self):
        super(YourModel, self).__init__()

        # Weights on the content and style terms of the total loss
        # (Equation 7).
        self.alpha = 1
        self.beta = 1

        self.optimizer = tf.keras.optimizers.RMSprop(
            learning_rate=1e-4, momentum=0.01)

        self.vgg16 = [
            # Block 1
            Conv2D(64, 3, 1, padding="same",
                   activation="relu", name="block1_conv1"),
            Conv2D(64, 3, 1, padding="same",
                   activation="relu", name="block1_conv2"),
            MaxPool2D(2, name="block1_pool"),
            # Block 2
            Conv2D(128, 3, 1, padding="same",
                   activation="relu", name="block2_conv1"),
            Conv2D(128, 3, 1, padding="same",
                   activation="relu", name="block2_conv2"),
            MaxPool2D(2, name="block2_pool"),
            # Block 3
            Conv2D(256, 3, 1, padding="same",
                   activation="relu", name="block3_conv1"),
            Conv2D(256, 3, 1, padding="same",
                   activation="relu", name="block3_conv2"),
            Conv2D(256, 3, 1, padding="same",
                   activation="relu", name="block3_conv3"),
            MaxPool2D(2, name="block3_pool"),
            # Block 4
            Conv2D(512, 3, 1, padding="same",
                   activation="relu", name="block4_conv1"),
            Conv2D(512, 3, 1, padding="same",
                   activation="relu", name="block4_conv2"),
            Conv2D(512, 3, 1, padding="same",
                   activation="relu", name="block4_conv3"),
            MaxPool2D(2, name="block4_pool"),
            # Block 5
            Conv2D(512, 3, 1, padding="same",
                   activation="relu", name="block5_conv1"),
            Conv2D(512, 3, 1, padding="same",
                   activation="relu", name="block5_conv2"),
            Conv2D(512, 3, 1, padding="same",
                   activation="relu", name="block5_conv3"),
            MaxPool2D(2, name="block5_pool"),
        ]

        self.head = [
            # Dropout(.2),
            # Dense(256, activation='silu'),
            # Dense(512, activation='silu'),
            # Dropout(.3),
            # tf.keras.layers.GlobalAveragePooling2D(),
            # Dense(15, activation='softmax')
        ]

        # self.vgg16 = tf.keras.Sequential(self.vgg16, name="vgg_base")
        # self.head = tf.keras.Sequential(self.head, name="vgg_head")

        # Layer names follow the "blockN_convM" pattern, so match on the
        # suffix; an exact comparison against "conv1" never matches anything.
        self.indexed_layers = [
            layer for layer in self.vgg16 if layer.name.endswith("conv1")]
        self.desired = [layer.name for layer in self.indexed_layers]

    def call(self, x):
        layers = []
        # self.vgg16 is a plain Python list, so iterate it directly
        # (a list has no .layers attribute).
        for layer in self.vgg16:
            # Pass x through the layer.
            x = layer(x)
            # Save the output of each layer if it is in the desired list.
            if layer.name in self.desired:
                layers.append(x)
        # Return the saved activations as a list: they have different
        # spatial sizes, so they cannot be stacked into a single np.array.
        return x, layers

    def loss_fn(self, p, a, x):
        _, photo_layers = self.call(p)
        _, art_layers = self.call(a)
        _, input_layers = self.call(x)

        content_l = self.content_loss(photo_layers, input_layers)
        style_l = self.style_loss(art_layers, input_layers)

        # Equation 7
        return (self.alpha * content_l) + (self.beta * style_l)

    def content_loss(self, photo_layers, input_layers):
        # Squared-error between the photo's activations and the generated
        # image's activations, summed over the saved layers.
        L_content = 0
        for photo, gen in zip(photo_layers, input_layers):
            L_content += tf.reduce_mean(tf.square(photo - gen))
        return L_content

    def layer_loss(self, art_layers, input_layers, layer):
        # Vectorize the inputs: activations have shape (batch, H, W, C),
        # so flatten the spatial dimensions and transpose to get an
        # (N_l, M_l) matrix with one row per filter. Hard-coding 224**2
        # would be wrong after pooling shrinks the feature maps.
        N_l = layer.filters
        art_vector = tf.transpose(tf.reshape(art_layers, (-1, N_l)))
        input_vector = tf.transpose(tf.reshape(input_layers, (-1, N_l)))

        # Get the Gram matrices: G for the generated image and A for the
        # artwork, where entry (i, j) is the dot product of the responses
        # of filters i and j.
        G = tf.matmul(input_vector, input_vector, transpose_b=True)
        A = tf.matmul(art_vector, art_vector, transpose_b=True)

        # Loss for this layer: N_l depends on the number of filters in the
        # layer, M_l on the height and width of the feature map.
        M_l = art_vector.shape[1]
        E_l = tf.reduce_sum(tf.square(G - A)) / (4 * N_l**2 * M_l**2)
        return E_l

    def
 style_loss(self, art_layers, input_layers):
        # Sum the per-layer losses over the indexed conv layers; the saved
        # activations line up one-to-one with self.indexed_layers, so pass
        # each layer its own activations rather than the full lists.
        L_style = 0
        for art, gen, layer in zip(art_layers, input_layers,
                                   self.indexed_layers):
            L_style += self.layer_loss(art, gen, layer)
        return L_style
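
# ---------------------------------------------------------------------------
# A minimal usage sketch, not part of the assignment skeleton: it shows how
# the loss above could drive style transfer by optimizing the pixels of the
# generated image directly, in the spirit of Gatys et al. The 224x224 input
# size, random placeholder images, and step count are assumptions for
# illustration, not values taken from hyperparameters.py.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = YourModel()

    # Placeholder inputs; in practice these would be a preprocessed content
    # photo and style artwork of the same shape.
    photo = tf.random.uniform((1, 224, 224, 3))
    art = tf.random.uniform((1, 224, 224, 3))

    # The generated image is the variable being optimized, initialized from
    # the content photo.
    generated = tf.Variable(photo)

    for step in range(100):
        with tf.GradientTape() as tape:
            loss = model.loss_fn(photo, art, generated)
        grads = tape.gradient(loss, [generated])
        model.optimizer.apply_gradients(zip(grads, [generated]))
        if step % 10 == 0:
            print(f"step {step}: loss = {loss.numpy():.4f}")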