```python
# custom loss function with extra arguments
class LocLoss(keras.losses.Loss):
    def __init__(self, kg_raw, model_size, nobs, batch_size, name="custom_loss"):
        super().__init__(name=name)
        # these are some extra arguments:
        self.kg_raw = kg_raw
        self.model_size = model_size
        self.nobs = nobs
        self.batch_size = batch_size

    def call(self, kg_true, loc_f):
        k = tf.constant([1, 1, self.nobs, 1], tf.int32)
        kg_pred = tf.math.multiply(
            self.kg_raw,
            tf.tile(tf.reshape(loc_f, [self.batch_size, self.model_size, 1, 1]), k))
        mse = tf.math.reduce_mean(tf.square(kg_true - kg_pred))
        return mse

# create a model
# specify model structure...
model = keras.Model(inputs=inputs, outputs=outputs)

# compile the model
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss=LocLoss(inputs, model_size, nobs, batch_size),  # model inputs as extra args
    experimental_run_tf_function=False,  # necessary!
)

# train the model
history = model.fit(train_dataset, epochs=10, validation_data=test_dataset)
```
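As a quick sanity check of the shape bookkeeping in `LocLoss`, here is a minimal standalone sketch; the shapes and values are hypothetical, and `kg_raw` is a plain constant here rather than a model input:

```python
import tensorflow as tf
from tensorflow import keras

# toy shapes, purely for illustration (hypothetical values)
batch_size, model_size, nobs = 2, 3, 4
kg_raw = tf.ones([batch_size, model_size, nobs, 1])

loss_fn = LocLoss(kg_raw, model_size, nobs, batch_size)
kg_true = tf.zeros([batch_size, model_size, nobs, 1])  # target Kalman gain
loc_f = tf.ones([batch_size, model_size])              # localization factors
print(float(loss_fn(kg_true, loc_f)))  # 1.0: every entry of kg_pred is 1
```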
Passing the model's `inputs` as an argument when compiling implements a custom loss function with extra arguments. One problem comes up here: if `experimental_run_tf_function=False` is not set, compilation throws an error (check this for more information):
```
tensorflow.python.eager.core._SymbolicException: Inputs to eager execution function cannot be Keras symbolic tensors, but found [<tf.Tensor '...' shape=(...) dtype=float32>]
```
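Note that `experimental_run_tf_function` was removed in later TensorFlow releases. On those versions, one possible alternative (a sketch only, not the approach used above) is to build the loss from symbolic tensors and attach it with `model.add_loss()`, feeding the target `kg_true` as an assumed extra `Input`:

```python
# sketch for newer TF versions: kg_true_in is an assumed extra Input;
# `inputs` plays the role of kg_raw and `outputs` the role of loc_f
kg_true_in = keras.Input(shape=(model_size, nobs, 1), name="kg_true")
kg_pred = inputs * tf.tile(
    tf.reshape(outputs, [batch_size, model_size, 1, 1]),
    tf.constant([1, 1, nobs, 1], tf.int32))
model = keras.Model(inputs=[inputs, kg_true_in], outputs=outputs)
model.add_loss(tf.reduce_mean(tf.square(kg_true_in - kg_pred)))
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))  # no loss= needed
```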
```python
# custom metrics
class GradientNorm(tf.keras.metrics.Metric):
    def __init__(self, name='gradient_norm', **kwargs):
        super().__init__(name=name, **kwargs)
        # state variables must be created with the add_weight method
        self.gd_norm = self.add_weight(name='gdnorm', initializer='zeros')

    def update_state(self, y, gradients, sample_weight=None):
        norm = tf.norm(gradients, ord='euclidean') / tf.cast(
            tf.size(gradients, out_type=tf.int32), tf.float32)
        self.gd_norm.assign(norm)  # state variables must be updated with assign()

    def result(self):
        return self.gd_norm

    def reset_states(self):
        self.gd_norm.assign(0)

# customize what happens during training by overriding train_step()
class CustomModel(keras.Model):
    def train_step(self, data):
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data

        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # forward pass
            # Compute the loss value
            # (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)

        # compute gradients and update the weights (standard train_step steps)
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # update the compiled metrics (GradientNorm consumes the gradients)
        self.compiled_metrics.update_state(y, gradients)

        # Return a dict mapping metric names to current values
        return_metrics = {m.name: m.result() for m in self.metrics}
        return return_metrics

# create a model
# specify model structure...
model = CustomModel(inputs=inputs, outputs=outputs)

# compile the model
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss=LocLoss(inputs, model_size, nobs, batch_size),  # model inputs as extra args
    experimental_run_tf_function=False,  # necessary!
    metrics=[GradientNorm()],  # custom metrics
)
```
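A quick standalone check of `GradientNorm` (toy gradient tensor, hypothetical values):

```python
m = GradientNorm()
m.update_state(None, tf.constant([3.0, 4.0]))  # Euclidean norm 5 over 2 elements
print(float(m.result()))  # 2.5
m.reset_states()          # back to zero for the next epoch
```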
Alternatively, the metric objects can live outside the model and be updated manually inside `train_step()`, with the loss computed there directly:

```python
gd_norm = GradientNorm()
# PSNR is another custom metric, which takes (y_true, y_pred) as input
psnr_metric = PSNR()
loss_tracker = keras.metrics.Mean(name="loss")

class CustomModel(keras.Model):
    def train_step(self, data):
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data

        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # forward pass
            # Compute the loss value
            loss = keras.losses.mean_squared_error(y, y_pred)

        # compute gradients and update the weights (standard train_step steps)
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))

        # Update metrics (includes the metric that tracks the loss)
        gd_norm.update_state(y, gradients)
        psnr_metric.update_state(y, y_pred)
        loss_tracker.update_state(loss)

        # Return a dict mapping metric names to current values
        return_metrics = {m.name: m.result() for m in self.metrics}
        return return_metrics

    @property
    def metrics(self):
        # We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch
        # or at the start of `evaluate()`.
        # If you don't implement this property, you have to call
        # `reset_states()` yourself at the time of your choosing.
        return [loss_tracker, gd_norm, psnr_metric]
```
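Since the loss is now computed inside `train_step()`, `compile()` only needs an optimizer. A minimal usage sketch (`train_dataset` as before):

```python
model = CustomModel(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))  # no loss=
history = model.fit(train_dataset, epochs=10)
```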
However, this approach is only supported in TensorFlow 2.2 and above; on earlier versions it raises an error (check this for more info):
```
ValueError: The model cannot be compiled because it has no loss to optimize.
```
Finally, a custom layer that takes multiple inputs (passed as a list) and implements `get_config()` so that it can be serialized. The `__init__` shown here is implied by the attributes read back in `get_config()`:

```python
# custom layer with multiple inputs
class Loc_by_1D(keras.layers.Layer):
    def __init__(self, obs_dens, model_size, nobs, batch_size, **kwargs):
        super(Loc_by_1D, self).__init__(**kwargs)
        # extra arguments stored as attributes (as used in get_config below)
        self.obs_dens = obs_dens
        self.model_size = model_size
        self.nobs = nobs
        self.batch_size = batch_size

    def rotate(self, matrix, shifts):
        """here are some codes"""

    def call(self, inputs):
        kg_f = tf.squeeze(inputs[0])   # multiple inputs arrive as a list
        loc_f = tf.squeeze(inputs[1])  # multiple inputs arrive as a list

    def get_config(self):
        config = {'obs_dens': self.obs_dens,
                  'model_size': self.model_size,
                  'nobs': self.nobs,
                  'batch_size': self.batch_size}
        base_config = super(Loc_by_1D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

# the last layer of my keras model; the inputs here are a list: [inputs, x5]
outputs = Loc_by_1D(obs_dens=obs_dens, model_size=model_size, nobs=nobs,
                    batch_size=kwargs['batch_size'])([inputs, x5])
```
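Because `Loc_by_1D` implements `get_config()`, a model containing it can be saved and reloaded, provided the class is passed via `custom_objects` (a sketch; the file name is hypothetical):

```python
model.save('loc_model.h5')
restored = keras.models.load_model(
    'loc_model.h5', custom_objects={'Loc_by_1D': Loc_by_1D})
```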