
Custom Model Training


Table of Contents

  • Custom Model Training
    • 1. Define the Model
    • 2. Define Optimizer and Loss
    • 3. Evaluate Untrained Model
    • 4. Define Metrics
    • 5. Apply Gradients
    • 6. Train Loop
    • 7. Validation Loop
    • 8. Main Loop

Custom Model Training

  In some scenarios, TensorFlow's default training procedure cannot meet our needs, so we have to train the model with a custom loop. The following walks through the complete process of defining a custom model and training it by hand.
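The later code also assumes that a binary-classification dataset has already been split, normalized, and wrapped in tf.data pipelines; the original article does not show that step. The sketch below is only one possible way to produce the names used later (train, norm_test_X, test_Y, train_dataset, test_dataset), with a hypothetical data.csv file and target column; adapt it to your own data.

import numpy as np
import pandas as pd
import tensorflow as tf

# Hypothetical preparation: 'data.csv' holds numeric feature columns plus a binary 'target' column
df = pd.read_csv('data.csv')
train = df.sample(frac=0.8, random_state=42)
test = df.drop(train.index)
train_Y, test_Y = train.pop('target'), test.pop('target')

# Standardize the features using the training-set statistics
norm_train_X = (train - train.mean()) / train.std()
norm_test_X = (test - train.mean()) / train.std()

# tf.data pipelines consumed by the training and validation loops below
train_dataset = tf.data.Dataset.from_tensor_slices(
    (norm_train_X.values, train_Y.values)).shuffle(buffer_size=1024).batch(32)
test_dataset = tf.data.Dataset.from_tensor_slices(
    (norm_test_X.values, test_Y.values)).batch(32)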

1. Define the Model

def base_model():
    inputs = tf.keras.layers.Input(shape=(len(train.columns),))
    x = tf.keras.layers.Dense(128, activation='relu')(inputs)
    x = tf.keras.layers.Dense(64, activation='relu')(x)
    outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model

model = base_model()

2. Define Optimizer and Loss

optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001)
loss_object = tf.keras.losses.BinaryCrossentropy()

3. Evaluate Untrained Model

outputs = model(norm_test_X.values)
loss_value = loss_object(y_true=test_Y.values, y_pred=outputs)
print("Loss before training %.4f" % loss_value.numpy())

4. Define Metrics

class F1Score(tf.keras.metrics.Metric):

    def __init__(self, name='f1_score', **kwargs):
        super(F1Score, self).__init__(name=name, **kwargs)
        # Running counts of the confusion-matrix entries
        self.tp = tf.Variable(0, dtype='int32')
        self.fp = tf.Variable(0, dtype='int32')
        self.tn = tf.Variable(0, dtype='int32')
        self.fn = tf.Variable(0, dtype='int32')

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Accumulate the batch confusion matrix into the running counts
        conf_matrix = tf.math.confusion_matrix(y_true, y_pred, num_classes=2)
        self.tn.assign_add(conf_matrix[0][0])
        self.tp.assign_add(conf_matrix[1][1])
        self.fp.assign_add(conf_matrix[0][1])
        self.fn.assign_add(conf_matrix[1][0])

    def result(self):
        # Precision and recall, guarding against division by zero
        if (self.tp + self.fp == 0):
            precision = 1.0
        else:
            precision = self.tp / (self.tp + self.fp)
        if (self.tp + self.fn == 0):
            recall = 1.0
        else:
            recall = self.tp / (self.tp + self.fn)
        # Return the F1 score
        f1_score = 2 * ((precision * recall) / (precision + recall))
        return f1_score

    def reset_states(self):
        self.tp.assign(0)
        self.tn.assign(0)
        self.fp.assign(0)
        self.fn.assign(0)


train_f1score_metric = F1Score()
val_f1score_metric = F1Score()
train_acc_metric = tf.keras.metrics.BinaryAccuracy()
val_acc_metric = tf.keras.metrics.BinaryAccuracy()
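As a quick sanity check (not part of the original article), the custom metric can be exercised on a hand-made batch: with 2 true positives, 1 false positive, and 1 false negative, precision and recall are both 2/3, so the F1 score should come out as roughly 0.6667.

# Hypothetical sanity check of the custom metric
m = F1Score()
m.update_state(tf.constant([1, 1, 1, 0]), tf.constant([1, 1, 0, 1]))
print(m.result().numpy())   # 2 TP, 1 FP, 1 FN -> F1 ≈ 0.6667
m.reset_states()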

5. Apply Gradients

def apply_gradient(optimizer, loss_object, model, x, y):
    # Record the forward pass so gradients can be computed
    with tf.GradientTape() as tape:
        logits = model(x)
        loss_value = loss_object(y_true=y, y_pred=logits)
    # Back-propagate and update the trainable weights
    gradients = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    return logits, loss_value
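Optionally (this is not in the original article), the same training step can be compiled into a TensorFlow graph with tf.function, which usually speeds up the loop; the body below is identical to apply_gradient above.

# Optional graph-compiled variant of the training step (assumption, not from the original)
@tf.function
def apply_gradient_graph(optimizer, loss_object, model, x, y):
    with tf.GradientTape() as tape:
        logits = model(x)
        loss_value = loss_object(y_true=y, y_pred=logits)
    gradients = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    return logits, loss_value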

6. Train Loop

def train_data_for_one_epoch(train_dataset, optimizer, loss_object, model,
                             train_acc_metric, train_f1score_metric, verbose=True):
    losses = []
    # Iterate over all training batches
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Forward pass, loss, and weight update for this batch
        logits, loss_value = apply_gradient(optimizer, loss_object, model,
                                            x_batch_train, y_batch_train)
        losses.append(loss_value)
        # Round the predicted probabilities into hard 0/1 predictions
        logits = tf.round(logits)
        logits = tf.cast(logits, 'int64')
        # Update the training metrics
        train_acc_metric.update_state(y_batch_train, logits)
        train_f1score_metric.update_state(y_batch_train, logits)
        # Report progress
        if verbose:
            print("Training loss for step %s: %.4f" % (int(step), float(loss_value)))
    return losses

7. Validation Loop

def perform_validation():
    losses = []
    # Iterate over all validation batches
    for x_val, y_val in test_dataset:
        # Compute predictions and loss on the validation data
        val_logits = model(x_val)
        val_loss = loss_object(y_true=y_val, y_pred=val_logits)
        losses.append(val_loss)
        # Round the predicted probabilities into hard 0/1 predictions
        val_logits = tf.cast(tf.round(val_logits), 'int64')
        # Update the validation metrics
        val_acc_metric.update_state(y_val, val_logits)
        val_f1score_metric.update_state(y_val, val_logits)
    return losses

8. Main Loop

# Loop over the epochs
epochs = 5
epochs_val_losses, epochs_train_losses = [], []

for epoch in range(epochs):
    print('Start of epoch %d' % (epoch,))

    # Run the train loop
    losses_train = train_data_for_one_epoch(train_dataset, optimizer, loss_object, model,
                                            train_acc_metric, train_f1score_metric)
    # Collect the training metrics
    train_acc = train_acc_metric.result()
    train_f1score = train_f1score_metric.result()

    # Run the validation loop
    losses_val = perform_validation()
    # Collect the validation metrics
    val_acc = val_acc_metric.result()
    val_f1score = val_f1score_metric.result()

    # Average the train and validation losses over the epoch
    losses_train_mean = np.mean(losses_train)
    losses_val_mean = np.mean(losses_val)
    epochs_val_losses.append(losses_val_mean)
    epochs_train_losses.append(losses_train_mean)

    print('\n Epoch %s: Train loss: %.4f  Validation Loss: %.4f, Train Accuracy: %.4f, Validation Accuracy: %.4f, Train F1 Score: %.4f, Validation F1 Score: %.4f'
          % (epoch, float(losses_train_mean), float(losses_val_mean), float(train_acc),
             float(val_acc), train_f1score, val_f1score))

    # Reset the metrics for the next epoch
    train_acc_metric.reset_states()
    val_acc_metric.reset_states()
    val_f1score_metric.reset_states()
    train_f1score_metric.reset_states()
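The per-epoch means collected in epochs_train_losses and epochs_val_losses are not used further in the original article; a minimal matplotlib sketch like the one below can plot them to check convergence.

import matplotlib.pyplot as plt

# Plot the loss curves gathered during training
plt.plot(epochs_train_losses, label='train loss')
plt.plot(epochs_val_losses, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.title('Training and validation loss per epoch')
plt.show()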