# --- Compile with a built-in loss function ---
# NOTE(review): `tf`, `model`, `train_x`, `train_y` are defined elsewhere in
# the original notebook/file this snippet came from.
learning_rate = 0.03
opt = tf.keras.optimizers.Adam(learning_rate)
loss = tf.keras.losses.categorical_crossentropy
model.compile(optimizer=opt, loss=loss, metrics=["accuracy"])
# custom loss: mean squared error written by hand
def custom_loss(y_true, y_pred):  # argument order: ground truth first, then prediction
    """Return the mean of the element-wise squared error between y_true and y_pred."""
    squared_error = tf.square(y_true - y_pred)
    return tf.reduce_mean(squared_error)
# Compile with the custom loss
model.compile(optimizer=opt, loss=custom_loss, metrics=["accuracy"])
# Multiple losses (one per model output)
model.compile(optimizer=opt, loss=[loss, custom_loss], metrics=["accuracy"])
# Multiple losses + loss_weights -> relative importance of each loss during training
model.compile(optimizer=opt, loss=[loss, custom_loss], loss_weights=[0.7, 0.3], metrics=["accuracy"])
loss = "categorical_crossentropy" # some built-in losses can also be passed by their string alias
model.compile(optimizer=opt, loss=loss, metrics=["accuracy"])
# Metrics can also be passed as metric-object instances
acc = tf.keras.metrics.Accuracy()
auc = tf.keras.metrics.AUC()
# NOTE(review): tf.keras.metrics.Accuracy compares raw values element-wise;
# with one-hot labels, CategoricalAccuracy is usually what is intended — confirm.
model.compile(optimizer=opt, loss=loss, metrics=[acc, auc])
# custom metrics
def custom_metric(y_true, y_pred):
    """Count the samples whose predicted class matches the true class.

    Assumes y_true / y_pred are one-hot (or probability) tensors with the
    class axis last — TODO confirm against the caller. argmax reduces each
    to integer class indices before comparing.
    """
    true = tf.argmax(y_true, axis=-1)
    pred = tf.argmax(y_pred, axis=-1)
    # BUG FIX: the original compared the raw tensors (y_true, y_pred) instead
    # of the argmax indices computed above, leaving `true`/`pred` unused and
    # counting element-wise one-hot matches rather than per-sample matches.
    return tf.reduce_sum(tf.cast(tf.equal(true, pred), tf.int32))
# Custom metrics are passed like built-in ones, and the two can be mixed
model.compile(optimizer=opt, loss=loss, metrics=[custom_metric])
model.compile(optimizer=opt, loss=loss, metrics=["accuracy", custom_metric])
# Train; 30% of the training data is held out for validation
hist = model.fit(train_x,
train_y,
epochs=1,
batch_size=128,
validation_split=0.3,
verbose=1
)
def scheduler(epoch, lr):
    """Keep the learning rate unchanged for the first 10 epochs, then decay it.

    From epoch 11 onward the current lr is scaled by 0.9 ** (epoch - 10).
    NOTE(review): Keras feeds the already-decayed lr back in each epoch, so
    this decay compounds — confirm that is the intended schedule.
    """
    if epoch <= 10:
        return lr
    return lr * 0.9 ** (epoch - 10)
# Hook the schedule into training via the LearningRateScheduler callback
lr_scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler)
hist = model.fit(train_x,
train_y,
epochs=1,
batch_size=128,
validation_split=0.3,
verbose=1,
callbacks=[lr_scheduler],
)
# Other useful callbacks (referenced, not instantiated here):
tf.keras.callbacks.EarlyStopping # stops training once a monitored condition is met
tf.keras.callbacks.History # the object model.fit() returns (see `hist` above)
# Reference
# 1) 제로베이스 데이터스쿨 강의자료 (Zerobase Data School lecture materials)