When I use a custom loss function with batch gradient descent, I get the error shown below after the first epoch. The code runs fine with BinaryCrossentropy.
I get the following error:
optimizer.apply_gradients(zip(grads, model_2.trainable_weights))
No gradients provided for any variable: (['dense_22/kernel:0', 'dense_22/bias:0', 'dense_23/kernel:0', 'dense_23/bias:0', 'dense_24/kernel:0', 'dense_24/bias:0'],).
Code:
# importing necessary libraries and functions
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import InputLayer, GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import Mean, CategoricalAccuracy
import matplotlib.pyplot as plt
import keras.backend as K
import pandas as pd
import tensorflow_datasets as tfds
from collections import deque
from sklearn.model_selection import train_test_split #train test split
from sklearn.model_selection import StratifiedKFold #Stratifying the data (for test train split)
from sklearn.preprocessing import MinMaxScaler #data normalization with sklearn
import matplotlib.pyplot as plt
import math
lambda_par = tf.Variable(0.5)
def fairnessLoss(y_true, y_pred):
    print("HI", y_true, y_pred)
    cse_min = cse_maj = tf.Variable(0.0)
    n_min = n_maj = tf.Variable(0.0)
    print(y_pred.shape[0])
    for i in range(y_pred.shape[0]):
        print(i)
        if(y_true[i][0]==1):
            cse_min.assign_add(tf.math.log(y_pred[i][0]))
            n_min.assign_add(1.0)
        else:
            cse_maj.assign_add(tf.math.log(1-y_pred[i][0]))
            n_maj.assign_add(1.0)
    print("First step")
    tem1 = tf.divide(cse_min, n_min)
    tem2 = tf.divide(cse_maj, n_maj)
    fe = tf.Variable(tem1)
    fe.assign_add(-tem2)
    fe = tf.math.multiply(fe, fe)
    ans = tf.Variable(0.0)
    ans.assign_add(cse_min)
    ans.assign_add(cse_maj)
    ans.assign_add(tf.math.multiply(lambda_par, fe))
    return ans
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation=tf.keras.activations.sigmoid),  # hidden layer 1, sigmoid activation
    tf.keras.layers.Dense(8, activation=tf.keras.activations.sigmoid),
    tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid)
])
batch_size=len(train_X)
train_yy = []
for i in range(len(train_y)):
    train_yy.append([train_y[i]])
train_dataset = tf.data.Dataset.from_tensor_slices((train_X, train_yy))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# # Prepare the validation dataset.
# val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
# val_dataset = val_dataset.batch(batch_size)
train_acc_metric = keras.metrics.BinaryAccuracy()
val_acc_metric = keras.metrics.BinaryAccuracy()
epochs = 500
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam()
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
# storing variables to plot loss and accuracy
losses = []
accuracy = []
for epoch in range(epochs):
    print("\nStart of epoch %d" % (epoch,))
    epoch_loss_avg = Mean()
    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Open a GradientTape to record the operations run
        # during the forward pass, which enables auto-differentiation.
        with tf.GradientTape() as tape:
            # Run the forward pass of the layer.
            # The operations that the layer applies
            # to its inputs are going to be recorded
            # on the GradientTape.
            logits = model(x_batch_train, training=True)  # Logits for this minibatch
            # Compute the loss value for this minibatch.
            loss_value = fairnessLoss(y_batch_train, logits)
        # Use the gradient tape to automatically retrieve
        # the gradients of the trainable variables with respect to the loss.
        grads = tape.gradient(loss_value, model.trainable_weights)
        # Run one step of gradient descent by updating
        # the value of the variables to minimize the loss.
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        epoch_loss_avg.update_state(loss_value)
        train_acc_metric.update_state(y_batch_train, logits)
        losses.append(epoch_loss_avg.result())
        accuracy.append(train_acc_metric.result())
        # Log every 200 batches.
        if step % 200 == 0:
            print(
                "Training loss (for one batch) at step %d: %.4f"
                % (step, float(loss_value))
            )
            print("Seen so far: %s samples" % ((step + 1) * batch_size))
    print(train_acc_metric.result())
    train_acc_metric.reset_states()
1 Answer
The loss and the optimizer do two separate jobs side by side on the batch statistics; try tuning the optimizer or changing its learning rate to see what is really happening.
Example: the GradientTape records what happens when values are applied to a tf.Variable; the loss function expresses what changed and is measured from the logits you provide, while the optimizer is how you apply that result to move toward the target.
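A minimal sketch of that interaction, using nothing beyond a toy scalar tf.Variable: the tape records the ops applied to the variable, the loss is built only from differentiable tensor ops, and the optimizer applies the resulting gradient.

import tensorflow as tf

w = tf.Variable(2.0)                               # trainable parameter watched by the tape
opt = tf.keras.optimizers.Adam(learning_rate=0.1)

with tf.GradientTape() as tape:
    loss = tf.square(w - 5.0)                      # loss built only from differentiable ops

grads = tape.gradient(loss, [w])                   # a real gradient tensor, not None
opt.apply_gradients(zip(grads, [w]))               # one optimizer step toward the target

print(float(w))                                    # w has moved slightly toward 5.0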
Dataset: an image-classification problem, with an image and a class label for each example.
Code: a sample used only to test the tape and the gradients.
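A sketch of such a tape-and-gradient test, applied here to the question's model and train_dataset (rather than an image dataset) and using the question's lambda_par. It rewrites fairnessLoss with plain tensor ops (tf.boolean_mask, tf.reduce_sum, tf.reduce_mean) while keeping the original arithmetic; the version in the question creates fresh tf.Variables and calls assign_add inside the loss, and those assignments cut the differentiable path from the logits back to the weights, which is what produces the "No gradients provided for any variable" error.

def fairness_loss(y_true, y_pred):
    # Split the batch into minority (label 1) and majority (label 0) examples.
    y_true = tf.cast(y_true, y_pred.dtype)
    min_mask = tf.equal(y_true[:, 0], 1.0)

    p = y_pred[:, 0]
    log_p_min = tf.math.log(tf.boolean_mask(p, min_mask))
    log_p_maj = tf.math.log(1.0 - tf.boolean_mask(p, tf.logical_not(min_mask)))

    cse_min = tf.reduce_sum(log_p_min)
    cse_maj = tf.reduce_sum(log_p_maj)

    # Fairness term: squared gap between the per-group mean log terms,
    # mirroring (cse_min/n_min - cse_maj/n_maj)**2 from the question.
    fe = tf.square(tf.reduce_mean(log_p_min) - tf.reduce_mean(log_p_maj))

    return cse_min + cse_maj + lambda_par * fe

# Quick tape-and-gradient check on a single batch:
x_batch, y_batch = next(iter(train_dataset))
with tf.GradientTape() as tape:
    logits = model(x_batch, training=True)
    loss_value = fairness_loss(y_batch, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
print([g is not None for g in grads])   # every entry should now be True

With the loss expressed through differentiable ops like this, tape.gradient returns real tensors and optimizer.apply_gradients no longer raises.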
Result: the loss changes only slightly, because the custom optimizer here is a simple algorithm.