TensorFlow error: "No gradients provided for any variable" when using a custom loss

Asked by e5njpo68 on 2022-11-16

When I use a custom loss function with batch gradient descent, I get the error shown in the photos below after the first epoch. The same code runs fine with BinaryCrossentropy.
I receive the following error:

optimizer.apply_gradients(zip(grads, model_2.trainable_weights))

No gradients provided for any variable: (['dense_22/kernel:0', 'dense_22/bias:0', 'dense_23/kernel:0', 'dense_23/bias:0', 'dense_24/kernel:0', 'dense_24/bias:0'],).

Code:

# importing necessary libraries and functions
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import InputLayer, GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import Mean, CategoricalAccuracy
import matplotlib.pyplot as plt
import keras.backend as K
import pandas as pd
import tensorflow_datasets as tfds
from collections import deque
from sklearn.model_selection import train_test_split       #train test split
from sklearn.model_selection import StratifiedKFold        #Stratifying the data (for test train split)
from sklearn.preprocessing import MinMaxScaler             #data normalization with sklearn

import math

lambda_par = tf.Variable(0.5)

def fairnessLoss(y_true,y_pred):
  print("HI",y_true,y_pred)
  cse_min = cse_maj = tf.Variable(0.0)
  n_min = n_maj = tf.Variable(0.0)
  print(y_pred.shape[0])
  for i in range(y_pred.shape[0]):
    print(i)
    if(y_true[i][0]==1):
      cse_min.assign_add(tf.math.log(y_pred[i][0]))
      n_min.assign_add(1.0)
    else:
      cse_maj.assign_add(tf.math.log(1-y_pred[i][0]))
      n_maj.assign_add(1.0)
  print("First step")
  tem1 = tf.divide(cse_min,n_min)
  tem2 = tf.divide(cse_maj,n_maj)
  fe = tf.Variable(tem1)
  fe.assign_add(-tem2)
  fe = tf.math.multiply(fe,fe)
  ans = tf.Variable(0.0)
  ans.assign_add(cse_min)
  ans.assign_add(cse_maj)
  ans.assign_add(tf.math.multiply(lambda_par,fe))
  return ans

model = tf.keras.Sequential([
  tf.keras.layers.Dense(8, activation=tf.keras.activations.sigmoid), # hidden layer 1, sigmoid activation
  tf.keras.layers.Dense(8, activation=tf.keras.activations.sigmoid),
  tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid)
])

batch_size=len(train_X)
train_yy = []
for i in range(len(train_y)):
  train_yy.append([train_y[i]])
train_dataset = tf.data.Dataset.from_tensor_slices((train_X, train_yy))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# # Prepare the validation dataset.
# val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
# val_dataset = val_dataset.batch(batch_size)

train_acc_metric = keras.metrics.BinaryAccuracy()
val_acc_metric = keras.metrics.BinaryAccuracy()

epochs = 500
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam()
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
# storing variables to plot loss and accuracy
losses = []
accuracy = []
for epoch in range(epochs):
    print("\nStart of epoch %d" % (epoch,))
    epoch_loss_avg = Mean()

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Open a GradientTape to record the operations run
        # during the forward pass, which enables auto-differentiation.
        with tf.GradientTape() as tape:

            # Run the forward pass of the layer.
            # The operations that the layer applies
            # to its inputs are going to be recorded
            # on the GradientTape.
            logits = model(x_batch_train, training=True)  # Logits for this minibatch

            # Compute the loss value for this minibatch.
            loss_value = fairnessLoss(y_batch_train, logits)

        # Use the gradient tape to automatically retrieve
        # the gradients of the trainable variables with respect to the loss.
        grads = tape.gradient(loss_value, model.trainable_weights)

        # Run one step of gradient descent by updating
        # the value of the variables to minimize the loss.
        optimizer.apply_gradients(zip(grads, model.trainable_weights))

        epoch_loss_avg.update_state(loss_value)

        train_acc_metric.update_state(y_batch_train, logits)

        losses.append(epoch_loss_avg.result())
        accuracy.append(train_acc_metric.result())

        # Log every 200 batches.
        if step % 200 == 0:
            print(
                "Training loss (for one batch) at step %d: %.4f"
                % (step, float(loss_value))
            )
            print("Seen so far: %s samples" % ((step + 1) * batch_size))
    print(train_acc_metric.result())
    
    train_acc_metric.reset_states()

[Photo of the error 1]
[Photo of the error 2]

Answer #1 (zzwlnbp8)

The loss and the optimizer work as a pair on the training statistics: you can tune the optimizer, or change its learning rate, to see how training really behaves.
In short: the GradientTape records the operations applied to tf.Variables, the loss function measures what changed, computed from the logits you provide, and the optimizer is how you apply that measurement back toward the target.
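
That is also why the code in the question yields no gradients: fairnessLoss accumulates its result with tf.Variable and assign_add, and assignment ops are not differentiable, so the tape has no path from the returned value back to the model weights and tape.gradient returns None for every variable. Below is a minimal sketch of the same loss written with pure tensor ops; the multiplicative masks and the eps guard are my own additions, not from the original post:

import tensorflow as tf

lambda_par = tf.constant(0.5)

def fairness_loss(y_true, y_pred, eps=1e-7):
    # Pure tensor ops only: no tf.Variable, no assign_add, so the
    # GradientTape can differentiate through every step.
    y_true = tf.cast(y_true, y_pred.dtype)
    is_min = y_true[:, 0]                          # 1.0 for minority samples
    log_p = tf.math.log(y_pred[:, 0] + eps)        # log p       (minority term)
    log_q = tf.math.log(1.0 - y_pred[:, 0] + eps)  # log (1 - p) (majority term)

    cse_min = tf.reduce_sum(is_min * log_p)            # sum over minority rows
    cse_maj = tf.reduce_sum((1.0 - is_min) * log_q)    # sum over majority rows
    n_min = tf.reduce_sum(is_min) + eps
    n_maj = tf.reduce_sum(1.0 - is_min) + eps

    fe = cse_min / n_min - cse_maj / n_maj         # gap between the group means
    return cse_min + cse_maj + lambda_par * fe * fe

With a formulation like this, tape.gradient in the question's training loop returns real gradients instead of None.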
Dataset: an image-classification problem with images and category labels.

Index    Image                                             Label

1   F:\datasets\downloads\Actors\train\Candidt Kibt\01.tif  0
2   F:\datasets\downloads\Actors\train\Candidt Kibt\02.tif  0
19  F:\datasets\downloads\Actors\train\Pikaploy\01.tif      1

Code (for testing the tape and gradients only):

import os
from os.path import exists

import tensorflow as tf
import pandas as pd

import matplotlib.pyplot as plt

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
num_iter = 1000
train_generator_batch_size = 1
batch_size = 1
WIDTH = 256
HEIGHT = 256
CHANNEL = 3

checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)

if not exists(checkpoint_dir) : 
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Definition / Class
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def create_image_generator( ):
    variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 7.xlsx', index_col=None, header=[0], dtype=str)
    
    train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        
        validation_split=0.2,
    
    )

    train_image_ds = train_generator.flow_from_dataframe(
        dataframe = variables,
        directory=None,
        x_col= 'Image',
        y_col= 'Label',
        weight_col=None,
        target_size=( WIDTH, HEIGHT ),
        color_mode='rgb',
        classes=None,
        class_mode='categorical',           ####
        batch_size=train_generator_batch_size,
        shuffle=True,
        seed=None,
        save_to_dir=None,
        save_prefix='',
        save_format='png',
        subset=None,
        interpolation='nearest',
        validate_filenames=True,
    )

    return train_image_ds

class gradient_tape_optimizer( ):
    def __init__ ( self, model, num_iter, content_iter, batch_size ):
        
        self.num_iter = num_iter
        self.content_iter = content_iter
        self.style_iter = content_iter 
        self.batch_size = batch_size
        self.model = model
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False, 
            reduction=tf.keras.losses.Reduction.AUTO, 
            name='sparse_categorical_crossentropy' )
            
        self.optimizer = tf.keras.optimizers.Nadam( learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )

    def _compute_mean_std( self, feats : tf.Tensor, eps=1e-8 ):
        """
        feats: Features should be in shape N x H x W x C
        """
        mean = tf.math.reduce_mean(feats, axis=[1,2], keepdims=True)
        std = tf.math.reduce_std(feats, axis=[1,2], keepdims=True) + eps
        return mean, std

    # Note: criterion() is never called by train() below; it also references
    # self.mse_loss and self.style_weight, which are not defined in __init__.
    def criterion( self, stylized_img : tf.Tensor, style_img : tf.Tensor, t : tf.Tensor ):
        stylized_content_feats = self.model.encode(stylized_img)
        stylized_feats = self.model.encode(stylized_img, return_all=True)
        style_feats = self.model.encode(style_img, return_all=True)

        content_loss = self.mse_loss(t, stylized_content_feats)

        style_loss = 0
        for f1, f2 in zip(stylized_feats, style_feats):
            m1, s1 = self._compute_mean_std(f1)
            m2, s2 = self._compute_mean_std(f2)
            style_loss += self.mse_loss(m1, m2) + self.mse_loss(s1, s2)

        return content_loss + self.style_weight * style_loss

    def train( self ):
        step = 0
        while step < self.num_iter:
            content_batch = self.content_iter.get_next()

            if content_batch[0].shape[1] != self.batch_size:
                content_batch = self.content_iter.get_next()

            style_batch = self.style_iter.get_next()
            
            if style_batch[0].shape[1] != self.batch_size:
                style_batch = self.style_iter.get_next()
            
            
            current_label = tf.constant( content_batch[1], shape=( 2, 1 ) ).numpy()
            loss_value = tf.Variable( 10.0 )
            
            with tf.GradientTape() as tape:
                
                result = self.model( inputs=tf.constant( content_batch[0], shape=( 1, WIDTH, HEIGHT, CHANNEL ) ) )
                result = tf.constant( result, shape=( 2, 1 ) )

                predict_label = tf.Variable( tf.constant( self.model.trainable_weights[len(self.model.trainable_weights) - 1], shape=( 2, 1 ) ) )
                loss_value =  self.loss( result.numpy(), current_label )
                loss_value =  tf.Variable( tf.constant( loss_value, shape=( 1, ) ).numpy() )
                tape.watch( loss_value )
            
            # Note: this differentiates loss_value with respect to itself,
            # which yields a constant gradient rather than weight gradients.
            gradients = tape.gradient( loss_value, loss_value )

            self.optimizer.apply_gradients(zip(gradients, self.model.trainable_weights))

            # log and save every 200 batches
            if step % 200 == 0:
            
                if result[tf.math.argmax(result).numpy()[0]][0] > 0 :
                    print(f'Training loss (for one batch) at step {step}: {self.loss} value {result[tf.math.argmax(result).numpy()[0]]}')
                else :  
                    print(f'Training loss (for one batch) at step {step}: {self.loss} value {result[abs( 1 - tf.math.argmax(result).numpy()[0]) ]}')
                    
                print(f'Seen so far: {(step+1)*self.batch_size} samples')

                self.model.save_weights(checkpoint_path)

            step += 1

        print("Finished training...")
        self.model.save_weights(checkpoint_path)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Dataset
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 7.xlsx', index_col=None, header=[0], dtype=str)

train_image_ds = tf.data.Dataset.from_generator(
    create_image_generator,
    output_types=None,
    output_shapes=None,
    args=None,

    output_signature=(
        tf.TensorSpec(shape=( 1, WIDTH, HEIGHT, CHANNEL ), dtype=tf.float32, name=None), tf.TensorSpec(shape=(1, 2), dtype=tf.float32, name=None),
        ),
    
    name='train_image_ds'
)

train_image_ds = train_image_ds.batch( 1 )
iterator = iter( train_image_ds )

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( WIDTH, HEIGHT, CHANNEL )),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Reshape((128, 127 * 127)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(192, activation='relu'),
    tf.keras.layers.Dense(2),
])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
    learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=0.0000001,
    name='Nadam'
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""                               
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False,
    reduction=tf.keras.losses.Reduction.AUTO,
    name='sparse_categorical_crossentropy'
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
gradient_tape_optimizer = gradient_tape_optimizer( model, num_iter, iterator, batch_size )
result = gradient_tape_optimizer.train()

input( '...' )

Result: the loss changes only a little, because the custom optimizer step is a simple algorithm.

2022-10-15 14:23:57.141863: I tensorflow/stream_executor/cuda/cuda_dnn.cc:384] Loaded cuDNN version 8100
Training loss (for one batch) at step 0: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.06285592]
Seen so far: 1 samples
Training loss (for one batch) at step 200: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.05492945]
Seen so far: 201 samples
Training loss (for one batch) at step 400: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.05577546]
Seen so far: 401 samples
Training loss (for one batch) at step 600: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.06180618]
Seen so far: 601 samples
Training loss (for one batch) at step 800: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.05990243]
Seen so far: 801 samples
Finished training...
...
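
As a side check (a minimal sketch reusing the names from the question's training loop, not code from the original post), you can verify that the tape actually produced gradients before applying them:

# Guard against silently applying missing gradients.
grads = tape.gradient(loss_value, model.trainable_weights)
missing = [v.name for g, v in zip(grads, model.trainable_weights) if g is None]
if missing:
    raise ValueError("No gradients for: %s; the loss is probably not "
                     "connected to the model output by tensor ops." % missing)
optimizer.apply_gradients(zip(grads, model.trainable_weights))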
