How do I display model performance during training in Keras?

Asked by nwlqm0z1 on 2022-12-27

I have built a UNET model for my research. When I fit a CNN model or a transfer-learning model on my dataset, I can see the model's performance during training, i.e. the loss, accuracy, validation loss, and validation accuracy for each epoch (as shown below). But for my UNET model, none of this is displayed.
I want to watch my model's performance at every epoch during training!
Note: I am not very familiar with the TensorFlow framework.
For example:

Epoch 1/10
1875/1875 [==============================] - 32s 17ms/step - loss: 0.1992 - accuracy: 0.9395 - val_loss: 0.0711 - val_accuracy: 0.9785
Epoch 2/10
1875/1875 [==============================] - 31s 16ms/step - loss: 0.0694 - accuracy: 0.9788 - val_loss: 0.0454 - val_accuracy: 0.9850
Epoch 3/10
1875/1875 [==============================] - 32s 17ms/step - loss: 0.0507 - accuracy: 0.9839 - val_loss: 0.0333 - val_accuracy: 0.9884
Epoch 4/10
1875/1875 [==============================] - 31s 16ms/step - loss: 0.0403 - accuracy: 0.9868 - val_loss: 0.0360 - val_accuracy: 0.9890
Epoch 5/10
1875/1875 [==============================] - 31s 16ms/step - loss: 0.0342 - accuracy: 0.9888 - val_loss: 0.0337 - val_accuracy: 0.9895
Epoch 6/10
1875/1875 [==============================] - 31s 16ms/step - loss: 0.0283 - accuracy: 0.9909 - val_loss: 0.0301 - val_accuracy: 0.9898
Epoch 7/10
1875/1875 [==============================] - 32s 17ms/step - loss: 0.0245 - accuracy: 0.9922 - val_loss: 0.0260 - val_accuracy: 0.9918
Epoch 8/10
1875/1875 [==============================] - 31s 16ms/step - loss: 0.0222 - accuracy: 0.9930 - val_loss: 0.0290 - val_accuracy: 0.9905
Epoch 9/10
1875/1875 [==============================] - 31s 16ms/step - loss: 0.0188 - accuracy: 0.9934 - val_loss: 0.0302 - val_accuracy: 0.9914
Epoch 10/10
1875/1875 [==============================] - 30s 16ms/step - loss: 0.0169 - accuracy: 0.9944 - val_loss: 0.0388 - val_accuracy: 0.9886

Compiling:

## instantiating the model
inputs = tf.keras.layers.Input((256, 256, 3))
myTransformer = GiveMeUnet(inputs, droupouts= 0.07)
myTransformer.compile(optimizer = 'Adam', loss = 'binary_crossentropy', metrics = ['accuracy'] )

Fitting:

retVal = myTransformer.fit(np.array(framObjTrain['img']), np.array(framObjTrain['mask']), epochs = 100, verbose = 0)

I have attached the full code in case anyone wants to take a look: Full Code

Answer 1 (czq61nw1)

Simply remove verbose = 0. Setting verbose = 0 in the model.fit() method disables the performance output during training.

retVal = myTransformer.fit(np.array(framObjTrain['img']), np.array(framObjTrain['mask']), epochs = 100)

You can read more details about verbose here.
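
Note also that the val_loss and val_accuracy shown in the example output only appear when validation data is passed to fit(). A minimal sketch, assuming the same framObjTrain arrays (the validation_split value of 0.1 is an arbitrary illustrative choice):

retVal = myTransformer.fit(
    np.array(framObjTrain['img']),
    np.array(framObjTrain['mask']),
    epochs = 100,
    verbose = 1,               # 1 = progress bar per epoch, 2 = one summary line per epoch
    validation_split = 0.1     # hold out 10% of the samples so val_loss / val_accuracy are reported
)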

Answer 2 (41zrol4v)

TensorBoard provides a dashboard that shows the default metric values (loss, accuracy, estimates) and also lets you add custom properties or single values:
1. Create a callback that inherits from the built-in TensorBoard callback (a minimal sketch follows the tensorboard command below).
2. Pass the callback to model.fit() so it collects the values along with the configured metrics, loss, accuracy, and defaults.
3. Configure TensorBoard; once you open the web interface you can follow the step-by-step instructions there.
4. Open the TensorBoard web interface and explore the learning curves.

tensorboard --logdir="F:\models\checkpoint\test_tf_tensorboard\\"
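
For steps 1 and 2, a minimal sketch of such a callback, assuming TensorFlow 2.x; the extra scalar name, the "/extra" log subdirectory, and the logged value are illustrative and not part of the original answer:

import tensorflow as tf

class CustomTensorBoard(tf.keras.callbacks.TensorBoard):
    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir=log_dir, **kwargs)
        # a separate writer for the extra scalars, so no TensorBoard internals are touched
        self.extra_writer = tf.summary.create_file_writer(log_dir + "/extra")

    def on_epoch_end(self, epoch, logs=None):
        super().on_epoch_end(epoch, logs)  # keeps the default loss / accuracy curves
        logs = logs or {}
        with self.extra_writer.as_default():
            # illustrative custom value; replace with whatever you want to track
            tf.summary.scalar("custom/loss_times_two", logs.get("loss", 0.0) * 2.0, step=epoch)

# usage: model.fit(dataset, epochs=50, callbacks=[CustomTensorBoard(log_dir)])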

[Sample]:

import os
from os.path import exists

import tensorflow as tf
import tensorflow_io as tfio

import matplotlib.pyplot as plt

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)

list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(1)
    
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(9)

checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"

if not exists(checkpoint_dir) : 
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
    
log_dir = checkpoint_dir

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
tb_callback = tf.keras.callbacks.TensorBoard(log_dir, update_freq=1)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Reshape((128, 225)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(192, activation='relu'),
    tf.keras.layers.Dense(10),
])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
    learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
    name='Nadam'
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""                               
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True,   # the final Dense(10) layer has no softmax, so the outputs are logits
    reduction=tf.keras.losses.Reduction.AUTO,
    name='sparse_categorical_crossentropy'
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, epochs=50, callbacks=[tb_callback] )  # batch size comes from the dataset itself
model.save_weights(checkpoint_path)

plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" +  str(list_label_actual[tf.math.argmax(score)]))
    
plt.show()

input('...')

### tensorboard --logdir="F:\models\checkpoint\test_tf_tensorboard\\"
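
In addition to the TensorBoard dashboard, the History object returned by model.fit() keeps the per-epoch metric values in history.history, so the same curves can also be plotted directly. A minimal sketch, assuming the history variable and the matplotlib import from the sample above:

plt.figure()
plt.plot(history.history['loss'], label='loss')          # per-epoch training loss
plt.plot(history.history['accuracy'], label='accuracy')  # per-epoch training accuracy
plt.xlabel('epoch')
plt.legend()
plt.show()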
