我正在为一个项目创建一个cGAN模型网络,但遇到了错误(代码将在底部):
Exception has occurred: ValueError
Exception encountered when calling layer 'sequential' (type Sequential).
Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (102,)
Call arguments received by layer 'sequential' (type Sequential):
• inputs=tf.Tensor(shape=(102,), dtype=float32)
• training=None
• mask=None
ValueError: Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (102,)
During handling of the above exception, another exception occurred:
File "D:\School\Internship (S5)\data\cgan_model_v2.py", line 131, in <module>
generated_data = generator([noise_tensor, fake_labels_tensor])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ValueError: Exception encountered when calling layer 'sequential' (type Sequential).
Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (102,)
Call arguments received by layer 'sequential' (type Sequential):
• inputs=tf.Tensor(shape=(102,), dtype=float32)
• training=None
• mask=None
如何解决这个错误?我试着搜索了一下,但没有找到解决办法。标签的数据格式将是具有2个条目的数组。对于数据,它将是具有512个数据点的数组。两个阵列均为一维阵列
如需了解更多详情,请随时询问
代码:
import mat73
import tensorflow as tf
from keras.layers import Dense, Input, Concatenate, Flatten
from keras.models import Model
from keras.optimizers import Adam
# Load the data
def import_data():
# Load the data
data_dict = mat73.loadmat('example_data.mat')
# Assuming raw_data contains a list of lists
raw_data = list(data_dict.values())
# Convert each element in raw_data to a NumPy array
data = np.asarray(raw_data[0])
labels = np.asarray(raw_data[1])
secotion_labels = [x for x in labels for _ in range(4)]
data, split_labels = split_data(data)
labels = combine_arrays(split_labels,secotion_labels)
data = z_score_normalization(data)
labels = z_score_normalization(labels)
return data, labels
def split_data(input_data):
data_array = input_data
label_array = 0, 1, 2, 3
split_data_r = []
split_labels = []
for i in data_array:
temp_data = i
for j in range(0, len(temp_data), 512):
split_data_r.append(temp_data[j:j + 512])
split_labels.append(label_array)
return np.array(split_data_r), np.array(split_labels)
def combine_arrays(array1, array2):
if len(array1) != len(array2):
raise ValueError("Input arrays must have the same length")
combined_array = np.column_stack((array1, array2))
return combined_array
def z_score_normalization(data_array):
if data_array.ndim == 1:
n_data = tf.convert_to_tensor(data_array, dtype=tf.float32)
mean = tf.reduce_mean(n_data)
stddev = tf.math.reduce_std(n_data)
normalized_data = (n_data - mean) / stddev
elif data_array.ndim == 2:
mean = tf.reduce_mean(data_array, axis=0)
stddev = tf.math.reduce_std(data_array, axis=0)
normalized_data = (data_array - mean) / stddev
else:
raise ValueError("Input array must be 1D or 2D")
return normalized_data
def discriminator(data_input_shape, label_input_shape):
# Create input layers for data and labels
data_input = Input(shape=data_input_shape, name='data_input')
label_input = Input(shape=label_input_shape, name='label_input')
# Concatenate data and labels
merged_inputs = Concatenate(axis=-1)([data_input, label_input])
# Create the discriminator model
model = tf.keras.Sequential()
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid')) # Use 'sigmoid' for binary classification
# Connect the input layer to the discriminator model
discriminator_output = model(merged_inputs)
# Create a Model using the input layers and discriminator output
discriminator_model = Model(inputs=[data_input, label_input], outputs=discriminator_output)
return discriminator_model
def generator(noise_input_shape, label_input_shape):
noise_input = Input(shape=noise_input_shape, name='data_input')
label_input = Input(shape=label_input_shape, name='label_input')
merged_inputs = Concatenate(axis=-1)([noise_input,label_input])
model = tf.keras.Sequential()
model.add(Dense(512, activation='relu'))
model.add(Dense(768, activation='relu'))
model.add(Dense(768, activation='relu'))
model.add(Dense(512, activation='relu'))
genarator_output = model(merged_inputs)
# Create a Model using the input layers and genarator output
genarator_model = Model(inputs=[noise_input, label_input], outputs=genarator_output)
return genarator_model
def cgan(genartor_model, discrimanator_model):
print ("not done yet")
noise_input_shape = (100,)
label_input_shape = (2,)
generator = generator(noise_input_shape, label_input_shape)
discriminator = discriminator((512,), label_input_shape)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
cross_entropy = tf.keras.losses.BinaryCrossentropy()
#training loop
epochs = 200
batch_size = 1
for x in range(epochs):
data, labels = import_data();
for i in range(len(data)):
noise = np.random.rand(noise_input_shape[0])
fake_labels = np.random.rand(label_input_shape[0])
noise_tensor = tf.convert_to_tensor(noise, dtype=tf.float32)
fake_labels_tensor = tf.convert_to_tensor(fake_labels, dtype=tf.float32)
generated_data = generator([noise_tensor, fake_labels_tensor])
real_data = data[i]
real_labels = labels[i]
#discriminator
with tf.GradientTape as disc_tape:
real_loss = cross_entropy(real_labels, discriminator([real_data, real_labels]))
fake_loss = cross_entropy(fake_labels, discriminator([generated_data, fake_labels]))
total_disc_loss = real_loss + fake_loss
# Update discriminator weights
gradients_of_discriminator = disc_tape.gradient(total_disc_loss, discriminator.trainable_variables)
optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
#genarator
with tf.GradientTape() as gen_tape:
generated_data = generator([noise, fake_labels])
validity = discriminator([generated_data, fake_labels])
gen_loss = cross_entropy(tf.ones_like(validity), validity)
# Update generator weights
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
我尝试重新格式化输入到模型中,但不幸的是没有得到任何值得一提的结果
1条答案
按热度按时间mmvthczy1#
从错误消息中可以看到,
sequential
Dense
层需要min_ndim=2
输入,您需要将1D数组重新整形为2D数组或重新定义模型的层以接受1D输入。