I am a first-time Python user, and I am trying to build an LSTM autoencoder model with the code below. My goal is to extract the compressed data at the bottleneck layer. However, I keep getting the error "This model has not yet been built. Build the model first by calling `build()`" every time I run the encoder. I have already tried a few suggestions, but I still get the same error again and again. What am I missing?
# imports needed by the code below
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input, LSTM, RepeatVector, TimeDistributed, Dense
from tensorflow.keras.models import Model, Sequential

from google.colab import drive
drive.mount('/content/drive')

data = pd.read_csv(r'/content/drive/MyDrive/Datasets/All_Autoencoder_Data.csv')
data1 = data.drop('Labels', axis=1)
data1.shape
(225711, 68)
def create_sequences(X, time_steps=5):
    Xs = []
    for i in range(0, len(X) - time_steps, 5):
        Xs.append(X.iloc[i:(i + time_steps)].values)
    return np.array(Xs)
X_train = create_sequences(data1)
X_train.shape
(45142, 5, 68)
inputs = Input(shape=(X_train.shape[1], X_train.shape[2]))
eL0 = LSTM(68, activation='tanh', return_sequences=True,
           recurrent_activation="sigmoid",
           kernel_initializer="glorot_uniform",
           recurrent_regularizer=regularizers.l2(0.001),
           kernel_regularizer=regularizers.l2(0.001))(inputs)
eL1 = LSTM(32, activation='tanh', return_sequences=True,
           recurrent_activation="sigmoid",
           kernel_initializer="glorot_uniform",
           recurrent_regularizer=regularizers.l2(0.001),
           kernel_regularizer=regularizers.l2(0.001))(eL0)
eL2 = LSTM(8, activation='tanh', return_sequences=False)(eL1)
# eL3 = LSTM(16, activation='relu', return_sequences=False)(eL2)
h = RepeatVector(X_train.shape[1])(eL2)
dL2 = LSTM(32, activation='tanh', return_sequences=True)(h)
dL3 = LSTM(68, activation='tanh', return_sequences=True)(dL2)
output = TimeDistributed(Dense(X_train.shape[2]))(dL3)
#output = Dense(X_train.shape[2])(dL4)
model = Model(inputs=inputs, outputs=output)
#return model
model.summary()
model.build(input_shape=(None, 5, 68))
# fit the model to the data
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
nb_epochs = 200
batch_size = 250
opt = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=opt, loss='log_cosh', metrics=['accuracy'])
history = model.fit(X_train, X_train,
                    epochs=nb_epochs,
                    batch_size=batch_size,
                    validation_split=0.3,
                    callbacks=[callback]
                    ).history
# plot the training losses
fig, ax = plt.subplots(figsize=(6, 4), dpi=80)
ax.plot(history['loss'], 'b', label='Train', linewidth=2)
ax.plot(history['val_loss'], 'r', label='Validation', linewidth=2)
ax.set_title('Model loss', fontsize=16)
ax.set_ylabel('Loss (mse)')
ax.set_xlabel('Epoch')
ax.legend(loc='upper right')
plt.show()
# Encoder
enc=Sequential()
enc.add(model.layers[1])
enc.add(model.layers[2])
enc.add(model.layers[3])
enc.summary()
ValueError Traceback (most recent call last)
<ipython-input-50-225773931044> in <module>
3 enc.add(model.layers[2])
4 enc.add(model.layers[3])
----> 5 enc.summary()
/usr/local/lib/python3.9/dist-packages/keras/engine/training.py in summary(self, line_length, positions, print_fn, expand_nested, show_trainable, layer_range)
3290 """
3291 if not self.built:
-> 3292 raise ValueError(
3293 "This model has not yet been built. "
3294 "Build the model first by calling `build()` or by calling "
ValueError: This model has not yet been built. Build the model first by calling `build()` or by calling the model on a batch of data.
1 Answer
I had to build the model with enc.build((None, 5, 68)) before calling enc.summary(), and then it worked.
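For reference, a minimal sketch of that fix applied end to end; it assumes the enc, model, and X_train objects defined above, and the name bottleneck_features is purely illustrative:

# Minimal sketch of the fix (assumes enc, model and X_train from above;
# bottleneck_features is an illustrative name, not from the original post).
enc.build((None, 5, 68))   # build the Sequential wrapper before summary()
enc.summary()

# The last encoder layer has return_sequences=False and 8 units, so this
# yields the compressed bottleneck representation with shape (n_samples, 8).
bottleneck_features = enc.predict(X_train)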