ValueError: Input 0 of layer sequential is incompatible with the layer: : expected min_ndim=4, found ndim=2. Full shape received: (None, 784)

dpiehjr4 · Posted on 2021-08-20 in Java
import tensorflow as tf
import cv2
from tensorflow.keras.models import load_model
import numpy as np
import math

def process(img_input):
    gray = cv2.cvtColor(img_input, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (28, 28), interpolation=cv2.INTER_AREA)
    (thresh, img_binary) = cv2.threshold(
        gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    h, w = img_binary.shape
    ratio = 100 / h
    new_h = 100
    new_w = w * ratio

    img_empty = np.zeros((110, 110), dtype=img_binary.dtype)
    img_binary = cv2.resize(
        img_binary, (int(new_w), int(new_h)), interpolation=cv2.INTER_AREA)
    img_empty[:img_binary.shape[0], :img_binary.shape[1]] = img_binary
    img_binary = img_empty

    cnts = cv2.findContours(
        img_binary.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    M = cv2.moments(cnts[0][0])
    center_x = (M["m10"] / M["m00"])
    center_y = (M["m01"] / M["m00"])

    height, width = img_binary.shape[:2]
    shiftx = width / 2 - center_x
    shifty = height / 2 - center_y

    Translation_Matrix = np.float32([[1, 0, shiftx], [0, 1, shifty]])
    img_binary = cv2.warpAffine(
        img_binary, Translation_Matrix, (width, height))

    img_binary = cv2.resize(img_binary, (28, 28), interpolation=cv2.INTER_AREA)
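    # flatten() below yields a 1-D (784,) vector; model.predict() later
    # receives it as (1, 784) after np.newaxis is applied.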
    flatten = img_binary.flatten() / 255.0
    return flatten

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
                           input_shape=(100, 100, 3)),
    tf.keras.layers.MaxPool2D(2, 2),
    ##
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(2, 2),
    ##
    tf.keras.layers.Conv2D(
        64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(2, 2),
    ##
    tf.keras.layers.Conv2D(
        128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(2, 2),
    ##
    tf.keras.layers.Conv2D(
        256, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(2, 2),
    ##
    tf.keras.layers.Flatten(),
    ##
    tf.keras.layers.Dense(512, activation='relu'),
    ##
    tf.keras.layers.Dense(3, activation='softmax')
])

model = load_model('model3.h5')

cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

while True:
    ret, img_color = cap.read()

    if ret == False:
        break

    img_input = img_color.copy()
    cv2.rectangle(img_color, (250, 150),
                  (width - 250, height - 150), (0, 0, 255), 3)
    cv2.imshow('bgr', img_color)

    img_roi = img_input[150:height - 150, 250:width - 250]

    key = cv2.waitKey(1)

    if key == 27:
        break
    elif key == 32:
        flatten = process(img_roi)

        predictions = model.predict(flatten[np.newaxis, :])

        with tf.compat.v1.Session() as sess:
            print(tf.argmax(predictions, 1).eval())

        cv2.imshow('img_roi', img_roi)
        cv2.waitKey(0)

cap.release()
cv2.destroyAllWindows()
Traceback (most recent call last):
  File "C:/Users/TOTOYAA/Desktop/mnist/34.py", line 98, in <module>
    predictions = model.predict(flatten[np.newaxis, :])
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1629, in predict
    tmp_batch_outputs = self.predict_function(iterator)
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
    result = self._call(*args,**kwds)
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
    self._initialize(args, kwds, add_initializers_to=initializers)
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\eager\def_function.py", line 726, in _initialize
    *args,**kwds))
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
    graph_function, _ = self._maybe_define_function(args, kwargs)
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
    graph_function = self._create_graph_function(args, kwargs)
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\eager\function.py", line 3206, in _create_graph_function
    capture_by_value=self._capture_by_value),
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
    func_outputs = python_func(*func_args,**func_kwargs)
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
    out = weak_wrapped_fn().__wrapped__(*args,**kwds)
  File "C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
    raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:

    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\keras\engine\training.py:1478 predict_function  *
        return step_function(self, iterator)
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\keras\engine\training.py:1468 step_function**
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
        return fn(*args,**kwargs)
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\keras\engine\training.py:1461 run_step**
        outputs = model.predict_step(data)
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\keras\engine\training.py:1434 predict_step
        return self(x, training=False)
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:998 __call__
        input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
    C:\ProgramData\Anaconda3\envs\Seongwoo\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:239 assert_input_compatibility
        str(tuple(shape)))

    ValueError: Input 0 of layer sequential is incompatible with the layer: : expected min_ndim=4, found ndim=2. Full shape received: (None, 784)

[ WARN:1] global C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-fgndhvyk\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback

Process finished with exit code 1

When I run my camera code, I get the error shown in the title. It occurs at the line predictions = model.predict(flatten[np.newaxis, :]).
Please give me some help.
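
For context, the traceback reports a shape mismatch: a Conv2D-based model expects a 4-D batch of images (batch, height, width, channels), while flatten[np.newaxis, :] is only 2-D with shape (1, 784). The sketch below is a minimal, hypothetical illustration of that mismatch; it assumes a 28x28 single-channel target shape, which may not match model3.h5 (the Sequential model defined in the file, which load_model() replaces, expects (100, 100, 3) input).

import numpy as np

# Hypothetical illustration of the shape mismatch behind the ValueError.
flat = np.zeros(784, dtype=np.float32)   # same shape as what process() returns

print(flat[np.newaxis, :].shape)         # (1, 784) -> ndim=2, rejected by the Conv2D input spec

# A Conv2D input layer accepts a 4-D array. This assumes a 28x28 single-channel
# model; if model3.h5 matches the Sequential definition above, the required
# shape would instead be (1, 100, 100, 3).
batch = flat.reshape(1, 28, 28, 1)       # ndim=4
print(batch.shape)                       # (1, 28, 28, 1)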

No answers yet.
