PyTorch运行时错误:给定groups=1,权重大小为[16,395,3],预期输入[1,16,395]有395个通道,但实际上得到了16个通道

mnowg1ta  于 2023-04-30  发布在  其他
关注(0)|答案(1)|浏览(165)

因此,我试图构建一个1d信号的分类器,如下所示:

class M5(nn.Module):
    """1-D CNN classifier for raw signals.

    Conv1d expects input of shape (batch, channels, length), so a plain 1-D
    signal must be fed as (batch, n_input=1, length); a raw (batch, length)
    tensor needs an ``unsqueeze(1)`` before calling the model.

    Args:
        n_input: number of input channels (1 for a single raw signal).
        n_output: number of classes.
        stride: unused here; kept for interface compatibility.
        n_channel: base number of convolution channels.
    """

    def __init__(self, n_input=1, n_output=3, stride=4, n_channel=16):
        super().__init__()
        self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=3)
        self.bn1 = nn.BatchNorm1d(n_channel)
        self.pool1 = nn.MaxPool1d(3)
        self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=2)
        self.bn2 = nn.BatchNorm1d(n_channel)
        self.pool2 = nn.MaxPool1d(2)
        self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=2)
        self.bn3 = nn.BatchNorm1d(2 * n_channel)
        self.pool3 = nn.MaxPool1d(2)
        self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=2)
        self.bn4 = nn.BatchNorm1d(2 * n_channel)
        self.pool4 = nn.MaxPool1d(2)
        self.fc1 = nn.Linear(2 * n_channel, n_output)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, n_output)."""
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.pool1(x)
        x = self.conv2(x)
        x = F.relu(self.bn2(x))
        x = self.pool2(x)
        x = self.conv3(x)
        x = F.relu(self.bn3(x))
        x = self.pool3(x)
        x = self.conv4(x)
        x = F.relu(self.bn4(x))
        x = self.pool4(x)
        # Global average pool over the remaining time axis -> (N, C, 1), then
        # drop only that last axis; squeeze(-1) stays correct for batch size 1.
        # BUG FIX: the original `x.permute(0, 1)` raised on a 3-D tensor (permute
        # needs one index per dim) and left fc1 a (N, C, 1) input it cannot take.
        x = F.avg_pool1d(x, x.shape[-1]).squeeze(-1)
        x = self.fc1(x)
        # BUG FIX: output is 2-D (N, n_output); dim=2 does not exist, so
        # normalize over the last (class) dimension instead.
        return F.log_softmax(x, dim=-1)

# NOTE(review): `ex.shape[0]` here is the sequence length (395), not a channel
# count. Conv1d interprets `n_input` as the number of input channels, so a
# plain 1-D signal needs n_input=1 and inputs shaped (batch, 1, 395) — this
# mismatch is exactly what produces the reported runtime error.
model = M5(n_input=ex.shape[0], n_output=len(labels))

例如,shape[0]这里是395,len(labels)对应于唯一类的数量。我的输入数据是一个大小为16的批,批中每个Tensor的长度是395。这个错误表明我的NN的第一层没有得到适当大小的输入,但我一生都不知道为什么。有人能建议吗?

n6lpvg4x

n6lpvg4x1#

conv1d的输入应该具有 (N, C_in, L) 的形状,而您的输入具有 (N, 395) 的形状。因此,您需要添加一个额外的维度(channel 维)。我将输入转换为 (N, 1, 395) 的形状,如下所示:

import torch
from torch import nn
from torch.nn import functional as F
class M5(nn.Module):
    """1-D CNN classifier for raw signals.

    Conv1d expects input of shape (batch, channels, length); feed a raw
    (batch, length) signal as (batch, 1, length) via ``unsqueeze(1)``.

    Args:
        n_input: number of input channels (1 for a single raw signal).
        n_output: number of classes.
        stride: unused here; kept for interface compatibility.
        n_channel: base number of convolution channels.
    """

    def __init__(self, n_input=1, n_output=3, stride=4, n_channel=16):
        super().__init__()
        self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=3)
        self.bn1 = nn.BatchNorm1d(n_channel)
        self.pool1 = nn.MaxPool1d(3)
        self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=2)
        self.bn2 = nn.BatchNorm1d(n_channel)
        self.pool2 = nn.MaxPool1d(2)
        self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=2)
        self.bn3 = nn.BatchNorm1d(2 * n_channel)
        self.pool3 = nn.MaxPool1d(2)
        self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=2)
        self.bn4 = nn.BatchNorm1d(2 * n_channel)
        self.pool4 = nn.MaxPool1d(2)
        self.fc1 = nn.Linear(2 * n_channel, n_output)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, n_output)."""
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.pool1(x)
        x = self.conv2(x)
        x = F.relu(self.bn2(x))
        x = self.pool2(x)
        x = self.conv3(x)
        x = F.relu(self.bn3(x))
        x = self.pool3(x)
        x = self.conv4(x)
        x = F.relu(self.bn4(x))
        x = self.pool4(x)
        # Global average pool -> (N, C, 1). BUG FIX: a bare .squeeze() drops
        # EVERY size-1 dim, so a batch of size 1 collapsed (1, C, 1) to (C,)
        # and the batch dimension was lost; squeeze(-1) removes only the
        # pooled time axis.
        x = F.avg_pool1d(x, x.shape[-1]).squeeze(-1)
        x = self.fc1(x)
        return F.log_softmax(x, dim=-1)  # normalize over the class dim

# Build the classifier with a single input channel and 10 classes,
# then run a batch of 16 raw 1-D signals through it.
model = M5(n_input=1, n_output=10)
example_batch = torch.randn(16, 395)
# unsqueeze(1) inserts the channel dim Conv1d requires: (16, 395) -> (16, 1, 395)
model(example_batch.unsqueeze(1)).shape

相关问题