Neural network training is too slow

x8diyxa7 · posted on 2021-08-25 in Java

I am implementing a neural network from scratch in Python in order to train it on the MNIST handwritten digits dataset. I have tried to debug it, but I still don't understand why it trains so slowly. When I plot the loss it does go down, but when it comes to predicting digits the predictions look essentially random. I have tested the same code on the Iris dataset and there it predicts the classes correctly, just not on the handwritten digits. Here is the code:

import numpy as np
import mnist
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from sklearn.metrics import accuracy_score

def ReLU(z):
    # note: modifies z in place
    z[z<0] = 0
    return z

def ReLU_prime(z):
    # note: modifies z in place
    z[z<0] = 0
    z[z>=0] = 1
    return z

def softmax(z):
    return np.exp(z)/np.sum(np.exp(z),axis=0)

class NN:
    def __init__(self,layers,lr=0.0001):
        self.layers = layers
        self.lr = lr
        self.n_layers = len(layers)

        self.weights = [np.random.randn(y,x)/np.sqrt(y) for x,y in zip(layers[:-1],layers[1:])]
        self.biases = [np.zeros((y,1)) for y in layers[1:]]

    def forward(self,X):
        A = X.copy()  # n x m (features x samples)
        if len(A.shape) == 1:
            A = A[:,np.newaxis]
        cache = [(None,A)]

        for l in range(self.n_layers - 1):
            Z = np.dot(self.weights[l],A) + self.biases[l]
            if l < self.n_layers - 2:
                A = ReLU(Z)
            else:
                A = softmax(Z)

            cache.append((Z,A))
        return A,cache

    def backprop(self,X,Y):
        A,cache = self.forward(X)
        m = X.shape[1]
        dz = A - Y
        dwdbs = []
        for l in reversed(range(1,self.n_layers)):
            dw = (1/m)*np.dot(dz,cache[l-1][1].T)
            db = (1/m)*np.sum(dz,axis=1)[:,np.newaxis]

            if l > 1:
                da = np.dot(self.weights[l-1].T,dz)
                dz = da*ReLU_prime(cache[l-1][0])
            dwdbs.append((dw,db))

        for i,(dw,db) in enumerate(dwdbs):
            self.weights[self.n_layers - 2 - i] -= self.lr*dw
            self.biases[self.n_layers - 2 - i] -= self.lr*db
        return self.loss(X,Y)

    def loss(self,X,Y):
        A,_ = self.forward(X)
        m = Y.shape[1]
        idxs = np.where(Y==1)
        return -(1/m)*np.sum(Y[idxs]*np.log(A[idxs]),axis=0)

    def predict(self,a):
        return np.argmax(a)

if __name__ == '__main__':                
    X,y = mnist.train_images()[:10_000],mnist.train_labels()[:10_000]
    X_train,X_test, y_train,y_test = train_test_split(X,y,test_size=0.1)
    X_train_orig = X_train.copy()
    X_test_orig = X_test.copy()

    # scale pixel values to [0,1]
    X_train = X_train/255
    X_train = np.reshape(X_train,(784,-1))
    N = X_train.shape[1]
    X_test = X_test/255
    X_test = np.reshape(X_test,(784,-1))

    # one-hot encode the training labels into shape (10, n_samples)
    y_train_ = np.zeros((10,y_train.shape[0]))
    y_train_[y_train,[x for x in range(y_train.shape[0])]] = 1
    y_train = y_train_.copy()

    nn = NN([784,150,150,50,10],0.1)

    epochs = 10_000
    losses = []

    for epoch in range(epochs):
        print('epoch',epoch,'lr',nn.lr)
        nn.lr *= 0.99992
        idxs = np.random.permutation(N)
        loss = nn.backprop(X_train[:,idxs],y_train[:,idxs])
        losses.append(loss)
        print(loss)

    plt.plot([x for x in range(len(losses))],losses)
    plt.show()
    acc =  0
    N_test = X_test.shape[1]
    preds = []
    for i in range(10):
        idx = np.random.choice(N_test)
        a,_ = nn.forward(X_test[:,idx])
        plt.imshow(X_test_orig[idx])
        plt.show()
        a = nn.predict(a)
        print('a',a,'y',y_test[idx])
        if a == y_test[idx]:
            acc += 1
    print('acc',acc)
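
One detail worth double-checking is the reshape step in the preprocessing. Assuming mnist.train_images() returns an array of shape (n_samples, 28, 28), np.reshape(X_train, (784, -1)) does not put one flattened image into each column: it reads the pixels in C order and spreads pixels from different images across every column, so the columns no longer line up with the labels. The snippet below is only a small sanity check of that assumption; the flatten_images helper is hypothetical and not part of the code above.

import numpy as np

def flatten_images(X):
    # hypothetical helper: one flattened image per column -> shape (784, n_samples)
    return X.reshape(len(X), -1).T

X = np.arange(3*28*28).reshape(3, 28, 28)   # three fake "images" with distinct pixel values
per_column = flatten_images(X)              # shape (784, 3)
plain_reshape = np.reshape(X, (784, -1))    # shape (784, 3), pixels interleaved across images

# column 0 of the helper output is exactly image 0, flattened
print(np.array_equal(per_column[:, 0], X[0].ravel()))     # prints True
# column 0 of the plain reshape is not image 0
print(np.array_equal(plain_reshape[:, 0], X[0].ravel()))  # prints False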
