I wrote a general convolution function in Python for CNNs.
It turns out it takes almost 5 times as long as Keras's Conv2D.
So I'm curious whether anyone knows why there is such a speed difference?
(With my convolution function, one epoch on the MNIST dataset takes almost 10-15 minutes, whereas Keras takes almost 3-4 minutes.)
Here is my Conv class:
import numpy as np

class Convolutional2D(Layer):  # Layer is a base class from the rest of the framework; it provides init_adam_params
    def __init__(self, kernel_size, feature_maps):
        self.kernel_size = kernel_size
        self.feature_maps = feature_maps
        self.b = np.zeros((feature_maps))  # np.random.rand(feature_maps)

    def connect(self, to_layer):
        if len(to_layer.layer_shape) == 2:
            kernel_shape = [self.feature_maps, self.kernel_size, self.kernel_size]
            self.layer_shape = [self.feature_maps] + list(np.array(to_layer.layer_shape)-self.kernel_size+1)
        else:
            kernel_shape = [self.feature_maps, to_layer.layer_shape[0], self.kernel_size, self.kernel_size]
            self.layer_shape = [self.feature_maps] + list(np.array(to_layer.layer_shape[1:])-self.kernel_size+1)
        self.kernel = np.random.random(kernel_shape)
        super().init_adam_params(self.kernel, self.b)
    def convolve(self, x, k, mode='forward'):
        if mode == 'forward':
            ksize = k.shape[-1]
            if len(x.shape) == 3:
                out = np.zeros((x.shape[0], k.shape[0], x.shape[1]-k.shape[1]+1, x.shape[2]-k.shape[2]+1))
            else:
                out = np.zeros((x.shape[0], k.shape[0], x.shape[2]-k.shape[2]+1, x.shape[3]-k.shape[3]+1))
            # slide the kernel over every valid output position in a Python-level double loop
            for i in range(out.shape[2]):
                for j in range(out.shape[3]):
                    if len(x.shape) == 3:
                        window = x[:,i:i+ksize,j:j+ksize]
                        m = np.reshape(window, (window.shape[0], 1, window.shape[1], window.shape[2]))*k
                        m = np.sum(m, axis=(2,3))
                    else:
                        window = x[:,:,i:i+ksize,j:j+ksize]
                        m = np.reshape(window, (window.shape[0], 1, window.shape[1], window.shape[2], window.shape[3]))*k
                        m = np.sum(m, axis=(2,3,4))
                    out[:,:,i,j] = m
            return out
        elif mode == 'backward_i':
            if len(k.shape) == 3:
                out = np.zeros((x.shape[0], x.shape[2]+k.shape[1]-1, x.shape[3]+k.shape[2]-1))
                x = np.pad(x, ((0, 0), (0, 0), (k.shape[1]-1, k.shape[1]-1), (k.shape[2]-1, k.shape[2]-1)))
            else:
                out = np.zeros((x.shape[0], k.shape[1], x.shape[2]+k.shape[2]-1, x.shape[3]+k.shape[3]-1))
                x = np.pad(x, ((0, 0), (0, 0), (k.shape[2]-1, k.shape[2]-1), (k.shape[3]-1, k.shape[3]-1)))
                fk = np.transpose(k, axes=(1,0,2,3))
                x = np.reshape(x, (x.shape[0], 1, x.shape[1], x.shape[2], x.shape[3]))
            ksize = k.shape[-1]
            for i in range(out.shape[-2]):
                for j in range(out.shape[-1]):
                    if len(k.shape) == 3:
                        window = x[:,:,i:i+ksize,j:j+ksize]
                        m = window*k
                        m = np.sum(m, axis=(1,2,3))
                        out[:,i,j] = m
                    else:
                        window = x[:,:,:,i:i+ksize,j:j+ksize]
                        m = window*fk
                        m = np.sum(m, axis=(2,3,4))
                        out[:,:,i,j] = m
            return out
        elif mode == 'backward_k':
            if len(x.shape) == 3:
                out = np.zeros((k.shape[1], x.shape[1]-k.shape[2]+1, x.shape[2]-k.shape[3]+1))
            else:
                out = np.zeros((k.shape[1], x.shape[1], x.shape[2]-k.shape[2]+1, x.shape[3]-k.shape[3]+1))
                x = np.transpose(x, axes=(1,0,2,3))
                x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3]))
            ksize = k.shape[-1]
            k = np.transpose(k, axes=(1,0,2,3))
            if len(x.shape) != 3:
                fk = np.reshape(k, (k.shape[0], 1, k.shape[1], k.shape[2], k.shape[3]))
            for i in range(out.shape[-2]):
                for j in range(out.shape[-1]):
                    if len(x.shape) == 3:
                        window = x[:,i:i+ksize,j:j+ksize]
                        m = window*k
                        m = np.sum(m, axis=(1,2,3))
                        out[:,i,j] = m
                    else:
                        window = x[:,:,i:i+ksize,j:j+ksize]
                        m = window*fk
                        m = np.sum(m, axis=(2,3,4))
                        out[:,:,i,j] = m
            return out
    def forward(self, x):
        return self.convolve(x, self.kernel)

    def backward(self, x, loss_grad, params):
        if len(self.kernel.shape) == 3:
            flipped_kernel = np.flip(self.kernel, axis=(1,2))
            flipped_loss_grad = np.flip(loss_grad, axis=(1,2))
        else:
            flipped_kernel = np.flip(self.kernel, axis=(2,3))
            flipped_loss_grad = np.flip(loss_grad, axis=(2,3))
        i_grad = self.convolve(loss_grad, flipped_kernel, mode='backward_i')
        k_grad = self.convolve(x, flipped_loss_grad, mode='backward_k')
        # Adam-style moment updates for the kernel
        self.vw = params['beta1']*self.vw + (1-params['beta1'])*k_grad
        self.sw = params['beta2']*self.sw + (1-params['beta2'])*(k_grad**2)
        self.kernel += params['lr']*self.vw/np.sqrt(self.sw+params['eps'])
        return i_grad
    def get_save_data(self):
        return {'type':'Convolutional2D', 'shape':np.array(self.layer_shape).tolist(), 'data':[self.kernel_size, self.feature_maps, self.kernel.tolist()]}

    def load_saved_data(data):
        obj = Convolutional2D(data['data'][0], data['data'][1])
        obj.layer_shape = data['shape']
        obj.kernel = np.array(data['data'][2])
        obj.init_adam_params(obj.kernel, obj.b)
        return obj
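To get a rough sense of where the time goes: the two nested for loops in convolve run once per output pixel for every batch, and each iteration builds and reduces a temporary broadcast array in interpreted Python. A back-of-the-envelope count for MNIST, assuming purely for illustration a 3x3 kernel and batch size 32 (neither is stated above):

# Rough count of Python-level loop iterations per epoch, forward pass only.
# Assumed values (not given in the question): 3x3 kernel, batch size 32.
images, h, w, ksize, batch = 60000, 28, 28, 3, 32
out_h, out_w = h - ksize + 1, w - ksize + 1   # 26 x 26 'valid' output
iters_per_batch = out_h * out_w               # 676 interpreted iterations per forward call
batches = images // batch                     # 1875 batches per epoch
print(iters_per_batch * batches)              # ~1.27 million iterations, before any backward pass

The backward_i and backward_k modes run similar (and, because of padding, larger) loops on top of that, so every epoch pays interpreter and temporary-allocation overhead millions of times.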
1 Answer
Keras and PyTorch are far more efficient because they take advantage of vectorization and highly optimized matrix multiplication. They essentially turn the convolution into a matrix multiplication by flattening the filters and building a new matrix whose columns hold the values of each patch. They also exploit how the data is laid out in memory. You can find more information in this article: https://towardsdatascience.com/how-are-convolutions-actually-performed-under-the-hood-226523ce7fbf
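To make that concrete, here is a minimal NumPy sketch of the im2col idea. It is not Keras's actual implementation; the function name and shapes are only for illustration, and it assumes NumPy >= 1.20 for sliding_window_view:

import numpy as np

def conv2d_im2col(x, kernel):
    # x: (batch, channels, H, W), kernel: (filters, channels, kh, kw)
    # 'Valid' cross-correlation expressed as a single matrix multiplication.
    B, C, H, W = x.shape
    F, _, kh, kw = kernel.shape
    out_h, out_w = H - kh + 1, W - kw + 1
    # Collect every kh x kw patch once: (B, C, out_h, out_w, kh, kw)
    patches = np.lib.stride_tricks.sliding_window_view(x, (kh, kw), axis=(2, 3))
    # Flatten each patch into one row: (B, out_h*out_w, C*kh*kw)
    cols = patches.transpose(0, 2, 3, 1, 4, 5).reshape(B, out_h * out_w, C * kh * kw)
    # Flatten the filters the same way: (F, C*kh*kw)
    w = kernel.reshape(F, -1)
    # One big matmul replaces the Python loop over every output position
    out = cols @ w.T                               # (B, out_h*out_w, F)
    return out.transpose(0, 2, 1).reshape(B, F, out_h, out_w)

x = np.random.rand(32, 1, 28, 28)   # an MNIST-sized batch
k = np.random.rand(8, 1, 3, 3)
print(conv2d_im2col(x, k).shape)    # (32, 8, 26, 26)

All of the per-pixel arithmetic ends up inside one optimized BLAS call, which is essentially what the linked article describes; the pure-Python loops in the question compute the same thing but pay interpreter overhead and allocate a fresh temporary array for every single output position.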