python: Vectorized groupby with NumPy

2izufjch · posted 2022-11-28 in Python

Pandas has a widely used groupby facility that splits a DataFrame according to a given mapping, applies a computation to each sub-group, and recombines the results.
Can this be done flexibly in NumPy without a native Python for loop? With a Python loop it would look like this:

>>> import numpy as np

>>> X = np.arange(10).reshape(5, 2)
>>> groups = np.array([0, 0, 0, 1, 1])

# Split up the rows of `X` by their group label and sum each group
>>> np.array([X[groups==i].sum() for i in np.unique(groups)])
array([15, 30])

Above, 15 is the sum of the first three rows of X, and 30 is the sum of the remaining two rows.
By "flexible" I mean that we are not tied to one particular computation such as sum, count, or max, but rather can pass any computation to the grouped arrays.
If not, is there a faster approach than the one above?
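For reference, here is a minimal sketch of the pandas equivalent (an illustration, assuming pandas is available; grouping by a plain label array is supported):

import numpy as np
import pandas as pd

X = np.arange(10).reshape(5, 2)
groups = np.array([0, 0, 0, 1, 1])

# Group the rows by the label array, sum per column, then collapse the columns
# to reproduce the "sum of all elements per group" from the loop above.
print(pd.DataFrame(X).groupby(groups).sum().sum(axis=1).to_numpy())  # [15 30]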


u4vypkhs · Answer 1

How about using a scipy sparse matrix?

import numpy as np
from scipy import sparse
import time

x_len = 500000
g_len = 100

X = np.arange(x_len * 2).reshape(x_len, 2)
groups = np.random.randint(0, g_len, x_len)

# original
s = time.time()

a = np.array([X[groups==i].sum() for i in np.unique(groups)])

print(time.time() - s)

# using scipy sparse matrix
s = time.time()

x_sum = X.sum(axis=1)
b = np.array(sparse.coo_matrix(
    (
        x_sum,
        (groups, np.arange(len(x_sum)))
    ),
    shape=(g_len, x_len)
).sum(axis=1)).ravel()

print(time.time() - s)

#compare
print(np.abs((a-b)).sum())

Results on my PC:

0.15915322303771973
0.012875080108642578
0

More than 10x faster.
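For intuition, here is the same coo_matrix trick applied to the 5-row example from the question (a small sketch; the variable names are mine):

import numpy as np
from scipy import sparse

X = np.arange(10).reshape(5, 2)
groups = np.array([0, 0, 0, 1, 1])

# Each row-sum becomes one entry at (group, row) in a sparse matrix, so summing
# along axis 1 adds up exactly the rows that belong to each group.
row_sums = X.sum(axis=1)
out = np.asarray(sparse.coo_matrix(
    (row_sums, (groups, np.arange(len(row_sums)))),
    shape=(groups.max() + 1, len(row_sums))
).sum(axis=1)).ravel()
print(out)  # [15 30]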
Update!
Let's benchmark against the answers by Paul Panzer and Daniel F. This is a sum-only benchmark.

import numpy as np
from scipy import sparse
import time

# by @Daniel F
def groupby_np(X, groups, axis = 0, uf = np.add, out = None, minlength = 0, identity = None):
    if minlength < groups.max() + 1:
        minlength = groups.max() + 1
    if identity is None:
        identity = uf.identity
    i = list(range(X.ndim))
    del i[axis]
    i = tuple(i)
    n = out is None
    if n:
        if identity is None:  # fallback to loops over 0-index for identity
            assert np.all(np.in1d(np.arange(minlength), groups)), "No valid identity for unassigned groups"
            s = [slice(None)] * X.ndim
            for i_ in i:
                s[i_] = 0
            out = np.array([uf.reduce(X[tuple(s)][groups == i]) for i in range(minlength)])
        else:
            out = np.full((minlength,), identity, dtype = X.dtype)
    uf.at(out, groups, uf.reduce(X, i))
    if n:
        return out

x_len = 500000
g_len = 200

X = np.arange(x_len * 2).reshape(x_len, 2)
groups = np.random.randint(0, g_len, x_len)

print("original")
s = time.time()

a = np.array([X[groups==i].sum() for i in np.unique(groups)])

print(time.time() - s)

print("use scipy coo matrix")
s = time.time()

x_sum = X.sum(axis=1)
b = np.array(sparse.coo_matrix(
    (
        x_sum,
        (groups, np.arange(len(x_sum)))
    ),
    shape=(g_len, x_len)
).sum(axis=1)).ravel()

print(time.time() - s)

#compare
print(np.abs((a-b)).sum())

print("use scipy csr matrix @Daniel F")
s = time.time()
x_sum = X.sum(axis=1)
c = np.array(sparse.csr_matrix(
    (
        x_sum,
        groups,
        np.arange(len(groups)+1)
    ),
    shape=(len(groups), g_len)
).sum(axis=0)).ravel()

print(time.time() - s)

#compare
print(np.abs((a-c)).sum())

print("use bincount @Paul Panzer @Daniel F")
s = time.time()
d = np.bincount(groups, X.sum(axis=1), g_len)
print(time.time() - s)

#compare
print(np.abs((a-d)).sum())

print("use ufunc @Daniel F")
s = time.time()
e = groupby_np(X, groups)
print(time.time() - s)

#compare
print(np.abs((a-e)).sum())

stdout:

original
0.2882847785949707
use scipy coo matrix
0.012301445007324219
0
use scipy csr matrix @Daniel F
0.01046299934387207
0
use bincount @Paul Panzer @Daniel F
0.007468223571777344
0.0
use ufunc @Daniel F
0.04431319236755371
0

The winner is the bincount solution, but the csr-matrix solution is also interesting.
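For reference, the winning line relies on np.bincount's weights argument: each occurrence of a group label contributes its row-sum as a weight, so the per-bin totals are exactly the grouped sums. A minimal sketch on the question's 5-row example:

import numpy as np

X = np.arange(10).reshape(5, 2)
groups = np.array([0, 0, 0, 1, 1])

# Without weights, bincount counts label occurrences; with weights it sums the
# supplied values per bin, which here yields the grouped sums directly.
print(np.bincount(groups, weights=X.sum(axis=1), minlength=2))  # [15. 30.]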


vuv7lop3 · Answer 2

@klim's sparse matrix solution looks, at first sight, as if it is tied to summation. However, we can use it in the general case by converting between the csr and csc formats.
Let's look at a small example:

>>> m, n = 3, 8                                                                                                     
>>> idx = np.random.randint(0, m, (n,))
>>> data = np.arange(n)
>>>                                                                                                                 
>>> M = sparse.csr_matrix((data, idx, np.arange(n+1)), (n, m))                                                      
>>>                                                                                                                 
>>> idx                                                                                                             
array([0, 2, 2, 1, 1, 2, 2, 0])                                                                                     
>>> 
>>> M = M.tocsc()
>>> 
>>> M.indptr, M.indices
(array([0, 2, 4, 8], dtype=int32), array([0, 7, 3, 4, 1, 2, 5, 6], dtype=int32))

As we can see, after the conversion the internal representation of the sparse matrix yields the indices grouped and sorted:

>>> groups = np.split(M.indices, M.indptr[1:-1])
>>> groups
[array([0, 7], dtype=int32), array([3, 4], dtype=int32), array([1, 2, 5, 6], dtype=int32)]
>>>

We can get the same result using a stable argsort:

>>> np.argsort(idx, kind='mergesort')
array([0, 7, 3, 4, 1, 2, 5, 6])
>>>

But the sparse matrix is actually faster, even if we allow argsort to use the faster but non-stable algorithm:
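The timings below use timeit with a shared kwds dict that is not shown in the answer; a plausible setup (an assumption, not part of the original) would be:

from timeit import timeit

# Assumed timing setup: expose the session's globals to the timed snippets
# and repeat each statement a fixed number of times.
kwds = dict(globals=globals(), number=100)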

>>> m, n = 1000, 100000
>>> idx = np.random.randint(0, m, (n,))
>>> data = np.arange(n)
>>> 
>>> timeit('sparse.csr_matrix((data, idx, np.arange(n+1)), (n, m)).tocsc()', **kwds)
2.250748165184632
>>> timeit('np.argsort(idx)', **kwds)
5.783584725111723

If we need argsort to preserve the order within groups (a stable sort), the difference is even larger:

>>> timeit('np.argsort(idx, kind="mergesort")', **kwds)
10.507467685034499
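Once the csc conversion has produced the grouped indices, arbitrary per-group reductions become straightforward. The sketch below is my own illustration (using the example indices from above, and assuming every group is non-empty):

import numpy as np
from scipy import sparse

m, n = 3, 8
idx = np.array([0, 2, 2, 1, 1, 2, 2, 0])  # the example indices shown above
data = np.arange(n)

# csr -> csc: M.indices holds the original positions grouped by label,
# and M.indptr marks where each group starts and ends.
M = sparse.csr_matrix((data, idx, np.arange(n + 1)), (n, m)).tocsc()

# Any per-group reduction is now easy (the Python loop runs only over groups):
medians = np.array([np.median(data[g]) for g in np.split(M.indices, M.indptr[1:-1])])

# ufunc reductions stay fully vectorized via reduceat
# (this assumes no group is empty, otherwise reduceat misbehaves):
group_max = np.maximum.reduceat(data[M.indices], M.indptr[:-1])
print(group_max)  # [7 4 6]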

vaqhlq81 · Answer 3

If you want a more flexible implementation of groupby, one that can reduce the groups with any numpy ufunc:

def groupby_np(X, groups, axis = 0, uf = np.add, out = None, minlength = 0, identity = None):
    if minlength < groups.max() + 1:
        minlength = groups.max() + 1
    if identity is None:
        identity = uf.identity
    i = list(range(X.ndim))
    del i[axis]
    i = tuple(i)
    n = out is None
    if n:
        if identity is None:  # fallback to loops over 0-index for identity
            assert np.all(np.in1d(np.arange(minlength), groups)), "No valid identity for unassigned groups"
            s = [slice(None)] * X.ndim
            for i_ in i:
                s[i_] = 0
            out = np.array([uf.reduce(X[tuple(s)][groups == i]) for i in range(minlength)])
        else:
            out = np.full((minlength,), identity, dtype = X.dtype)
    uf.at(out, groups, uf.reduce(X, i))
    if n:
        return out

groupby_np(X, groups)
array([15, 30])

groupby_np(X, groups, uf = np.multiply)
array([   0, 3024])

groupby_np(X, groups, uf = np.maximum)
array([5, 9])

groupby_np(X, groups, uf = np.minimum)
array([0, 6])
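The core primitive here is ufunc.at, which performs an unbuffered in-place reduction at (possibly repeated) indices. A minimal sketch of what it does in the sum case:

import numpy as np

X = np.arange(10).reshape(5, 2)
groups = np.array([0, 0, 0, 1, 1])

out = np.zeros(groups.max() + 1, dtype=X.dtype)
# Unlike out[groups] += X.sum(axis=1), np.add.at accumulates every occurrence
# of a repeated index instead of only the last one.
np.add.at(out, groups, X.sum(axis=1))
print(out)  # [15 30]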

v440hwme · Answer 4

There may be a faster way than this (both operands are being copied at the moment), but:

np.bincount(np.broadcast_to(groups, X.T.shape).ravel(), X.T.ravel())

array([ 15.,  30.])
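If per-column sums are wanted instead of a single total per group, the same bincount idea works by giving each column its own range of labels; a sketch (my own variation, not from the answer):

import numpy as np

X = np.arange(10).reshape(5, 2)
groups = np.array([0, 0, 0, 1, 1])

ng, ncols = groups.max() + 1, X.shape[1]
# Offset the labels per column so the sums stay separated per column.
labels = groups[:, None] + ng * np.arange(ncols)
col_sums = np.bincount(labels.ravel(), X.ravel(), ng * ncols).reshape(ncols, ng).T
print(col_sums)  # [[ 6.  9.]
                 #  [14. 16.]]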

laik7k3q · Answer 5

If you want to extend the answer to an ndarray (keeping per-column sums) and still have a fast computation, you can extend Daniel's solution:

import numpy as np

x_len = 500000
g_len = 200
y_len = 2

X = np.arange(x_len * y_len).reshape(x_len, y_len)
groups = np.random.randint(0, g_len, x_len)

# original
a = np.array([X[groups==i].sum(axis=0) for i in np.unique(groups)])

# alternative
bins = [0] + list(np.bincount(groups, minlength=g_len).cumsum())
Z = np.argsort(groups)
d = np.array([X.take(Z[bins[i]:bins[i+1]],0).sum(axis=0) for i in range(g_len)])

In this example it takes about 30 ms (15 ms to build the bins + 15 ms for the sums) instead of about 280 ms with the original approach.

>>> d.shape
(200, 2)
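A fully vectorized variant of the same sort-then-reduce idea (a sketch, assuming every label in range(g_len) actually occurs, since np.add.reduceat gives wrong results for empty slices):

import numpy as np

x_len, g_len, y_len = 500000, 200, 2
X = np.arange(x_len * y_len).reshape(x_len, y_len)
groups = np.random.randint(0, g_len, x_len)

# Sort the rows by group once, then sum each contiguous run of rows.
order = np.argsort(groups, kind='stable')
starts = np.r_[0, np.bincount(groups, minlength=g_len).cumsum()[:-1]]
d = np.add.reduceat(X[order], starts, axis=0)  # shape (g_len, y_len)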
