NextJS速率限制不适用于中间件

mgdq6dx1  于 12个月前  发布在  其他
关注(0)|答案(1)|浏览(117)

我按照 Vercel 的官方示例，使用 lru-cache 实现速率限制，它工作得很好。
但是，我不想在每条路由上都写 try-catch，而是想在中间件中统一处理所有路由的速率限制。但当我在中间件中做同样的尝试时，它不起作用：CACHE_TOKEN 的计数在 API 路由中每次递增 1，但在中间件中该值始终不变。

Vercel示例https://github.com/vercel/next.js/tree/canary/examples/api-routes-rate-limit
src/helpers/rateLimit.js

import LRU from 'lru-cache'

/**
 * Creates an in-memory rate limiter backed by an LRU cache.
 * All callers share one global bucket (`CACHE_TOKEN`), limited to
 * `limit` requests per TTL window.
 */
export default function rateLimit() {
  const tokenCache = new LRU({
    max: 500, // at most 500 distinct tokens tracked at once
    ttl: 1 * 60000, // window length: 1 minute in milliseconds
  })

  const token = 'CACHE_TOKEN' // single shared bucket for every request
  const limit = 3 // max requests allowed per window

  return {
    /**
     * Counts one request against the shared bucket and writes
     * X-RateLimit-* headers onto `res` (expects a Fetch-style
     * `res.headers`, e.g. a NextResponse).
     * Resolves when within the limit, rejects with an Error otherwise.
     */
    check: (res) =>
      new Promise((resolve, reject) => {
        // The count lives in a one-element array so the cached entry can
        // be mutated in place; set() happens only on first sight so later
        // hits do not refresh the TTL.
        const tokenCount = tokenCache.get(token) || [0]
        if (tokenCount[0] === 0) {
          tokenCache.set(token, tokenCount)
        }

        tokenCount[0] += 1
        const currentUsage = tokenCount[0]
        // BUG FIX: use `>` so exactly `limit` requests pass per window;
        // the original `>=` rejected the limit-th request (off-by-one).
        const isRateLimited = currentUsage > limit

        // Fetch Headers values are strings; stringify explicitly.
        res.headers.set('X-RateLimit-Limit', String(limit))
        res.headers.set(
          'X-RateLimit-Remaining',
          String(isRateLimited ? 0 : limit - currentUsage)
        )

        console.log(tokenCache.get(token))
        /*
        Observed by the question author:
        using api route: [ 1 ] [ 2 ] [ 3 ]
        using middleware: [ 1 ] [ 1 ] [ 1 ] [ 1 ] [ 1 ] ...
        */

        // BUG FIX: reject with a real Error so catch blocks log something useful.
        return isRateLimited ? reject(new Error('Rate limit exceeded')) : resolve()
      }),
  }
}

src/pages/api/hello.js

import { NextResponse } from 'next/server'
import rateLimit from '@/helpers/rateLimit'

// Module-scope limiter shared by every request to this route.
const limiter = rateLimit()

// Pages-router API route guarded by the limiter.
// NOTE(review): `response` is a NextResponse that is never returned — the
// X-RateLimit-* headers that check() sets on it are discarded, because the
// actual reply is sent through `res`. Confirm whether those headers are
// meant to reach the client here.
export default async function handler(req, res) {
  const response = NextResponse.next()

  try {
    await limiter.check(response) // helper's limit is 3/minute (the original "10 requests" comment was wrong)
  } catch (error) {
    console.log(error)
    return res.status(429).json({ error: 'Rate limit exceeded' })
  }

  return res.status(200).json({ message: 'Hello World' })
}

src/middleware.js

import { NextResponse } from 'next/server'
import rateLimit from '@/helpers/rateLimit'

// Module-scope limiter: one shared bucket for every request this
// middleware handles.
// NOTE(review): if the counter appears stuck at 1 in middleware, module
// state is likely not being shared between invocations (e.g. Edge-runtime
// isolation) — TODO confirm; an external store (e.g. Redis) would be the
// robust fix.
const limiter = rateLimit()

/**
 * Global middleware rate limiter for all matched routes.
 * Returns 429 JSON when over the limit; otherwise returns the
 * NextResponse that check() decorated with X-RateLimit-* headers.
 */
export async function middleware(req) {
  const response = NextResponse.next()

  try {
    await limiter.check(response) // shared bucket; limit defined in the helper
  } catch (error) {
    console.log(error)
    // BUG FIX: send a real 429 status — the original returned HTTP 200
    // with an error body.
    return NextResponse.json({ error: 'Rate limit exceeded' }, { status: 429 })
  }

  // BUG FIX: return the decorated response so the rate-limit headers are
  // actually sent; the original fell through (returned undefined) and
  // discarded them.
  return response
}
5jdjgkvh

5jdjgkvh1#

所以我设法让它在中间件中正常工作了。
这是我的ratelimiter实现

import { LRUCache } from 'lru-cache';

type Options = {
  uniqueTokenPerInterval?: number; // max number of unique tokens tracked in the time period
  interval?: number; // window length in milliseconds
  limit: number; // max number of requests allowed within interval
};

/**
 * Creates an in-memory rate limiter backed by an LRU cache.
 * Each token (e.g. an access token or IP) gets its own counter that
 * expires `interval` ms after it is first seen.
 */
export default function rateLimit(options?: Options) {
  // Typed cache removes the need for an `as number[]` cast on get().
  const tokenCache = new LRUCache<string, number[]>({
    max: options?.uniqueTokenPerInterval || 50,
    ttl: options?.interval || 60 * 1000,
  });

  return {
    /**
     * Records one hit for `token` and reports current usage.
     * @param token per-caller key
     * @param limit max hits per window (defaults to options.limit, then 100)
     */
    check: (token: string, limit = options?.limit || 100) => {
      // Count is stored in a one-element array so it can be mutated in
      // place; set() happens only on first sight so later hits do not
      // refresh the TTL.
      const tokenCount = tokenCache.get(token) ?? [0];
      if (tokenCount[0] === 0) {
        tokenCache.set(token, tokenCount);
      }
      tokenCount[0] += 1;

      const currentUsage = tokenCount[0];
      // BUG FIX: `>` allows exactly `limit` requests per window; the
      // original `>=` rejected the limit-th request (off-by-one).
      const isRateLimited = currentUsage > limit;

      return {
        isRateLimited,
        currentUsage,
        limit,
      };
    },
  };
}

这是我在中间件中使用它的方式

import rateLimit from 'lib/ratelimit';
import { NextResponse } from 'next/server';
import { getToken } from 'next-auth/jwt';
import { NextRequestWithAuth, withAuth } from 'next-auth/middleware';

// One limiter instance shared across invocations of this middleware.
const limiter = rateLimit({
  limit: 1000,
});

/**
 * Auth-gated middleware: rejects requests without an access token (401)
 * and throttles per-token request volume (429); otherwise lets the
 * request continue.
 */
const middleware = async (req: NextRequestWithAuth) => {
  const jwt = await getToken({ req });
  const accessToken = (jwt ?? {}).access_token;

  // No token means the caller is not authenticated.
  if (!accessToken) {
    return new Response('Unauthorized', {
      status: 401,
      statusText: 'Unauthorized',
    });
  }

  // Each access token gets its own rate-limit bucket.
  const usage = limiter.check(accessToken);
  // console.log(`Rate limit: ${usage.currentUsage}/${usage.limit}`);

  if (usage.isRateLimited) {
    return new Response('Rate limit reached', {
      status: 429,
      statusText: 'Too Many Requests',
    });
  }

  return NextResponse.next();
};

export default withAuth(middleware);

export const config = { matcher: ['/api/v1/:path*'] };

相关问题