我按照Vercel的官方示例,使用lru-cache实现了速率限制,在API路由中它工作得很好。
但是,我不想在每条路由里都写try-catch,而是想在中间件中使用它来统一处理所有路由。然而当我在中间件中做同样的尝试时,它不起作用:CACHE_TOKEN对应的计数在API路由中每次请求递增1,但在中间件中该值始终不变。
Vercel示例:https://github.com/vercel/next.js/tree/canary/examples/api-routes-rate-limit
src/helpers/rateLimit.js
import LRU from 'lru-cache'
/**
 * Create an in-memory rate limiter backed by an LRU cache.
 *
 * NOTE(review): this state lives in the Node server process. Next.js
 * middleware runs in the Edge runtime, where module scope is not guaranteed
 * to persist between invocations — which is why the counter appears stuck
 * at 1 there. For middleware, back the counter with an external store
 * (e.g. Redis/Upstash) instead.
 *
 * @param {object} [options]
 * @param {number} [options.max=500] max distinct tokens tracked per window
 * @param {number} [options.ttl=60000] window length in milliseconds
 * @param {number} [options.limit=3] allowed hits per token per window
 * @returns {{check(res: {headers: {set(name: string, value: any): void}}, token?: string): Promise<void>}}
 */
export default function rateLimit(options = {}) {
  const tokenCache = new LRU({
    max: options.max ?? 500, // Max 500 users per second
    ttl: options.ttl ?? 1 * 60000, // 1 minute in milliseconds
  })

  const defaultToken = 'CACHE_TOKEN'
  const limit = options.limit ?? 3

  return {
    /**
     * Count one hit for `token`, write X-RateLimit-* headers onto `res`,
     * and reject with an Error once the limit is reached.
     *
     * @param {{headers: {set(name: string, value: any): void}}} res
     * @param {string} [token] bucket key; defaults to one global bucket
     * @returns {Promise<void>} resolves when the request is allowed
     */
    check: (res, token = defaultToken) =>
      new Promise((resolve, reject) => {
        // The counter array is cached by reference, so the increment below
        // mutates the stored entry in place — set() is only needed on the
        // first hit of a window.
        const tokenCount = tokenCache.get(token) || [0]
        if (tokenCount[0] === 0) {
          tokenCache.set(token, tokenCount)
        }
        tokenCount[0] += 1

        const currentUsage = tokenCount[0]
        // NOTE(review): `>=` means only (limit - 1) requests succeed per
        // window — kept as-is to match the Vercel example's semantics.
        const isRateLimited = currentUsage >= limit
        res.headers.set('X-RateLimit-Limit', limit)
        res.headers.set('X-RateLimit-Remaining', isRateLimited ? 0 : limit - currentUsage)

        // Reject with a real Error (never a bare reject()) so callers that
        // log the caught value get a message and stack trace.
        return isRateLimited ? reject(new Error('Rate limit exceeded')) : resolve()
      }),
  }
}
src/pages/api/hello.js
import { NextResponse } from 'next/server'
import rateLimit from '@/helpers/rateLimit'
const limiter = rateLimit()

/**
 * Pages-router API handler guarded by the rate limiter.
 *
 * Fix: the original created a `NextResponse.next()` and let the limiter set
 * headers on it, but a pages API route responds through `res` — the
 * NextResponse object was discarded, so clients never received the
 * X-RateLimit-* headers. We instead hand the limiter a minimal adapter
 * whose `headers.set` forwards to `res.setHeader`.
 *
 * @param {import('next').NextApiRequest} req
 * @param {import('next').NextApiResponse} res
 */
export default async function handler(req, res) {
  // Adapter exposing the `{ headers: { set } }` shape the limiter expects.
  const headerTarget = {
    headers: { set: (name, value) => res.setHeader(name, value) },
  }

  try {
    await limiter.check(headerTarget) // limiter default: 3 requests per minute
  } catch (error) {
    console.log(error)
    return res.status(429).json({ error: 'Rate limit exceeded' })
  }

  return res.status(200).json({ message: 'Hello World' })
}
src/middleware.js
import { NextResponse } from 'next/server'
import rateLimit from '@/helpers/rateLimit'
const limiter = rateLimit()

/**
 * Global rate-limiting middleware.
 *
 * Fixes vs. the original:
 *  - return `response` on success so the X-RateLimit-* headers the limiter
 *    set actually reach the client (returning undefined discarded them);
 *  - send HTTP 429 on rejection (NextResponse.json defaults to 200).
 *
 * NOTE(review): middleware runs in the Edge runtime, where module-level
 * state is not guaranteed to persist between invocations — this is why the
 * in-memory counter appeared frozen at 1. A shared store (Redis/Upstash)
 * is required for reliable rate limiting here.
 *
 * @param {import('next/server').NextRequest} req
 */
export async function middleware(req) {
  const response = NextResponse.next()

  try {
    await limiter.check(response) // limiter default: 3 requests per minute
  } catch (error) {
    console.log(error)
    return NextResponse.json({ error: 'Rate limit exceeded' }, { status: 429 })
  }

  return response
}
1条答案
回答 #1
所以我设法使它适应中间件的工作。
这是我的ratelimiter实现
这是我在中间件中使用它的方式