# Scenario:
# Cap total concurrent connections to this server at 300; each URL may be served
# at most 100 requests/second; each client may hold at most 5 concurrent connections.
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
# Rate limit keyed by $uri: each distinct URI may be served at most 100 requests/second.
# The shared-memory zone "api_read" (20 MB) tracks per-URI request state.
limit_req_zone $uri zone=api_read:20m rate=100r/s;
# Connection-limit zone keyed by server name (10 MB) — used below to cap the
# total number of concurrent connections handled by this virtual server.
limit_conn_zone $server_name zone=perserver:10m;
# Connection-limit zone keyed by the client IP (binary form, 10 MB). NOTE: limit_conn
# caps concurrent CONNECTIONS per client, not requests.
limit_conn_zone $binary_remote_addr zone=perip:10m;
gzip on;
upstream web-app {
server localhost:8882;
}
server {
listen 8000;
# At most 5 concurrent connections per client IP (zone "perip").
limit_conn perip 5;
# At most 300 concurrent connections to this server in total (zone "perserver").
limit_conn perserver 300;
access_log logs/access.log;
error_log logs/error.log;
location ^~ /web-app/ {
proxy_pass $scheme://web-app;
# burst=20 queues up to 20 requests that exceed the 100 r/s rate instead of
# rejecting them outright; "nodelay" serves queued requests immediately rather
# than pacing them. (The original comment claimed the buffer size was 5 — the
# directive actually sets it to 20.)
limit_req zone=api_read burst=20 nodelay;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}