On-Demand Dynamic Loading of Test Environments (Part 3)
#Continuing from the previous post: https://www.51niux.com/?id=329 — there we created the gateway config files by hand and routed the ingress traffic on demand.
1. Automatically creating the gateway configuration files
#Suppose we now add a new gateway domain in the web management platform, add a new location, or a location switches to a different backend cluster (for example after two clusters are merged). Each of these events has to trigger a web API call that creates or updates that domain on the offline and sandbox gateways, so we need to expose a web interface that can be invoked with the relevant parameters.
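#As a quick illustration, here is a minimal sketch of how the platform side might call such an interface once it exists (the URL and JSON payload match the API built in 1.2 below; the use of the requests library and the helper name trigger_gateway_config are only assumptions about the platform side, not part of this article's code):
# Hypothetical platform-side caller: trigger config (re)generation whenever a
# gateway domain is added or one of its locations changes.
import requests

def trigger_gateway_config(domain_name, api_host="127.0.0.1:8000"):
    """Ask the gateway API to (re)generate the nginx configs for one offline domain."""
    resp = requests.post(
        f"http://{api_host}/gateway/generate-config/",
        json={"domain_name": domain_name},
        timeout=30,
    )
    resp.raise_for_status()      # non-2xx (400/404/500) raises here
    return resp.json()           # contains code / msg / data with upload_results

# trigger_gateway_config("zhongtai.test.com")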
1.1 Creating the template files
# cat /opt/web/pod_consul/template/gateway_service.conf #as you can see, the only thing this template needs replaced is the domain name
server {
listen 80;
listen 443 ssl;
server_name Domain_Name;
# 强制HTTPS时开启
if ($scheme != "https") {
return 301 https://$host$request_uri;
}
#关于SSL部分的内容请参照前面的配置
# 指定 dnsmasq 的地址 127.0.0.1,优先解析 hosts 里的域名
resolver 127.0.0.1 valid=10s ipv6=off;
resolver_timeout 5s;
include /opt/soft/nginx/conf.d/location/Domain_Name/*.location.conf;
access_log /opt/log/nginx/Domain_Name/Domain_Name_access.log main;
error_log /opt/log/nginx/Domain_Name/Domain_Name_error.log error;
}
# cat /opt/web/pod_consul/template/gateway_location.conf #the camelCase placeholders are the variables that need replacing; their values come from the data we inserted earlier
location / {
# ===== 固定配置(直接写死)=====
set $demand_number "ReqId"; # 初始化默认值
set $trace_tag "ReqId";
set $client_ip ""; # 核心:初始化自定义变量client_ip
set $proxy_env "Env_Name"; # 环境(固定:offline/mirror)
set $stable_domain "ModuleName-Env_Name-ReqId.test.com"; # stable后端域名(固定)
set $module_name "ModuleName"; # 模块名(固定)
set $cache_expire 60;
# 执行核心业务逻辑(Lua会写入专属日志)
access_by_lua_file /opt/soft/nginx/conf.d/lua/ip_demand_query.lua;
# 域名拼接(直接使用Nginx固定变量)
if ($demand_number = "stable") {
set $proxy_domain $stable_domain;
}
if ($demand_number != "stable") {
set $proxy_domain "$module_name-$proxy_env-$trace_tag.test.com";
}
# 代理转发
proxy_pass https://$proxy_domain$request_uri;
proxy_set_header Host $proxy_domain;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_next_upstream http_502 error timeout invalid_header;
proxy_set_header X-Forwarded-Proto $scheme;
# 基础代理配置
proxy_connect_timeout 5s;
proxy_send_timeout 5s;
proxy_read_timeout 5s;
}
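#Before wiring this into Django, here is a minimal standalone sketch of how the two templates get rendered (the placeholder names come from the templates above; render_service_conf / render_location_conf are purely illustrative helpers, the real logic lives in views.py in 1.2):
# Illustration only: substitute the camelCase placeholders in the two templates.
def render_service_conf(template_text, domain):
    # gateway_service.conf only needs the domain name filled in
    return template_text.replace("Domain_Name", domain)

def render_location_conf(template_text, module_name, env_name, req_id="stable"):
    # gateway_location.conf carries the module / environment / request-id placeholders
    return (template_text
            .replace("ReqId", req_id)
            .replace("Env_Name", env_name)
            .replace("ModuleName", module_name))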
1.2 Updating the gateway domain configuration by domain name
#pip install django djangorestframework pymysql paramiko
#cd /opt/web/pod_consul/ && python3 manage.py startapp gateway_api
# vim pod_consul/settings.py
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 新增以下内容
    'rest_framework',
    'nginx_conf_app',
    'gateway_api',
]
# vi pod_consul/urls.py
urlpatterns = [
......
# 挂载应用路由,前缀为 /gateway/
path('gateway/',include('gateway_api.urls')),
]
# vi gateway_api/urls.py
from django.urls import path
from .views import GatewayConfigGenerateView
urlpatterns = [
# 接口访问路径:http://域名/gateway/generate-config/
path('generate-config/', GatewayConfigGenerateView.as_view(), name='generate_gateway_config'),
]
# vi gateway_api/views.py
# -*- coding: utf-8 -*-
import os
import re
import paramiko
import pymysql
import logging
import logging.handlers
from datetime import datetime
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
# ==================== 本地配置(根据实际环境修改) ====================
TEMPLATE_DIR = "/opt/web/pod_consul/template" # 模板文件目录(干净的模板,无rewrite)
LOCAL_TEMP_DIR = "/opt/web/temp" # 本地临时目录
LOG_DIR = "/opt/web/pod_consul/logs" # 日志目录
LOG_FILE = os.path.join(LOG_DIR, "gateway_api.log")
# 自动创建目录
for dir_path in [LOCAL_TEMP_DIR, LOG_DIR]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# ==================== 日志配置 ====================
logger = logging.getLogger("gateway_api")
logger.setLevel(logging.DEBUG)
if not logger.handlers:
# 日志格式:时间 + 级别 + 内容
handler = logging.handlers.RotatingFileHandler(
LOG_FILE, maxBytes=50*1024*1024, backupCount=5, encoding="utf-8"
)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
# ==================== 数据库配置(根据实际环境修改) ====================
DB_CONFIG = {
"host": "192.168.1.101",
"port": 3306,
"user": "route",
"password": "route123456",
"database": "route",
"charset": "utf8mb4"
}
# ==================== 远端Nginx服务器配置(根据实际环境修改) ====================
REMOTE_NGINX_SERVERS = [
{
"name": "nginx-server-1",
"host": "192.168.1.102", # 远端Nginx服务器IP
"port": 22, # SSH端口
"username": "root", # SSH登录用户名
"key_filename": "~/.ssh/id_rsa",# 本地SSH私钥路径(优先用密钥,也可替换为password)
# "password": "your_ssh_password", # 若不用密钥,取消注释并填写密码
"remote_service_dir": "/opt/soft/nginx/conf.d", # 远端service.conf存放目录
"remote_location_root": "/opt/soft/nginx/conf.d/location", # 远端location目录根路径
"nginx_reload_cmd": "/opt/soft/nginx/sbin/nginx -s reload" # Nginx重载命令
}
]
# ==================== 核心工具函数 ====================
def is_valid_rewrite_rule(rule):
"""
校验rewrite规则是否有效:
1. 非None
2. 非字符串"NULL"(大小写不敏感)
3. 去除空格后非空
"""
if rule is None or (isinstance(rule, str) and rule.strip().upper() == "NULL"):
return False
if not isinstance(rule, str) or not rule.strip():
return False
return True
def ensure_remote_dir_exists(sftp, remote_dir):
"""
确保远端目录存在(SFTP方式创建,比SSH命令更可靠,且能立即生效)
:param sftp: paramiko的SFTP客户端对象
:param remote_dir: 远端目录路径
"""
try:
# 尝试列出目录,判断是否存在
sftp.listdir(remote_dir)
logger.info(f"远端目录【{remote_dir}】已存在")
except IOError:
# 目录不存在,递归创建(mkdir -p 效果)
# 拆分目录层级
dir_parts = remote_dir.split('/')
current_path = ""
for part in dir_parts:
if not part:
current_path += "/"
continue
current_path += part + "/"
try:
sftp.stat(current_path)
except IOError:
sftp.mkdir(current_path)
logger.info(f"创建远端目录:{current_path}")
logger.info(f"远端目录【{remote_dir}】创建成功")
class GatewayConfigGenerateView(APIView):
"""
核心功能:生成线下/沙箱网关的Nginx配置,仅当rewrite_rule有效时才添加rewrite行
传参:domain_name(线下网关域名)
"""
def post(self, request):
# 记录请求日志
logger.info(f"接收到配置生成请求,参数:{request.data}")
# 1. 基础参数验证
domain_name = request.data.get("domain_name")
if not domain_name or not isinstance(domain_name, str) or len(domain_name) > 255:
logger.error(f"参数错误:线下网关域名无效,传入值:{domain_name}")
return Response({
"code": 400,
"msg": "线下网关域名不能为空,且长度不能超过255个字符",
"data": None
}, status=status.HTTP_400_BAD_REQUEST)
try:
# 2. 数据库查询(获取线下网关+location+沙箱网关信息)
db_conn = pymysql.connect(**DB_CONFIG)
cursor = db_conn.cursor(pymysql.cursors.DictCursor)
# 查询线下网关关联的location信息
offline_sql = """
SELECT
o.id AS offline_gateway_id,
o.domain_name AS offline_domain,
l.location_path,
l.module_name,
l.cluster_name,
l.rewrite_rule
FROM offline_gateway o
JOIN offline_location l ON o.id = l.offline_gateway_id
WHERE o.domain_name = %s
ORDER BY l.location_path;
"""
cursor.execute(offline_sql, (domain_name,))
offline_location_list = cursor.fetchall()
# 无location配置直接返回
if not offline_location_list:
logger.warning(f"未查询到线下网关【{domain_name}】关联的location配置")
cursor.close()
db_conn.close()
return Response({
"code": 404,
"msg": f"未查询到线下网关【{domain_name}】关联的location配置",
"data": None
}, status=status.HTTP_404_NOT_FOUND)
# 查询关联的沙箱网关
mirror_sql = """
SELECT domain_name AS mirror_domain
FROM mirror_gateway
WHERE offline_gateway_id = %s;
"""
cursor.execute(mirror_sql, (offline_location_list[0]["offline_gateway_id"],))
mirror_gateway_list = cursor.fetchall()
# 关闭数据库连接
cursor.close()
db_conn.close()
logger.info(f"数据库查询完成:线下location数量={len(offline_location_list)},沙箱网关数量={len(mirror_gateway_list)}")
# 3. 整理需要生成配置的域名列表(线下+沙箱)
domain_list = []
# 添加线下网关
domain_list.append({
"domain": offline_location_list[0]["offline_domain"],
"domain_type": "offline",
"location_list": offline_location_list
})
# 添加沙箱网关
for mirror in mirror_gateway_list:
domain_list.append({
"domain": mirror["mirror_domain"],
"domain_type": "mirror",
"location_list": offline_location_list
})
# 4. 生成配置文件并上传到远端Nginx服务器
upload_results = []
for server in REMOTE_NGINX_SERVERS:
server_result = {
"server_name": server["name"],
"server_host": server["host"],
"status": "success",
"msg": "",
"data": {}
}
try:
logger.info(f"开始处理远端服务器:{server['name']}({server['host']})")
# 建立SSH连接
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_connect_kwargs = {
"hostname": server["host"],
"port": server["port"],
"username": server["username"]
}
if "key_filename" in server and server["key_filename"]:
ssh_connect_kwargs["key_filename"] = os.path.expanduser(server["key_filename"])
elif "password" in server and server["password"]:
ssh_connect_kwargs["password"] = server["password"]
ssh.connect(**ssh_connect_kwargs)
sftp = ssh.open_sftp()
logger.info(f"成功连接远端服务器:{server['host']}")
# 遍历每个域名生成配置
for domain_item in domain_list:
current_domain = domain_item["domain"]
domain_type = domain_item["domain_type"]
location_list = domain_item["location_list"]
server_result["data"][current_domain] = {
"service_conf": "",
"location_dir": "",
"location_files": []
}
logger.info(f"开始生成域名【{current_domain}】的配置文件")
# ========== 生成service.conf文件 ==========
service_template_path = os.path.join(TEMPLATE_DIR, "gateway_service.conf")
with open(service_template_path, "r", encoding="utf-8") as f:
service_content = f.read()
service_content = service_content.replace("Domain_Name", current_domain)
local_service_file = os.path.join(LOCAL_TEMP_DIR, f"{current_domain}.conf")
with open(local_service_file, "w", encoding="utf-8") as f:
f.write(service_content)
remote_service_file = os.path.join(server["remote_service_dir"], f"{current_domain}.conf")
sftp.put(local_service_file, remote_service_file)
os.remove(local_service_file)
server_result["data"][current_domain]["service_conf"] = remote_service_file
logger.info(f"成功上传service文件:{remote_service_file}")
# ========== 生成location.conf文件 ==========
# 远端location目录
remote_location_dir = os.path.join(server["remote_location_root"], current_domain)
# 用SFTP方式确保目录存在(立即生效,无时序问题)
ensure_remote_dir_exists(sftp, remote_location_dir)
server_result["data"][current_domain]["location_dir"] = remote_location_dir
# 按集群名分组location
cluster_loc_map = {}
for loc in location_list:
cluster_name = loc["cluster_name"]
if cluster_name not in cluster_loc_map:
cluster_loc_map[cluster_name] = []
cluster_loc_map[cluster_name].append(loc)
# 遍历每个集群生成带序号的location文件
for cluster_name, locs in cluster_loc_map.items():
for idx, loc in enumerate(locs, 1):
# 读取模板并替换变量
location_template_path = os.path.join(TEMPLATE_DIR, "gateway_location.conf")
with open(location_template_path, "r", encoding="utf-8") as f:
location_content = f.read()
location_content = location_content.replace("ReqId", "stable")
location_content = location_content.replace("Env_Name", domain_type)
location_content = location_content.replace("ModuleName", loc["module_name"])
# ========== 非/路径改为正则匹配(location ~) ==========
loc_path = loc["location_path"]
if loc_path != "/":
# 替换为正则匹配形式:location ~ 路径 {
# 注意:正则中的/无需转义,常规路径直接拼接即可
location_content = re.sub(
r"location / {",
f"location ~ {loc_path} {{", # 关键:添加~符号,改为正则匹配
location_content
)
logger.info(f"【{current_domain}】的location【{loc_path}】设置正则匹配形式:location ~ {loc_path} {{")
# 路径为/时,保留原普通匹配形式
else:
logger.info(f"【{current_domain}】的location【{loc_path}】保留普通匹配形式:location / {{")
# ========== rewrite规则插入到access_by_lua_file上方 ==========
rewrite_rule = loc["rewrite_rule"]
is_rewrite_valid = is_valid_rewrite_rule(rewrite_rule)
if is_rewrite_valid:
clean_rule = rewrite_rule.strip()
rewrite_line = f" rewrite {clean_rule} break;\n"
# 找到access_by_lua_file的位置,在其上方插入rewrite
lua_file_pos = location_content.find(" access_by_lua_file")
if lua_file_pos != -1:
location_content = location_content[:lua_file_pos] + rewrite_line + location_content[lua_file_pos:]
logger.info(f"【{current_domain}】的location【{loc['location_path']}】添加rewrite规则到access_by_lua_file上方:{clean_rule}")
# ========== 仅当有有效rewrite时,替换proxy_pass ==========
location_content = re.sub(
r"proxy_pass https://\$proxy_domain\$request_uri;",
"proxy_pass https://$proxy_domain$uri?$args;",
location_content
)
logger.info(f"【{current_domain}】的location【{loc['location_path']}】更新proxy_pass为:proxy_pass https://$proxy_domain$uri?$args;")
else:
# 若模板中无access_by_lua_file,仍插入到proxy_pass上方
proxy_pass_pos = location_content.find(" proxy_pass")
if proxy_pass_pos != -1:
location_content = location_content[:proxy_pass_pos] + rewrite_line + location_content[proxy_pass_pos:]
logger.warning(f"模板中未找到access_by_lua_file,为【{current_domain}】的location【{loc['location_path']}】添加rewrite规则到proxy_pass上方:{clean_rule}")
# ==========仅当有有效rewrite时,替换proxy_pass ==========
location_content = re.sub(
r"proxy_pass https://\$proxy_domain\$request_uri;",
"proxy_pass https://$proxy_domain$uri?$args;",
location_content
)
logger.info(f"【{current_domain}】的location【{loc['location_path']}】更新proxy_pass为:proxy_pass https://$proxy_domain$uri?$args;")
else:
logger.info(f"【{current_domain}】的location【{loc['location_path']}】rewrite规则无效(值:{rewrite_rule}),保留原始proxy_pass格式")
# 生成文件并上传
loc_filename = f"{cluster_name}-{idx:02d}.location.conf"
local_loc_file = os.path.join(LOCAL_TEMP_DIR, loc_filename)
with open(local_loc_file, "w", encoding="utf-8") as f:
f.write(location_content)
# 远端文件路径
remote_loc_file = os.path.join(remote_location_dir, loc_filename)
# 上传文件(添加异常捕获,确保上传失败能记录日志)
try:
sftp.put(local_loc_file, remote_loc_file)
logger.info(f"成功上传location文件:{remote_loc_file}")
except Exception as upload_err:
logger.error(f"上传location文件【{remote_loc_file}】失败:{str(upload_err)}")
raise upload_err # 抛出异常,让外层捕获
# 删除本地临时文件
os.remove(local_loc_file)
# 记录文件信息
server_result["data"][current_domain]["location_files"].append({
"filename": loc_filename,
"remote_path": remote_loc_file,
"location_path": loc["location_path"],
"rewrite_rule": rewrite_rule,
"is_rewrite_valid": is_rewrite_valid,
"location_match_type": "regex" if loc_path != "/" else "normal", # 标记匹配类型
"proxy_pass_format": "https://$proxy_domain$uri?$args;" if is_rewrite_valid else "https://$proxy_domain$request_uri;" # 新增:标记proxy_pass格式
})
# ========== 重载Nginx配置 ==========
logger.info(f"执行Nginx重载命令:{server['nginx_reload_cmd']}")
stdin, stdout, stderr = ssh.exec_command(server["nginx_reload_cmd"])
reload_stderr = stderr.read().decode().strip()
if reload_stderr:
server_result["msg"] = f"Nginx重载警告:{reload_stderr}"
logger.warning(f"服务器【{server['host']}】Nginx重载警告:{reload_stderr}")
# 关闭连接
sftp.close()
ssh.close()
logger.info(f"完成远端服务器【{server['host']}】的配置生成和上传")
except Exception as e:
server_result["status"] = "failed"
server_result["msg"] = str(e)
logger.error(f"处理远端服务器【{server['host']}】失败:{str(e)}", exc_info=True)
upload_results.append(server_result)
# 返回结果
logger.info(f"配置生成请求处理完成,线下网关域名:{domain_name}")
return Response({
"code": 200,
"msg": "配置生成并上传完成",
"data": {
"offline_domain": domain_name,
"mirror_domains": [m["mirror_domain"] for m in mirror_gateway_list],
"upload_results": upload_results
}
}, status=status.HTTP_200_OK)
except pymysql.MySQLError as e:
logger.error(f"数据库操作失败:{str(e)}", exc_info=True)
return Response({
"code": 500,
"msg": f"数据库操作失败:{str(e)}",
"data": None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
logger.error(f"配置生成失败:{str(e)}", exc_info=True)
return Response({
"code": 500,
"msg": f"配置生成失败:{str(e)}",
"data": None
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
#nohup python3 manage.py runserver 0.0.0.0:8000 &
1.3 Calling the API and checking the generated files
# curl -X POST http://127.0.0.1:8000/gateway/generate-config/ -H "Content-Type: application/json" -d '{"domain_name": "zhongtai.test.com"}'
# curl -X POST http://127.0.0.1:8000/gateway/generate-config/ -H "Content-Type: application/json" -d '{"domain_name": "houtai.test.com"}'
#Go to the gateway server and check that the .conf files for the offline and sandbox domains have been created under conf.d, and that the per-domain directories under location/ now contain the location files. Note that my location files are all named in the form 集群名-01.location.conf, 集群名-02.location.conf and so on (cluster name plus a sequence number), and the cluster serving location / must be the one named 集群名-01.location.conf. Why append the 01/02 counter? To handle the case where a single cluster appears under more than one location.
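#To make that naming rule concrete, here is a tiny standalone sketch of the grouping and numbering logic (the same cluster_loc_map / enumerate idea used in views.py above; the sample locations are made up):
# Locations are grouped by cluster, and each cluster's locations are numbered
# 01, 02, ... because one cluster may serve several locations.
locations = [
    {"location_path": "/",           "cluster_name": "cluster-a"},
    {"location_path": "/api/token/", "cluster_name": "cluster-a"},
    {"location_path": "/api/user/",  "cluster_name": "cluster-b"},
]
cluster_loc_map = {}
for loc in locations:
    cluster_loc_map.setdefault(loc["cluster_name"], []).append(loc)
for cluster_name, locs in cluster_loc_map.items():
    for idx, loc in enumerate(locs, 1):
        print(f"{cluster_name}-{idx:02d}.location.conf  ->  {loc['location_path']}")
# cluster-a-01.location.conf  ->  /
# cluster-a-02.location.conf  ->  /api/token/
# cluster-b-01.location.conf  ->  /api/user/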
#Then access the gateway domain and verify that the different location paths are routed to their designated clusters, including the rewrite rules. Let's look at one of the generated location files:
# cat /opt/soft/nginx/conf.d/location/houtai.test.com/集群名-02.location.conf
location ~ /api/token/ {
# ===== 固定配置(直接写死)=====
set $demand_number "stable"; # 初始化默认值
set $trace_tag "stable";
set $client_ip ""; # 核心:初始化自定义变量client_ip
set $proxy_env "offline"; # 环境(固定:offline/mirror)
set $stable_domain "模块名-offline-stable.test.com"; # stable后端域名(固定)
set $module_name "模块名"; # 模块名(固定)
set $cache_expire 60;
# 执行核心业务逻辑(Lua会写入专属日志)
rewrite /api/(.*) /api/auth/$1 break;
access_by_lua_file /opt/soft/nginx/conf.d/lua/ip_demand_query.lua;
# 域名拼接(直接使用Nginx固定变量)
if ($demand_number = "stable") {
set $proxy_domain $stable_domain;
}
if ($demand_number != "stable") {
set $proxy_domain "$module_name-$proxy_env-$trace_tag.test.com";
}
# 代理转发
proxy_pass https://$proxy_domain$uri?$args;
proxy_set_header Host $proxy_domain;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_next_upstream http_502 error timeout invalid_header;
proxy_set_header X-Forwarded-Proto $scheme;
# 基础代理配置
proxy_connect_timeout 5s;
proxy_send_timeout 5s;
proxy_read_timeout 5s;
}
#Note here the difference between $request_uri and $uri:
$request_uri: the original request's full path plus query string (excluding the # fragment); read-only and never changes
$uri: the path portion after Nginx's internal processing (no query string)
Take the request https://houtai.test.com/api/token/dasdsada?a=123&b=456 as an example:
$request_uri: both the gateway nginx and the backend nginx receive GET /api/token/dasdsada?a=123&b=456 #the rewrite does not take effect, but the parameters are passed through
$uri: the gateway sees /api/token/dasdsada?a=123&b=456, the backend nginx sees /api/auth/token/dasdsada #the rewrite takes effect, but the parameters are not passed on
$uri?$args: the gateway sees /api/token/dasdsada?a=123&b=456, the backend nginx sees /api/auth/token/dasdsada?a=123&b=456 #the rewrite takes effect and the parameters are passed through as well
1.4 Testing whether trace_tag is passed through downstream
#We take a simple approach and modify the nginx log format directly:
# ===== 自定义日志格式(包含常用Header + 核心变量)=====
log_format custom_format '$remote_addr [$time_local] "$request" '
                         '$status $body_bytes_sent "$http_referer" '
                         '"$http_user_agent" "$http_x_real_ip" "$http_x_forwarded_for" '   # 客户端IP相关Header
                         '"$http_host" "$http_trace_tag" '                                  # 自定义业务Header(按需改)
                         '$request_uri $uri $args';                                         # 路径/参数变量(排查rewrite用)
#Then make both the test gateway domain and the backend nginx domain use this log format, send requests from a browser, and watch the log output:
#https://houtai.test.com/api/token/dasdsada?a=123&b=456 #a location that has a rewrite rule
# tail -f /opt/log/nginx/houtai.test.com/houtai.test.com_access.log
"houtai.test.com" "35184" /api/token/dasdsada?a=123&b=456 /api/auth/token/dasdsada a=123&b=456
# tail -f /opt/log/nginx/http-develop-offline-stable.test.com/http-develop-offline-stable.test.com_access.log #presumably no backend was created for demand 35184, so the request falls back to stable
"http-develop-offline-stable.test.com" "35184" /api/auth/token/dasdsada?a=123&b=456 /api/auth/token/dasdsada a=123&b=456
#From the above you can see that the custom trace_tag variable is passed downstream: although the request went to the stable domain, trace_tag still carries the demand number, so the request remains tag-colored once it is handed to the backend cluster.
#https://houtai.test.com/api/service/dadsada #and now a location without a rewrite rule
# tail -f /opt/log/nginx/houtai.test.com/houtai.test.com_access.log
"houtai.test.com" "35184" /api/service/dadsada /api/service/dadsada -
# tail -f /opt/log/nginx/http-develop-offline-stable.test.com/http-develop-offline-stable.test.com_access.log
"http-develop-offline-stable.test.com" "35184" /api/service/dadsada /api/service/dadsada -
#One point worth stressing: trace_tag is now propagated downstream, and this could be optimized further. For example, if you distinguish the demand environments from the stable environment, have the demand environment's location set a fixed default trace_tag, and let the Lua script write trace_tag into the header only when the request targets the stable environment; a demand environment simply uses the trace_tag hard-coded in its own config file. That guarantees a program bug can never assign a wrong trace_tag, because once a request has already reached a demand environment's domain, its trace_tag must be that demand number.
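#For reference, that suggested check written out as a small Python sketch (the real implementation would live in ip_demand_query.lua; the function and variable names here are purely illustrative):
# Sketch of the suggested trace_tag handling, not actual Lua code.
def choose_trace_tag(env_is_stable, configured_trace_tag, resolved_demand_number):
    if env_is_stable:
        # Only the stable environment trusts the dynamically resolved demand number
        # and writes it into the trace_tag header.
        return resolved_demand_number
    # A demand environment always uses the trace_tag fixed in its own location file,
    # so a lookup bug can never mislabel traffic that already reached a demand domain.
    return configured_trace_tag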
#That wraps it up: we have walked through the on-demand environment from the gateway traffic entry point, through creating the backend domains, to associating them with containers. Of course this is only the overall flow; the details still need to be filled in, and how you integrate it with your platform depends on your actual scenario, for example how to promptly clean up the stale config files in Consul and nginx after a demand environment is destroyed.
