From dcfef61d47e7acee084b077fcd200b59c088d8e0 Mon Sep 17 00:00:00 2001
From: iSecloud <869820505@qq.com>
Date: Wed, 20 Nov 2024 20:18:52 +0800
Subject: [PATCH] fix(backend): revise the ticket standardization protocol
 #7747
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../backend/components/dbresource/client.py   |   3 +
 dbm-ui/backend/db_dirty/handlers.py           | 182 +-----------------
 dbm-ui/backend/db_dirty/models.py             |  21 +-
 dbm-ui/backend/db_meta/api/machine/apis.py    |   6 +-
 ..._20241014_1042_0043_auto_20241015_2128.py} |   2 +-
 dbm-ui/backend/db_meta/models/cluster.py      |  15 +-
 dbm-ui/backend/db_package/views.py            |   6 +-
 .../db_services/dbresource/serializers.py     |   9 +-
 .../mysql/fixpoint_rollback/views.py          |  16 +-
 dbm-ui/backend/db_services/tag/filters.py     |   5 +
 .../backend/db_services/taskflow/handlers.py  |   8 -
 .../mysql_migrate_cluster_remote_flow.py      |   2 +-
 .../flow/utils/spider/spider_db_meta.py       |   2 +-
 dbm-ui/backend/ticket/builders/__init__.py    |  22 ++-
 dbm-ui/backend/ticket/builders/common/base.py |   7 +-
 .../ticket/builders/doris/doris_shrink.py     |   6 +-
 .../backend/ticket/builders/es/es_shrink.py   |   6 +-
 .../ticket/builders/hdfs/hdfs_shrink.py       |   2 +-
 .../ticket/builders/kafka/kafka_shrink.py     |   2 +-
 .../builders/mysql/mysql_fixpoint_rollback.py |  15 +-
 .../builders/mysql/mysql_migrate_cluster.py   |  32 +--
 .../builders/mysql/mysql_migrate_upgrade.py   |  90 ++++++---
 .../builders/mysql/mysql_restore_slave.py     |   2 +-
 .../ticket/builders/pulsar/pulsar_shrink.py   |   6 +-
 dbm-ui/backend/ticket/builders/redis/base.py  |  17 +-
 .../builders/redis/redis_toolbox_cut_off.py   |   1 -
 .../redis_toolbox_datastruct_task_delete.py   |  26 +--
 .../redis/redis_toolbox_proxy_scale_down.py   |  32 +--
 .../redis/redis_toolbox_shard_update.py       |   3 +
 .../redis/redis_toolbox_type_update.py        |   5 +-
 .../builders/sqlserver/sqlserver_destroy.py   |   2 +-
 .../sqlserver/sqlserver_restore_slave.py      |   2 +-
 .../ticket/builders/tendbcluster/base.py      |  12 +-
 .../tendbcluster/tendb_fixpoint_rollback.py   |  29 ++-
 .../builders/tendbcluster/tendb_mnt_apply.py  |   2 +-
 .../tendbcluster/tendb_mnt_destroy.py         |   8 +-
 .../tendbcluster/tendb_node_reblance.py       |   3 +
 .../tendbcluster/tendb_spider_reduce_nodes.py |   8 +-
 dbm-ui/backend/ticket/models/ticket.py        |  40 ++--
 dbm-ui/backend/ticket/signals.py              |   8 +-
 40 files changed, 326 insertions(+), 339 deletions(-)
 rename dbm-ui/backend/db_meta/migrations/{0044_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py => 0045_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py} (82%)

diff --git a/dbm-ui/backend/components/dbresource/client.py b/dbm-ui/backend/components/dbresource/client.py
index 54d01528b6..dd5ae1322b 100644
--- a/dbm-ui/backend/components/dbresource/client.py
+++ b/dbm-ui/backend/components/dbresource/client.py
@@ -106,6 +106,9 @@ def __init__(self):
         self.resource_summary = self.generate_data_api(
             method="POST", url="/statistic/summary", description=_("按照条件聚合资源统计")
         )
+        self.resource_label_count = self.generate_data_api(
+            method="POST", url="/groupby/label/count", description=_("按照标签统计资源数量")
+        )
 
 
 DBResourceApi = _DBResourceApi()
diff --git a/dbm-ui/backend/db_dirty/handlers.py b/dbm-ui/backend/db_dirty/handlers.py
index fc2004179a..2fe7c00d20 100644
--- a/dbm-ui/backend/db_dirty/handlers.py
+++ b/dbm-ui/backend/db_dirty/handlers.py
@@ -10,28 +10,18 @@
 """
 import itertools
 import logging
-from collections import defaultdict
-from typing import Any, Dict, List
+from typing import List
 
 from django.utils.translation import ugettext as _
 
-from backend import env
-from backend.components import CCApi
-from backend.configuration.constants import SystemSettingsEnum
-from backend.configuration.models import SystemSettings
 from backend.db_dirty.constants import MachineEventType, PoolType
 from backend.db_dirty.exceptions import PoolTransferException
 from backend.db_dirty.models import DirtyMachine, MachineEvent
-from backend.db_meta.models import AppCache
-from backend.db_services.ipchooser.constants import IDLE_HOST_MODULE
-from backend.db_services.ipchooser.handlers.topo_handler import TopoHandler
-from backend.db_services.ipchooser.query.resource import ResourceQueryHelper
 from backend.flow.consts import FAILED_STATES
 from backend.flow.utils.cc_manage import CcManage
 from backend.ticket.builders import BuilderFactory
 from backend.ticket.builders.common.base import fetch_apply_hosts
 from backend.ticket.models import Flow, Ticket
-from backend.utils.batch_request import request_multi_thread
 
 logger = logging.getLogger("root")
 
@@ -67,172 +57,14 @@ def transfer_hosts_to_pool(cls, operator: str, bk_host_ids: List[int], source: P
             raise PoolTransferException(_("{}--->{}转移不合法").format(source, target))
 
     @classmethod
-    def query_dirty_machine_records(cls, bk_host_ids: List[int]):
-        """
-        查询污点池主机信息 TODO: 污点池废弃,代码将被移除
-        @param bk_host_ids: 主机列表
-        """
-
-        def get_module_data(data):
-            params, res = data
-            params = params["params"]
-            return [{"bk_biz_id": params["bk_biz_id"], **d} for d in res]
-
-        if not bk_host_ids:
-            return []
-
-        # 如果传入的列表已经是DirtyMachine,则直接用
-        if not isinstance(bk_host_ids[0], DirtyMachine):
-            dirty_machines = DirtyMachine.objects.filter(bk_host_id__in=bk_host_ids)
-        else:
-            dirty_machines = bk_host_ids
-            bk_host_ids = [dirty.bk_host_id for dirty in dirty_machines]
-
-        # 缓存云区域和业务信息
-        bk_biz_ids = [dirty_machine.bk_biz_id for dirty_machine in dirty_machines]
-        for_biz_infos = AppCache.batch_get_app_attr(bk_biz_ids=bk_biz_ids, attr_name="bk_biz_name")
-        cloud_info = ResourceQueryHelper.search_cc_cloud(get_cache=True)
-
-        # 查询污点主机当前所处的模块
-        host_topo_infos = CCApi.find_host_biz_relations(params={"bk_host_id": bk_host_ids})
-        host__topo_info_map: Dict[int, List] = defaultdict(list)
-        biz__modules_map: Dict[int, List] = defaultdict(list)
-        for topo in host_topo_infos:
-            host__topo_info_map[topo["bk_host_id"]].append(topo)
-            biz__modules_map[topo["bk_biz_id"]].append(topo["bk_module_id"])
-        # 批量获取业务下模块信息
-        module_infos = request_multi_thread(
-            func=CCApi.find_module_batch,
-            params_list=[
-                {
-                    "params": {"bk_biz_id": biz, "bk_ids": modules, "fields": ["bk_module_id", "bk_module_name"]},
-                    "use_admin": True,
-                }
-                for biz, modules in biz__modules_map.items()
-            ],
-            get_data=get_module_data,
-            in_order=True,
-        )
-        module_infos = list(itertools.chain(*module_infos))
-        biz__module__module_name: Dict[int, Dict[int, str]] = defaultdict(dict)
-        for info in module_infos:
-            biz__module__module_name[info["bk_biz_id"]][info["bk_module_id"]] = info["bk_module_name"]
-
-        # 获取污点池模块
-        system_manage_topo = SystemSettings.get_setting_value(key=SystemSettingsEnum.MANAGE_TOPO.value)
-        dirty_module = system_manage_topo["dirty_module_id"]
-
-        # 获取污点池列表信息
-        dirty_machine_list: List[Dict] = []
-        for dirty in dirty_machines:
-            # 填充污点池主机基础信息
-            dirty_machine_info = {
-                "ip": dirty.ip,
-                "bk_host_id": dirty.bk_host_id,
-                "bk_cloud_name": cloud_info[str(dirty.bk_cloud_id)]["bk_cloud_name"],
-                "bk_cloud_id": dirty.bk_cloud_id,
-                "bk_biz_name": for_biz_infos[int(dirty.bk_biz_id)],
-                "bk_biz_id": dirty.bk_biz_id,
-                "ticket_type": dirty.ticket.ticket_type,
-                "ticket_id": dirty.ticket.id,
-                "ticket_type_display": dirty.ticket.get_ticket_type_display(),
-                "task_id": dirty.flow.flow_obj_id,
-                "operator": dirty.ticket.creator,
-                "is_dirty": True,
-            }
-
-            # 如果主机已经不存在于cc,则仅能删除记录
-            if dirty.bk_host_id not in host__topo_info_map:
-                dirty_machine_info.update(is_dirty=False)
-                dirty_machine_list.append(dirty_machine_info)
-                continue
-
-            # 补充主机所在的模块信息
-            host_in_module = [
-                {
-                    "bk_module_id": h["bk_module_id"],
-                    "bk_module_name": biz__module__module_name[h["bk_biz_id"]].get(h["bk_module_id"], ""),
-                }
-                for h in host__topo_info_map[dirty.bk_host_id]
-            ]
-            dirty_machine_info.update(bk_module_infos=host_in_module)
-
-            # 如果主机 不处于/不仅仅处于【污点池】中,则不允许移入待回收
-            host = host__topo_info_map[dirty.bk_host_id][0]
-            if len(host__topo_info_map[dirty.bk_host_id]) > 1:
-                dirty_machine_info.update(is_dirty=False)
-            elif host["bk_biz_id"] != env.DBA_APP_BK_BIZ_ID or host["bk_module_id"] != dirty_module:
-                dirty_machine_info.update(is_dirty=False)
-
-            dirty_machine_list.append(dirty_machine_info)
-
-        dirty_machine_list.sort(key=lambda x: x["ticket_id"], reverse=True)
-        return dirty_machine_list
-
-    @classmethod
-    def insert_dirty_machines(cls, bk_biz_id: int, bk_host_ids: List[Dict[str, Any]], ticket: Ticket, flow: Flow):
+    def handle_dirty_machine(cls, ticket_id, root_id, origin_tree_status, target_tree_status):
         """
-        将机器导入到污点池中 TODO: 污点池废弃,代码将被移除
-        @param bk_biz_id: 业务ID
-        @param bk_host_ids: 主机列表
-        @param ticket: 关联的单据
-        @param flow: 关联的flow任务
+        处理执行失败/重试成功涉及的污点池机器
+        @param ticket_id: 单据ID
+        @param root_id: 流程ID
+        @param origin_tree_status: 流程源状态
+        @param target_tree_status: 流程目标状态
         """
-        # 查询污点机器信息
-        host_property_filter = {
-            "condition": "AND",
-            "rules": [{"field": "bk_host_id", "operator": "in", "value": bk_host_ids}],
-        }
-        dirty_host_infos = CCApi.list_hosts_without_biz(
-            {
-                # 默认一次性录入的机器不会超过500
-                "page": {"start": 0, "limit": 500, "sort": "bk_host_id"},
-                "host_property_filter": host_property_filter,
-                "fields": ["bk_host_id", "bk_cloud_id", "bk_host_innerip"],
-            },
-            use_admin=True,
-        )["info"]
-
-        # 获取业务空闲机模块,资源池模块和污点池模块
-        idle_module = CcManage(bk_biz_id, "").get_biz_internal_module(bk_biz_id)[IDLE_HOST_MODULE]["bk_module_id"]
-        system_manage_topo = SystemSettings.get_setting_value(key=SystemSettingsEnum.MANAGE_TOPO.value)
-        resource_module, dirty_module = system_manage_topo["resource_module_id"], system_manage_topo["dirty_module_id"]
-        # 获取主机的拓扑信息(注:这里不能带上业务信息,因为主机可能转移业务)
-        host_topo_infos = TopoHandler.query_host_set_module(bk_host_ids=bk_host_ids)["hosts_topo_info"]
-        # 将污点机器信息转移至DBA污点池模(如果污点机器不在空闲机/资源池,则放弃转移,认为已到正确拓扑)
-        transfer_host_ids = [
-            info["bk_host_id"]
-            for info in host_topo_infos
-            if not set(info["bk_module_ids"]) - {resource_module, idle_module}
-        ]
-        if transfer_host_ids:
-            update_host_properties = {"dbm_meta": [], "need_monitor": False, "update_operator": False}
-            CcManage(bk_biz_id=env.DBA_APP_BK_BIZ_ID, cluster_type="").transfer_host_module(
-                transfer_host_ids, target_module_ids=[dirty_module], update_host_properties=update_host_properties
-            )
-
-        # 录入污点池表中
-        exist_dirty_machine_ids = list(
-            DirtyMachine.objects.filter(bk_host_id__in=bk_host_ids).values_list("bk_host_id", flat=True)
-        )
-        DirtyMachine.objects.bulk_create(
-            [
-                DirtyMachine(
-                    ticket=ticket,
-                    flow=flow,
-                    ip=host["bk_host_innerip"],
-                    bk_biz_id=bk_biz_id,
-                    bk_host_id=host["bk_host_id"],
-                    bk_cloud_id=host["bk_cloud_id"],
-                )
-                for host in dirty_host_infos
-                if host["bk_host_id"] not in exist_dirty_machine_ids
-            ]
-        )
-
-    @classmethod
-    def handle_dirty_machine(cls, ticket_id, root_id, origin_tree_status, target_tree_status):
-        """处理执行失败/重试成功涉及的污点池机器"""
         if (origin_tree_status not in FAILED_STATES) and (target_tree_status not in FAILED_STATES):
             return
diff --git a/dbm-ui/backend/db_dirty/models.py b/dbm-ui/backend/db_dirty/models.py
index f083086e6e..aded49753e 100644
--- a/dbm-ui/backend/db_dirty/models.py
+++ b/dbm-ui/backend/db_dirty/models.py
@@ -59,15 +59,18 @@ def hosts_pool_transfer(cls, bk_biz_id, hosts, pool, operator="", ticket=None):
         host_ids = [host["bk_host_id"] for host in hosts]
 
         # 主机转入污点/故障池,说明第一次被纳管到池
-        if pool in [PoolType.Fault, PoolType.Dirty]:
-            hosts_pool = [
-                cls(bk_biz_id=bk_biz_id, pool=pool, ticket=ticket, creator=operator, updater=operator, **host)
-                for host in hosts
-            ]
-            cls.objects.bulk_create(hosts_pool)
-        # 待回收只会从故障池转移
-        elif pool == PoolType.Recycle:
-            cls.objects.filter(bk_host_id__in=host_ids).update(pool=pool, ticket=ticket)
+        # 待回收会从故障池、资源池转移
+        # 因此这里判断主机不存在就创建,否则更新
+        if pool in [PoolType.Fault, PoolType.Dirty, PoolType.Recycle]:
+            handle_hosts = cls.objects.filter(bk_host_id__in=host_ids)
+            if handle_hosts.count() == len(host_ids):
+                handle_hosts.update(pool=pool, ticket=ticket)
+            else:
+                handle_hosts = [
+                    cls(bk_biz_id=bk_biz_id, pool=pool, ticket=ticket, creator=operator, updater=operator, **host)
+                    for host in hosts
+                ]
+                cls.objects.bulk_create(handle_hosts)
         # 回收机器只能从待回收转移,删除池纳管记录
         # 重导入回资源池,删除池纳管记录
         elif pool in [PoolType.Recycled, PoolType.Resource]:
diff --git a/dbm-ui/backend/db_meta/api/machine/apis.py b/dbm-ui/backend/db_meta/api/machine/apis.py
index 7412c8717b..7737fe31da 100644
--- a/dbm-ui/backend/db_meta/api/machine/apis.py
+++ b/dbm-ui/backend/db_meta/api/machine/apis.py
@@ -193,14 +193,16 @@ def clear_info_for_machine(machines: Optional[List]):
 
         # 清理proxy相关信息
         for p in proxys:
-            p.tendbclusterspiderext.delete()
+            if hasattr(p, "tendbclusterspiderext"):
+                p.tendbclusterspiderext.delete()
             p.delete(keep_parents=True)
 
         # 清理storage相关信息
        for s in storages:
             for info in StorageInstanceTuple.objects.filter(Q(ejector=s) | Q(receiver=s)):
                 # 先删除额外关联信息,否则会报ProtectedError 异常
-                info.tendbclusterstorageset.delete()
+                if hasattr(info, "tendbclusterstorageset"):
+                    info.tendbclusterstorageset.delete()
                 info.delete()
             s.delete(keep_parents=True)
         machine.delete(keep_parents=True)
diff --git a/dbm-ui/backend/db_meta/migrations/0044_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py b/dbm-ui/backend/db_meta/migrations/0045_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py
similarity index 82%
rename from dbm-ui/backend/db_meta/migrations/0044_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py
rename to dbm-ui/backend/db_meta/migrations/0045_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py
index c6fe2d2ee4..b341e2d078 100644
--- a/dbm-ui/backend/db_meta/migrations/0044_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py
+++ b/dbm-ui/backend/db_meta/migrations/0045_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py
@@ -6,7 +6,7 @@ class Migration(migrations.Migration):
 
     dependencies = [
-        ("db_meta", "0043_auto_20241014_1042"),
+        ("db_meta", "0044_deviceclass"),
         ("db_meta", "0043_auto_20241015_2128"),
     ]
diff --git a/dbm-ui/backend/db_meta/models/cluster.py b/dbm-ui/backend/db_meta/models/cluster.py
index 03c18c4c58..6c5f04acb0 100644
--- a/dbm-ui/backend/db_meta/models/cluster.py
+++ b/dbm-ui/backend/db_meta/models/cluster.py
@@ -82,7 +82,10 @@ def __str__(self):
 
     def to_dict(self):
         """将集群所有字段转为字段"""
-        return {**model_to_dict(self), "cluster_type_name": str(ClusterType.get_choice_label(self.cluster_type))}
+        return {
+            **model_to_dict(self, exclude=["tags"]),
+            "cluster_type_name": str(ClusterType.get_choice_label(self.cluster_type)),
+        }
 
     @property
     def simple_desc(self):
@@ -402,16 +405,18 @@ def enable_dbha(self):
         self.refresh_from_db()
 
     @classmethod
-    def get_cluster_related_machines(cls, cluster_ids: List[int]) -> List:
+    def get_cluster_related_machines(cls, cluster_ids: List[int], role: str = None) -> List:
         """
         通过集群id查询集群关联的所有主机信息,即实例所在的主机
         """
         from backend.db_meta.models import Machine
 
         clusters = Cluster.objects.filter(id__in=cluster_ids)
-        host_ids = set(clusters.values_list("storageinstance__machine__bk_host_id", flat=True)) | set(
-            clusters.values_list("proxyinstance__machine__bk_host_id", flat=True)
-        )
+        host_ids = set()
+        if not role or role == "backend":
+            host_ids |= set(clusters.values_list("storageinstance__machine__bk_host_id", flat=True))
+        if not role or role == "proxy":
+            host_ids |= set(clusters.values_list("proxyinstance__machine__bk_host_id", flat=True))
         machines = Machine.objects.filter(bk_host_id__in=host_ids)
         return machines
diff --git a/dbm-ui/backend/db_package/views.py b/dbm-ui/backend/db_package/views.py
index 9bf4e6be2e..61d7688a34 100644
--- a/dbm-ui/backend/db_package/views.py
+++ b/dbm-ui/backend/db_package/views.py
@@ -33,6 +33,7 @@
     SyncMediumSerializer,
     UploadPackageSerializer,
 )
+from backend.exceptions import ApiRequestError
 from backend.flow.consts import MediumEnum
 from backend.iam_app.dataclass import ResourceEnum
 from backend.iam_app.dataclass.actions import ActionEnum
@@ -168,7 +169,10 @@ def partial_update(self, request, *args, **kwargs):
     )
     def destroy(self, request, *args, **kwargs):
         # 删除制品库文件
-        StorageHandler().delete_file(self.get_object().path)
+        try:
+            StorageHandler().delete_file(self.get_object().path)
+        except ApiRequestError as e:
+            logger.error(_("文件删除异常,错误信息: {}").format(e))
         # 删除本地记录
         super().destroy(request, *args, **kwargs)
         return Response()
diff --git a/dbm-ui/backend/db_services/dbresource/serializers.py b/dbm-ui/backend/db_services/dbresource/serializers.py
index 9fdb87ce72..d92b6f0407 100644
--- a/dbm-ui/backend/db_services/dbresource/serializers.py
+++ b/dbm-ui/backend/db_services/dbresource/serializers.py
@@ -19,7 +19,7 @@
 from backend.db_meta.enums import InstanceRole
 from backend.db_meta.enums.spec import SpecClusterType, SpecMachineType
 from backend.db_meta.models import Spec
-from backend.db_meta.models.machine import DeviceClass
+from backend.db_meta.models.machine import DeviceClass, Machine
 from backend.db_services.dbresource import mock
 from backend.db_services.dbresource.constants import ResourceGroupByEnum, ResourceOperation
 from backend.db_services.dbresource.mock import (
@@ -47,6 +47,14 @@ class HostInfoSerializer(serializers.Serializer):
     hosts = serializers.ListSerializer(help_text=_("主机"), child=HostInfoSerializer())
     labels = serializers.ListField(help_text=_("标签"), child=serializers.CharField(), required=False)
 
+    def validate(self, attrs):
+        # 如果主机存在源数据,则拒绝导入
+        host_ids = [host["host_id"] for host in attrs["hosts"]]
+        exist_hosts = list(Machine.objects.filter(bk_host_id__in=host_ids).values_list("ip", flat=True))
+        if exist_hosts:
+            raise serializers.ValidationError(_("导入主机{}存在元数据,请检查后重新导入").format(exist_hosts))
+        return attrs
+
 
 class ResourceApplySerializer(serializers.Serializer):
     class HostDetailSerializer(serializers.Serializer):
diff --git a/dbm-ui/backend/db_services/mysql/fixpoint_rollback/views.py b/dbm-ui/backend/db_services/mysql/fixpoint_rollback/views.py
index ddea2c285f..d07983cd4c 100644
--- a/dbm-ui/backend/db_services/mysql/fixpoint_rollback/views.py
+++ b/dbm-ui/backend/db_services/mysql/fixpoint_rollback/views.py
@@ -108,7 +108,7 @@ def query_fixpoint_log(self, requests, *args, **kwargs):
         limit, offset = validated_data["limit"], validated_data["offset"]
 
         # 查询目前定点回档临时集群
-        tmp_clusters = Cluster.objects.filter(cluster_type=ClusterType.TenDBCluster, tag__name=SystemTagEnum.TEMPORARY)
+        tmp_clusters = Cluster.objects.filter(cluster_type=ClusterType.TenDBCluster, tags__key=SystemTagEnum.TEMPORARY)
         tmp_clusters_count = tmp_clusters.count()
         # 查询定点回档记录
         temp_clusters = tmp_clusters[offset : limit + offset]
@@ -120,18 +120,26 @@ def query_fixpoint_log(self, requests, *args, **kwargs):
         fixpoint_logs: List[Dict[str, Any]] = []
         for record in records:
             ticket_data = record.ticket.details["infos"][0]
-            clusters = record.ticket.details["clusters"]
             target_cluster = Cluster.objects.get(id=record.cluster_id)
+
+            # 获取回档的节点信息
+            if "rollback_host" in ticket_data:
+                rollback_host = ticket_data["rollback_host"]
+            else:
+                rollback_host = {
+                    "remote_hosts": ticket_data["resource_spec"]["remote_hosts"]["hosts"],
+                    "spider_host": ticket_data["resource_spec"]["spider_host"]["hosts"][0],
+                }
+
+            # 填充回档记录
             fixpoint_logs.append(
                 {
                     "databases": ticket_data["databases"],
                     "databases_ignore": ticket_data["databases_ignore"],
                     "tables": ticket_data["tables"],
                     "tables_ignore": ticket_data["tables_ignore"],
-                    "source_cluster": clusters[str(ticket_data["cluster_id"])],
+                    "source_cluster": record.ticket.details["clusters"][str(ticket_data["cluster_id"])],
                     "target_cluster": {
                         "cluster_id": record.cluster_id,
-                        "nodes": ticket_data["rollback_host"],
+                        "nodes": rollback_host,
                         "status": target_cluster.status,
                         "phase": target_cluster.phase,
                         "operations": ClusterOperateRecord.objects.get_cluster_operations(record.cluster_id),
diff --git a/dbm-ui/backend/db_services/tag/filters.py b/dbm-ui/backend/db_services/tag/filters.py
index 07118a040a..e4b31c3feb 100644
--- a/dbm-ui/backend/db_services/tag/filters.py
+++ b/dbm-ui/backend/db_services/tag/filters.py
@@ -18,6 +18,7 @@ class TagListFilter(filters.FilterSet):
     key = filters.CharFilter(field_name="key", lookup_expr="icontains", label=_("键"))
     value = filters.CharFilter(field_name="value", lookup_expr="icontains", label=_("值"))
+    ids = filters.CharFilter(field_name="ids", method="filter_ids", label=_("tag id列表"))
 
     class Meta:
         model = Tag
@@ -25,3 +26,7 @@ class Meta:
             "bk_biz_id": ["exact"],
             "type": ["exact"],
         }
+
+    def filter_ids(self, queryset, name, value):
+        ids = list(map(int, value.split(",")))
+        return queryset.filter(id__in=ids)
diff --git a/dbm-ui/backend/db_services/taskflow/handlers.py b/dbm-ui/backend/db_services/taskflow/handlers.py
index 359505ef89..62b9715318 100644
--- a/dbm-ui/backend/db_services/taskflow/handlers.py
+++ b/dbm-ui/backend/db_services/taskflow/handlers.py
@@ -26,7 +26,6 @@
 from backend import env
 from backend.bk_web.constants import LogLevelName
 from backend.components import BKLogApi
-from backend.db_services.dbbase.constants import IpDest
 from backend.db_services.taskflow import task
 from backend.db_services.taskflow.constants import LOG_START_STRIP_PATTERN
 from backend.db_services.taskflow.exceptions import (
@@ -38,7 +37,6 @@
 from backend.flow.consts import StateType
 from backend.flow.engine.bamboo.engine import BambooEngine
 from backend.flow.models import FlowNode, FlowTree
-from backend.ticket.models import Ticket
 from backend.utils.string import format_json_string
 from backend.utils.time import calculate_cost_time, datetime2str
 
@@ -70,12 +68,6 @@ def revoke_pipeline(self):
                 for node_id in running_node_ids:
                     bamboo_engine.runtime.set_state(node_id=node_id, to_state=StateType.REVOKED)
 
-        # 非单据类任务,直接返回
-        if not tree.uid:
-            return result
-
-        # 回收单据涉及的新机到资源池
-        Ticket.create_recycle_ticket(ticket_id=tree.uid, ip_dest=IpDest.Resource)
         return result
 
     def retry_node(self, node: str):
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
index b981566c42..7d21403d1f 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
@@ -234,7 +234,7 @@ def migrate_cluster_flow(self, use_for_upgrade=False):
                 logger.error("cluster {} backup info not exists".format(cluster_model.id))
                 raise TendbGetBackupInfoFailedException(message=_("获取集群 {} 的备份信息失败".format(cluster_id)))
             cluster["backupinfo"] = backup_info
-            cluster["new_master_ip"] = self.data["new_master_ ip"]
+            cluster["new_master_ip"] = self.data["new_master_ip"]
             cluster["new_slave_ip"] = self.data["new_slave_ip"]
             cluster["new_master_port"] = master_model.port
             cluster["new_slave_port"] = master_model.port
diff --git a/dbm-ui/backend/flow/utils/spider/spider_db_meta.py b/dbm-ui/backend/flow/utils/spider/spider_db_meta.py
index e57ae60d31..1e5fccda33 100644
--- a/dbm-ui/backend/flow/utils/spider/spider_db_meta.py
+++ b/dbm-ui/backend/flow/utils/spider/spider_db_meta.py
@@ -111,7 +111,7 @@ def add_spider_nodes(self, spider_role: Optional[TenDBClusterSpiderRole], domain
             "domain": domain,
             "add_spiders": self.global_data["spider_ip_list"],
             "spider_role": spider_role,
-            "resource_spec": self.global_data.get("resource_spec", default_spider_spec),
+            "resource_spec": self.global_data.get("resource_spec") or default_spider_spec,
             "is_slave_cluster_create": False,
         }
         TenDBClusterClusterHandler.add_spiders(**kwargs)
diff --git a/dbm-ui/backend/ticket/builders/__init__.py b/dbm-ui/backend/ticket/builders/__init__.py
index 58badc4a43..cc07c337b1 100644
--- a/dbm-ui/backend/ticket/builders/__init__.py
+++ b/dbm-ui/backend/ticket/builders/__init__.py
@@ -254,8 +254,18 @@ class RecycleParamBuilder(FlowParamBuilder):
     """
 
     controller_map = {
-        DBType.MySQL.value: "MySQLController.mysql_machine_clear_scene",
-        DBType.TenDBCluster.value: "MySQLController.mysql_machine_clear_scene",
+        DBType.MySQL.value: "mysql.MySQLController.mysql_machine_clear_scene",
+        DBType.TenDBCluster.value: "spider.SpiderController.tendbcluster_machine_clear_scene",
+        DBType.Doris.value: "doris.DorisController.doris_machine_clear_scene",
+        DBType.Kafka.value: "kafka.KafkaController.kafka_machine_clear_scene",
+        DBType.Es.value: "es.EsController.es_machine_clear_scene",
+        DBType.Hdfs.value: "hdfs.HdfsController.hdfs_machine_clear_scene",
+        DBType.Pulsar.value: "pulsar.PulsarController.pulsar_machine_clear_scene",
+        DBType.Vm.value: "vm.VmController.vm_machine_clear_scene",
+        # TODO redis,sqlserver,mongo清理流程暂时没有
+        DBType.Redis.value: "",
+        DBType.Sqlserver.value: "",
+        DBType.MongoDB.value: "",
     }
 
     def __init__(self, ticket: Ticket):
@@ -265,9 +275,13 @@ def __init__(self, ticket: Ticket):
 
     def build_controller_info(self) -> dict:
         db_type = self.ticket_data["db_type"]
-        class_name, flow_name = self.controller_map[db_type].split(".")
-        module = importlib.import_module(f"backend.flow.engine.controller.{db_type}")
importlib.import_module(f"backend.flow.engine.controller.{db_type}") + # TODO: 暂时兼容没有清理流程的组件,默认用mysql + clear_db_type = db_type if self.controller_map.get(db_type) else DBType.MySQL.value + + file_name, class_name, flow_name = self.controller_map[clear_db_type].split(".") + module = importlib.import_module(f"backend.flow.engine.controller.{file_name}") self.controller = getattr(getattr(module, class_name), flow_name) + return super().build_controller_info() def format_ticket_data(self): diff --git a/dbm-ui/backend/ticket/builders/common/base.py b/dbm-ui/backend/ticket/builders/common/base.py index d79a7289cd..3ec80f386e 100644 --- a/dbm-ui/backend/ticket/builders/common/base.py +++ b/dbm-ui/backend/ticket/builders/common/base.py @@ -137,7 +137,8 @@ class InstanceInfoSerializer(HostInfoSerializer): class HostRecycleSerializer(serializers.Serializer): """主机回收信息""" - DEFAULT = {"for_biz": PLAT_BIZ_ID, "ip_dest": IpDest.Resource} + DEFAULT = {"for_biz": PLAT_BIZ_ID, "ip_dest": IpDest.Resource.value} + FAULT_DEFAULT = {"for_biz": PLAT_BIZ_ID, "ip_dest": IpDest.Fault.value} for_biz = serializers.IntegerField(help_text=_("目标业务"), required=False, default=PLAT_BIZ_ID) ip_dest = serializers.ChoiceField(help_text=_("机器流向"), choices=IpDest.get_choices(), default=IpDest.Fault) @@ -507,10 +508,10 @@ def patch_recycle_host_details(self): return self.ticket.details["recycle_hosts"] = ResourceHandler.standardized_resource_host(recycle_hosts, bk_biz_id) - def patch_recycle_cluster_details(self): + def patch_recycle_cluster_details(self, role=None): """补充集群下架后回收主机信息,在下架类单据一定调用此方法""" bk_biz_id = self.ticket.bk_biz_id - recycle_hosts = Cluster.get_cluster_related_machines(fetch_cluster_ids(self.ticket.details)) + recycle_hosts = Cluster.get_cluster_related_machines(fetch_cluster_ids(self.ticket.details), role) recycle_hosts = [{"bk_host_id": host.bk_host_id} for host in recycle_hosts] self.ticket.details["recycle_hosts"] = ResourceHandler.standardized_resource_host(recycle_hosts, bk_biz_id) diff --git a/dbm-ui/backend/ticket/builders/doris/doris_shrink.py b/dbm-ui/backend/ticket/builders/doris/doris_shrink.py index 225d4a6c4b..9072725a57 100644 --- a/dbm-ui/backend/ticket/builders/doris/doris_shrink.py +++ b/dbm-ui/backend/ticket/builders/doris/doris_shrink.py @@ -40,9 +40,9 @@ def validate(self, attrs): super().validate(attrs) role_hash = { - InstanceRole.DORIS_BACKEND_HOT: attrs["nodes"]["hot"], - InstanceRole.DORIS_BACKEND_COLD: attrs["nodes"]["cold"], - InstanceRole.DORIS_OBSERVER: attrs["nodes"]["observer"], + InstanceRole.DORIS_BACKEND_HOT: attrs["old_nodes"]["hot"], + InstanceRole.DORIS_BACKEND_COLD: attrs["old_nodes"]["cold"], + InstanceRole.DORIS_OBSERVER: attrs["old_nodes"]["observer"], } cluster = Cluster.objects.get(id=attrs["cluster_id"]) diff --git a/dbm-ui/backend/ticket/builders/es/es_shrink.py b/dbm-ui/backend/ticket/builders/es/es_shrink.py index 4e145386ef..7974c28e72 100644 --- a/dbm-ui/backend/ticket/builders/es/es_shrink.py +++ b/dbm-ui/backend/ticket/builders/es/es_shrink.py @@ -39,9 +39,9 @@ def validate(self, attrs): super().validate(attrs) role_hash = { - InstanceRole.ES_DATANODE_HOT: attrs["nodes"]["hot"], - InstanceRole.ES_DATANODE_COLD: attrs["nodes"]["cold"], - InstanceRole.ES_CLIENT: attrs["nodes"]["client"], + InstanceRole.ES_DATANODE_HOT: attrs["old_nodes"]["hot"], + InstanceRole.ES_DATANODE_COLD: attrs["old_nodes"]["cold"], + InstanceRole.ES_CLIENT: attrs["old_nodes"]["client"], } cluster = Cluster.objects.get(id=attrs["cluster_id"]) diff --git 
index f0374f9079..1da407c838 100644
--- a/dbm-ui/backend/ticket/builders/hdfs/hdfs_shrink.py
+++ b/dbm-ui/backend/ticket/builders/hdfs/hdfs_shrink.py
@@ -37,7 +37,7 @@ def validate(self, attrs):
         super().validate(attrs)
 
         role_hash = {
-            InstanceRole.HDFS_DATA_NODE: attrs["nodes"]["datanode"],
+            InstanceRole.HDFS_DATA_NODE: attrs["old_nodes"]["datanode"],
         }
 
         cluster = Cluster.objects.get(id=attrs["cluster_id"])
diff --git a/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py b/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py
index f6b5d3d3a9..9469b70436 100644
--- a/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py
+++ b/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py
@@ -38,7 +38,7 @@ def validate(self, attrs):
         super().validate(attrs)
 
         role_hash = {
-            InstanceRole.BROKER: attrs["nodes"]["broker"],
+            InstanceRole.BROKER: attrs["old_nodes"]["broker"],
         }
 
         cluster = Cluster.objects.get(id=attrs["cluster_id"])
diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py b/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py
index 0475089988..e0de4c6227 100644
--- a/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py
+++ b/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py
@@ -57,7 +57,9 @@ class FixPointRollbackSerializer(serializers.Serializer):
     @classmethod
     def validate_rollback_info(cls, rollback_cluster_type, info, now):
         # 校验回档集群类型参数
-        if rollback_cluster_type == RollbackBuildClusterType.BUILD_INTO_NEW_CLUSTER and not info.get("rollback_host"):
+        if rollback_cluster_type == RollbackBuildClusterType.BUILD_INTO_NEW_CLUSTER and not (
+            info.get("rollback_host") or info.get("resource_spec")
+        ):
             raise serializers.ValidationError(_("请提供部署新集群的机器信息"))
 
         if rollback_cluster_type != RollbackBuildClusterType.BUILD_INTO_NEW_CLUSTER and not info.get(
@@ -102,8 +104,9 @@ def format_ticket_data(self):
             info["rollback_type"] = f"{info['backup_source'].upper()}_AND_{op_type}"
             # 格式化定点回档部署的信息
             if rollback_cluster_type == RollbackBuildClusterType.BUILD_INTO_NEW_CLUSTER:
-                info["rollback_ip"] = info["rollback_host"]["ip"]
-                info["bk_rollback"] = info.pop("rollback_host")
+                if self.ticket_data["ip_source"] == IpSource.MANUAL_INPUT:
+                    info["rollback_ip"] = info["rollback_host"]["ip"]
+                    info["bk_rollback"] = info.pop("rollback_host")
             else:
                 info["rollback_cluster_id"] = info.pop("target_cluster_id")
 
@@ -122,12 +125,12 @@ def format(self):
     def post_callback(self):
         next_flow = self.ticket.next_flow()
         for info in next_flow.details["ticket_data"]["infos"]:
-            info["rollback_ip"] = info["rollback_host"]["ip"]
-            info["bk_rollback"] = info.pop("rollback_host")
+            info["rollback_ip"] = info["rollback_host"][0]["ip"]
+            info["bk_rollback"] = info.pop("rollback_host")[0]
         next_flow.save(update_fields=["details"])
 
 
-@builders.BuilderFactory.register(TicketType.MYSQL_ROLLBACK_CLUSTER)
+@builders.BuilderFactory.register(TicketType.MYSQL_ROLLBACK_CLUSTER, is_apply=True)
 class MysqlFixPointRollbackFlowBuilder(BaseMySQLTicketFlowBuilder):
     serializer = MySQLFixPointRollbackDetailSerializer
     inner_flow_builder = MySQLFixPointRollbackFlowParamBuilder
diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py
index e5802c2b78..7c2e0c546d 100644
--- a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py
+++ b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py
@@ -8,6 +8,7 @@
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ +import itertools from django.utils.translation import gettext_lazy as _ from rest_framework import serializers @@ -82,14 +83,18 @@ def format_ticket_data(self): class MysqlMigrateClusterResourceParamBuilder(BaseOperateResourceParamBuilder): def format(self): - self.patch_info_affinity_location(roles=["backend_group"]) + self.patch_info_affinity_location() def post_callback(self): next_flow = self.ticket.next_flow() ticket_data = next_flow.details["ticket_data"] for info in ticket_data["infos"]: - backend = info.pop("backend_group")[0] - info["bk_new_master"], info["bk_new_slave"] = backend["master"], backend["slave"] + # 兼容资源池手动选择和自动匹配的协议 + if "backend_group" in info: + backend = info.pop("backend_group")[0] + info["bk_new_master"], info["bk_new_slave"] = backend["master"], backend["slave"] + else: + info["bk_new_master"], info["bk_new_slave"] = info.pop("new_master")[0], info.pop("new_slave")[0] info["new_master_ip"], info["new_slave_ip"] = info["bk_new_master"]["ip"], info["bk_new_slave"]["ip"] next_flow.save(update_fields=["details"]) @@ -103,17 +108,18 @@ class MysqlMigrateClusterFlowBuilder(MysqlMasterSlaveSwitchFlowBuilder): need_patch_recycle_host_details = True @staticmethod - def get_old_master_slave_host(info): - # 同机关联情况下,任取一台集群 - cluster = Cluster.objects.get(id=info["cluster_ids"][0]) - master = cluster.storageinstance_set.get(instance_inner_role=InstanceInnerRole.MASTER) - slave = cluster.storageinstance_set.get(instance_inner_role=InstanceInnerRole.SLAVE, is_stand_by=True) - # 补充下架的机器信息 - info["old_nodes"] = {"old_master": [master.machine.simple_desc], "old_slave": [slave.machine.simple_desc]} - return info + def get_old_master_slave_host(infos, cluster_map): + for info in infos: + # 同机关联情况下,任取一台集群 + insts = cluster_map[info["cluster_ids"][0]].storageinstance_set.all() + master = next(i for i in insts if i.instance_inner_role == InstanceInnerRole.MASTER) + slave = next(i for i in insts if i.instance_inner_role == InstanceInnerRole.SLAVE and i.is_stand_by) + # 补充下架的机器信息 + info["old_nodes"] = {"old_master": [master.machine.simple_desc], "old_slave": [slave.machine.simple_desc]} def patch_ticket_detail(self): + cluster_ids = list(itertools.chain(*[infos["cluster_ids"] for infos in self.ticket.details["infos"]])) + cluster_map = Cluster.objects.prefetch_related("storageinstance_set").in_bulk(cluster_ids, field_name="id") # mysql主从迁移会下架掉master和slave(stand by) - for info in self.ticket.details["infos"]: - self.get_old_master_slave_host(info) + self.get_old_master_slave_host(self.ticket.details["infos"], cluster_map) super().patch_ticket_detail() diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py index 7bb7d506b9..43a3547581 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py @@ -97,12 +97,37 @@ def format_ticket_data(self): class MysqlMigrateUpgradeResourceParamBuilder(BaseOperateResourceParamBuilder): + def auto_patch_info(self, info, info_index, nodes, cluster): + info["new_master"] = nodes[f"{info_index}_backend_group"][0]["master"] + info["new_slave"] = nodes[f"{info_index}_backend_group"][0]["slave"] + info["ro_slaves"] = [ + { + "old_ro_slave": { + "bk_cloud_id": slave.machine.bk_cloud_id, + "bk_host_id": 
+                    "ip": slave.machine.ip,
+                },
+                "new_ro_slave": nodes[f"{info_index}_{slave.machine.bk_host_id}"][0],
+            }
+            for slave in cluster.storageinstance_set.all()
+            if slave.instance_role == InstanceRole.BACKEND_SLAVE and not slave.is_stand_by
+        ]
+
+    def manual_patch_info(self, info, info_index, cluster, nodes):
+        info["new_master"] = info["new_master"][0]
+        info["new_slave"] = info["new_slave"][0]
+        info["ro_slaves"] = [
+            {"old_ro_slave": slave["old_slave"], "new_ro_slave": slave["new_slave"]}
+            for slave in info.pop("read_only_slaves", [])
+        ]
+        # 弹出read_only_new_slave,这个key仅作资源池申请
+        info.pop("read_only_new_slave")
+
     def post_callback(self):
         # 通过资源池获取到的节点
         nodes = self.ticket_data.pop("nodes", [])
         cluster_ids = list(itertools.chain(*[infos["cluster_ids"] for infos in self.ticket.details["infos"]]))
-
         id_cluster_map = Cluster.objects.prefetch_related(
             "storageinstance_set", "storageinstance_set__machine"
         ).in_bulk(cluster_ids, field_name="id")
@@ -112,21 +137,10 @@ def post_callback(self):
         ticket_data = next_flow.details["ticket_data"]
         for info_index, info in enumerate(ticket_data["infos"]):
+            # 兼容资源池手动输入和自动匹配的协议
             cluster = id_cluster_map[info["cluster_ids"][0]]
-            info["new_master"] = nodes[f"{info_index}_backend_group"][0]["master"]
-            info["new_slave"] = nodes[f"{info_index}_backend_group"][0]["slave"]
-            info["ro_slaves"] = [
-                {
-                    "old_ro_slave": {
-                        "bk_cloud_id": slave.machine.bk_cloud_id,
-                        "bk_host_id": slave.machine.bk_host_id,
-                        "ip": slave.machine.ip,
-                    },
-                    "new_ro_slave": nodes[f"{info_index}_{slave.machine.bk_host_id}"][0],
-                }
-                for slave in cluster.storageinstance_set.all()
-                if slave.instance_role == InstanceRole.BACKEND_SLAVE and not slave.is_stand_by
-            ]
+            # self.auto_patch_info(info, info_index, nodes, cluster)
+            self.manual_patch_info(info, info_index, cluster, nodes)
             ticket_data["infos"][info_index] = info
 
         next_flow.save(update_fields=["details"])
@@ -141,21 +155,12 @@ class MysqlMigrateUpgradeFlowBuilder(MysqlMasterSlaveSwitchFlowBuilder):
     resource_batch_apply_builder = MysqlMigrateUpgradeResourceParamBuilder
     need_patch_recycle_host_details = True
 
-    def patch_ticket_detail(self):
-        """mysql_master -> backend_group"""
-        # 主从构成 backend group
-        # 只读从库(非 standby) 各自单独成组
-        super().patch_ticket_detail()
-
+    def patch_auto_match_resource_spec(self, id_cluster_map):
+        # 自动匹配补充规格信息
         resource_spec = {}
-        cluster_ids = list(itertools.chain(*[infos["cluster_ids"] for infos in self.ticket.details["infos"]]))
-        id_cluster_map = Cluster.objects.prefetch_related(
-            "storageinstance_set", "storageinstance_set__machine"
-        ).in_bulk(cluster_ids, field_name="id")
-
         for info in self.ticket.details["infos"]:
-            cluster = id_cluster_map[info["cluster_ids"][0]]
             # 主从规格
+            cluster = id_cluster_map[info["cluster_ids"][0]]
             ins = cluster.storageinstance_set.first()
             resource_spec["backend_group"] = {
                 "spec_id": ins.machine.spec_id,
@@ -172,8 +177,33 @@
                 "location_spec": {"city": cluster.region, "sub_zone_ids": [ins.machine.bk_sub_zone_id]},
                 "affinity": AffinityEnum.NONE.value,
             }
+            info["old_nodes"]["old_slave"].append(ins.machine.simple_desc)
+            # 覆写resource_spec
             info["resource_spec"] = resource_spec
-            # 补充下架机器的信息
-            MysqlMigrateClusterFlowBuilder.get_old_master_slave_host(info)
-        self.ticket.save(update_fields=["details"])
+
+    def patch_manual_match_resource_spec(self, id_cluster_map):
+        # 手动匹配补充规格信息
+        for info in self.ticket.details["infos"]:
+            read_only_new_slave = [slave["new_slave"] for slave in info["read_only_slaves"]]
+            read_only_old_slave = [slave["old_slave"] for slave in info["read_only_slaves"]]
[slave["old_slave"] for slave in info["read_only_slaves"]] + info["old_nodes"]["old_slave"].extend(read_only_old_slave) + info["resource_spec"]["read_only_new_slave"] = {"spec_id": 0, "hosts": read_only_new_slave} + + def patch_ticket_detail(self): + """mysql_master -> backend_group""" + # 主从构成 backend group + # 只读从库(非 standby) 各自单独成组 + + cluster_ids = list(itertools.chain(*[infos["cluster_ids"] for infos in self.ticket.details["infos"]])) + id_cluster_map = Cluster.objects.prefetch_related( + "storageinstance_set", "storageinstance_set__machine" + ).in_bulk(cluster_ids, field_name="id") + + # 补充下架机器的信息 + MysqlMigrateClusterFlowBuilder.get_old_master_slave_host(self.ticket.details["infos"], id_cluster_map) + # 补充自动匹配的资源池信息 + # self.patch_auto_match_resource_spec(id_cluster_map) + # 兼容方案,先走资源池手动匹配协议 + self.patch_manual_match_resource_spec(id_cluster_map) + # 补充通用单据信息 + super().patch_ticket_detail() diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py b/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py index 3986a1540a..45b7a601b3 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py @@ -87,7 +87,7 @@ def patch_slave_subzone(cls, ticket_data): # TODO: 后续改造为,尽量与原slave一致,不一致再满足亲和性 slave_host_ids = [s["bk_host_id"] for info in ticket_data["infos"] for s in info["old_nodes"]["old_slave"]] slaves = StorageInstance.objects.prefetch_related("as_receiver__ejector__machine", "machine").filter( - machine__bk_host_id__in=slave_host_ids, cluster_type=ClusterType.TenDBHA + machine__bk_host_id__in=slave_host_ids ) slave_host_map = {slave.machine.bk_host_id: slave for slave in slaves} for info in ticket_data["infos"]: diff --git a/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py b/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py index 911b997d60..350317dbd7 100644 --- a/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py +++ b/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py @@ -32,15 +32,15 @@ class NodesSerializer(serializers.Serializer): broker = serializers.ListField(help_text=_("broker信息列表"), child=serializers.DictField()) bookkeeper = serializers.ListField(help_text=_("bookkeeper信息列表"), child=serializers.DictField()) - old_nodes = serializers.JSONField(help_text=_("节点列表信息"), required=False) + old_nodes = NodesSerializer(help_text=_("下架节点信息")) ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT) def validate(self, attrs): super().validate(attrs) role_hash = { - InstanceRole.PULSAR_BROKER: attrs["nodes"]["broker"], - InstanceRole.PULSAR_BOOKKEEPER: attrs["nodes"]["bookkeeper"], + InstanceRole.PULSAR_BROKER: attrs["old_nodes"]["broker"], + InstanceRole.PULSAR_BOOKKEEPER: attrs["old_nodes"]["bookkeeper"], } at_least = { diff --git a/dbm-ui/backend/ticket/builders/redis/base.py b/dbm-ui/backend/ticket/builders/redis/base.py index 69fd4a341d..30621bd290 100644 --- a/dbm-ui/backend/ticket/builders/redis/base.py +++ b/dbm-ui/backend/ticket/builders/redis/base.py @@ -22,7 +22,7 @@ from backend.ticket import builders from backend.ticket.builders import TicketFlowBuilder from backend.ticket.builders.common.base import RedisTicketFlowBuilderPatchMixin, SkipToRepresentationMixin -from backend.ticket.constants import CheckRepairFrequencyType, DataCheckRepairSettingType +from backend.ticket.constants import CheckRepairFrequencyType, DataCheckRepairSettingType, FlowType KEY_FILE_PREFIX = "/redis/keyfiles/{biz}" @@ -120,6 +120,7 @@ class 
 class RedisUpdateApplyResourceParamBuilder(builders.ResourceApplyParamBuilder):
     def post_callback(self):
         next_flow = self.ticket.next_flow()
+        drop_proxy_hosts = []
         for info in next_flow.details["ticket_data"]["infos"]:
             group_num = info["resource_spec"]["backend_group"]["count"]
             shard_num = info["cluster_shard_num"]
@@ -139,8 +140,22 @@ def post_callback(self):
                 # 分片数
                 shard_num=shard_num,
             )
+            # 新proxy也会下架,这里需要加入到old_nodes里面
+            drop_proxy_hosts.extend(info["proxy"])
+
         next_flow.save(update_fields=["details"])
 
+        # 将下架的新proxy更新到清理流程中
+        recycle_flow = self.ticket.flows.get(flow_type=FlowType.HOST_RECYCLE)
+        recycle_flow.details["ticket_data"]["clear_hosts"].extend(drop_proxy_hosts)
+        recycle_flow.save(update_fields=["details"])
+
+        # 如果有导入资源池流程,则将新proxy加入
+        resource_flow = self.ticket.flows.filter(flow_type=FlowType.HOST_IMPORT_RESOURCE).first()
+        if resource_flow:
+            resource_flow.details["ticket_data"]["hosts"].extend(drop_proxy_hosts)
+            resource_flow.save(update_fields=["details"])
+
 
 class ClusterValidateMixin(object):
     """全局校验cluster状态"""
diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_cut_off.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_cut_off.py
index a334d7bbcc..a9dcd7ed7e 100644
--- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_cut_off.py
+++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_cut_off.py
@@ -56,7 +56,6 @@ class RedisClusterCutOffParamBuilder(builders.FlowParamBuilder):
     controller = RedisController.redis_cluster_cutoff_scene
 
     def format_ticket_data(self):
-        self.ticket_data.pop("old_nodes")
         super().format_ticket_data()
diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py
index 2f3d8fa9bf..f53be88f56 100644
--- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py
+++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py
@@ -8,7 +8,6 @@
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
""" -import itertools import operator from functools import reduce @@ -21,8 +20,7 @@ from backend.db_services.redis.rollback.models import TbTendisRollbackTasks from backend.flow.engine.controller.redis import RedisController from backend.ticket import builders -from backend.ticket.builders.common.base import DisplayInfoSerializer -from backend.ticket.builders.common.base import HostRecycleSerializer, SkipToRepresentationMixin +from backend.ticket.builders.common.base import DisplayInfoSerializer, HostRecycleSerializer, SkipToRepresentationMixin from backend.ticket.builders.redis.base import BaseRedisTicketFlowBuilder, RedisBasePauseParamBuilder from backend.ticket.constants import TicketType @@ -55,8 +53,6 @@ def validate(self, attr): # 填写域名 attr["prod_cluster"] = prod_cluster.immute_domain - # 填写构造任务,patch函数用 - attr["datastruct_tasks"] = tasks return attr infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer()) @@ -78,18 +74,22 @@ class RedisDataStructureTaskDeleteFlowBuilder(BaseRedisTicketFlowBuilder): def patch_datastruct_delete_nodes(self): drop_machine_filters = [] for info in self.ticket.details["infos"]: - tasks = info.pop("datastruct_tasks") - instances = itertools.chain(*[task.temp_instance_range for task in tasks]) + # 获取销毁任务 + task = TbTendisRollbackTasks.objects.get( + related_rollback_bill_id=info.get("related_rollback_bill_id"), + prod_cluster=info["prod_cluster"], + bk_cloud_id=info.get("bk_cloud_id"), + destroyed_status=DestroyedStatus.NOT_DESTROYED, + ) + # 过滤销毁实例的主机 filters = [ - Q(bk_biz_id=tasks[0].bk_biz_id, bk_cloud_id=tasks[0].bk_cloud_id, ip=instance.split(":")[0]) - for instance in instances + Q(bk_biz_id=task.bk_biz_id, bk_cloud_id=task.bk_cloud_id, ip=instance.split(":")[0]) + for instance in task.temp_instance_range ] drop_machine_filters.extend(filters) - drop_machines = Machine.objects.filter(reduce(operator.or_, drop_machine_filters)) - self.ticket.details["old_nodes"]["datastruct_hosts"] = [ - {"ip": host.ip, "bk_host_id": host.bk_host_id} for host in drop_machines - ] + old_nodes = Machine.objects.filter(reduce(operator.or_, drop_machine_filters)).values("ip", "bk_host_id") + self.ticket.details["old_nodes"] = {"datastruct_hosts": list(old_nodes)} def patch_ticket_detail(self): self.patch_datastruct_delete_nodes() diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py index b3c9ca6ef7..be8c2ce0c0 100644 --- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py +++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py @@ -37,7 +37,6 @@ class OldProxySerializer(serializers.Serializer): cluster_id = serializers.IntegerField(help_text=_("集群ID")) target_proxy_count = serializers.IntegerField(help_text=_("目标proxy数量"), min_value=2, required=False) - proxy_reduce_count = serializers.IntegerField(help_text=_("缩容proxy数量"), required=False) old_nodes = OldProxySerializer(help_text=_("缩容指定proxy"), required=False) online_switch_type = serializers.ChoiceField( help_text=_("切换类型"), choices=SwitchConfirmType.get_choices(), default=SwitchConfirmType.NO_CONFIRM @@ -47,19 +46,18 @@ class OldProxySerializer(serializers.Serializer): ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT) def validate(self, attrs): - clusters = Cluster.objects.filter(id__in=fetch_cluster_ids(attrs)).prefetch_related("proxyinstance_set") - cluster_id__cluster_map = {cluster.id: cluster for 
-
+        cluster_ids = fetch_cluster_ids(attrs)
+        cluster_map = Cluster.objects.prefetch_related("proxyinstance_set").in_bulk(cluster_ids, field_name="id")
         # 验证缩容后数量至少为2
         for info in attrs["infos"]:
-            cluster = cluster_id__cluster_map[info["cluster_id"]]
-            if info.get("proxy_reduced_hosts"):
-                info["target_proxy_count"] = cluster.proxyinstance_set.count() - len(info["proxy_reduced_hosts"])
+            cluster = cluster_map[info["cluster_id"]]
+            if info.get("old_nodes"):
+                proxy_reduced_hosts = info["old_nodes"]["proxy_reduced_hosts"]
+                info["target_proxy_count"] = cluster.proxyinstance_set.count() - len(proxy_reduced_hosts)
             if info["target_proxy_count"] < 2:
                 raise serializers.ValidationError(_("请保证集群{}缩容后proxy数量不小于2").format(cluster.immute_domain))
             # 提前存入proxy信息用于后续patch
-            attrs.update(proxy_insts=cluster.proxyinstance_set.all(), bk_cloud_id=cluster.bk_cloud_id)
-
+            attrs.update(bk_cloud_id=cluster.bk_cloud_id)
         return attrs
 
 
@@ -67,7 +65,8 @@ class ProxyScaleDownParamBuilder(builders.FlowParamBuilder):
     controller = RedisController.redis_proxy_scale
 
     def format_ticket_data(self):
-        super().format_ticket_data()
+        for info in self.ticket_data["infos"]:
+            info["proxy_reduced_hosts"] = info["old_nodes"]["proxy_reduced_hosts"]
 
 
 @builders.BuilderFactory.register(TicketType.REDIS_PROXY_SCALE_DOWN, is_recycle=True)
@@ -78,20 +77,23 @@ class ProxyScaleDownFlowBuilder(BaseRedisTicketFlowBuilder):
     need_patch_recycle_host_details = True
 
     def patch_old_proxy_nodes(self):
+        cluster_ids = fetch_cluster_ids(self.ticket.details)
+        cluster_map = Cluster.objects.prefetch_related("proxyinstance_set").in_bulk(cluster_ids, field_name="id")
         for info in self.ticket.details["infos"]:
-            proxy_insts = info.pop("proxy_insts")
-
             if info.get("old_nodes"):
                 continue
-            # 获取proxy ip和ip与host id的映射
-            proxy_ip__host = {proxy.machine.ip: proxy.machine for proxy in proxy_insts}
+            proxy_insts = cluster_map[info["cluster_id"]].proxyinstance_set.all()
+            proxy_ip__host = {proxy.machine.ip: proxy.machine.bk_host_id for proxy in proxy_insts}
             proxy_ips = list(proxy_insts.values_list("machine__ip", flat=True))
             # 获取实际下架的ip
             target_proxy_count = info["target_proxy_count"]
             down_ips = RedisProxyScaleFlow.calc_scale_down_ips(self.ticket.bk_biz_id, proxy_ips, target_proxy_count)
             # 补充old proxy nodes信息
-            info["old_nodes"] = {"proxy": [{"bk_host_id": proxy_ip__host[ip], "ip": ip} for ip in down_ips]}
+            info["old_nodes"] = {
+                "proxy_reduced_hosts": [{"bk_host_id": proxy_ip__host[ip], "ip": ip} for ip in down_ips]
+            }
+            info.pop("target_proxy_count")
 
     def patch_ticket_detail(self):
         self.patch_old_proxy_nodes()
diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py
index ebc63fc61b..af5b2a2676 100644
--- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py
+++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py
@@ -108,3 +108,6 @@ class RedisShardUpdateFlowBuilder(BaseRedisTicketFlowBuilder):
     inner_flow_name = _("Redis 集群分片变更")
     resource_batch_apply_builder = RedisShardUpdateResourceParamBuilder
     need_patch_recycle_cluster_details = True
+
+    def patch_recycle_cluster_details(self):
+        super().patch_recycle_cluster_details(role="backend")
diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py
index e3df1fef7a..e6e1bd05e4 100644
--- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py
+++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py
@@ -110,4 +110,7 @@ class RedisTypeUpdateFlowBuilder(BaseRedisTicketFlowBuilder):
     inner_flow_builder = RedisTypeUpdateParamBuilder
     inner_flow_name = _("Redis 集群类型变更")
     resource_batch_apply_builder = RedisTypeUpdateResourceParamBuilder
     need_patch_recycle_cluster_details = True
+
+    def patch_recycle_cluster_details(self):
+        super().patch_recycle_cluster_details(role="backend")
diff --git a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py
index 179631f145..a8b878fbd9 100644
--- a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py
+++ b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py
@@ -20,7 +20,7 @@
 
 
 class SQLServerDestroyDetailSerializer(SQLServerTakeDownDetailsSerializer):
-    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT)
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.FAULT_DEFAULT)
 
 
 class SQLServerDestroyFlowParamBuilder(builders.FlowParamBuilder):
diff --git a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py
index 97613566c4..f06a8044b8 100644
--- a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py
+++ b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py
@@ -43,7 +43,7 @@ class OldSlaveSerializer(serializers.Serializer):
     ip_source = serializers.ChoiceField(
         help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL
     )
-    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT)
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.FAULT_DEFAULT)
 
     def validate(self, attrs):
         # 校验实例的角色为slave
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/base.py b/dbm-ui/backend/ticket/builders/tendbcluster/base.py
index 632f3a42f6..d0f17235a7 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/base.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/base.py
@@ -105,10 +105,14 @@ def validate_max_spider_master_mnt_count(self, attrs):
             Q(tendbclusterspiderext__spider_role=TenDBClusterSpiderRole.SPIDER_MASTER)
             | Q(tendbclusterspiderext__spider_role=TenDBClusterSpiderRole.SPIDER_MNT)
         ).count()
-        if self.context["ticket_type"] == TicketType.TENDBCLUSTER_SPIDER_ADD_NODES:
-            new_add_count = info["resource_spec"]["spider_ip_list"]["count"]
-        else:
+        # 获取扩容的spider数量
+        if "spider_ip_list" in info:
             new_add_count = len(info["spider_ip_list"])
+        else:
+            spider_ip_list = info["resource_spec"]["spider_ip_list"]
+            if "count" not in info["resource_spec"]["spider_ip_list"]:
+                spider_ip_list["count"] = len(spider_ip_list["hosts"])
+            new_add_count = spider_ip_list["count"]
         if spider_master_mnt_count + new_add_count > MAX_SPIDER_MASTER_COUNT:
             raise serializers.ValidationError(_("【{}】请保证集群部署的接入层主节点和运维节点的总和小于37").format(cluster.name))
@@ -124,7 +128,7 @@ def validate_min_spider_count(self, attrs):
         ).count()
 
         if not info.get("spider_reduced_to_count"):
-            info["spider_reduced_to_count"] = spider_node_count - len(info["spider_reduced_hosts"])
+            info["spider_reduced_to_count"] = spider_node_count - len(info["old_nodes"]["spider_reduced_hosts"])
         spider_reduced_to_count = info["spider_reduced_to_count"]
 
         if spider_reduced_to_count >= spider_node_count:
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_fixpoint_rollback.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_fixpoint_rollback.py
index a207f7f0b2..b813444732 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_fixpoint_rollback.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_fixpoint_rollback.py
@@ -22,7 +22,7 @@
 from backend.db_services.dbbase.constants import IpSource
 from backend.flow.engine.controller.spider import SpiderController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import HostInfoSerializer
+from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer
 from backend.ticket.builders.common.constants import FixpointRollbackType, RollbackBuildClusterType
 from backend.ticket.builders.common.field import DBTimezoneField
 from backend.ticket.builders.mysql.base import DBTableField
@@ -40,9 +40,12 @@ class RollbackHostSerializer(serializers.Serializer):
         remote_hosts = serializers.ListSerializer(help_text=_("remote新机器"), child=HostInfoSerializer())
 
     cluster_id = serializers.IntegerField(help_text=_("集群ID"))
-    target_cluster_id = serializers.IntegerField(help_text=_("回档集群ID"), default=False)
-    rollback_host = RollbackHostSerializer(help_text=_("备份新机器"), default=False)
+
+    target_cluster_id = serializers.IntegerField(help_text=_("回档集群ID"), required=False)
+    rollback_host = RollbackHostSerializer(help_text=_("备份新机器"), required=False)
+    resource_spec = serializers.JSONField(help_text=_("备份资源规格"), required=False)
     rollback_type = serializers.ChoiceField(help_text=_("回档类型"), choices=FixpointRollbackType.get_choices())
+
     rollback_time = DBTimezoneField(
         help_text=_("回档时间"), required=False, allow_blank=True, allow_null=True, default=""
     )
@@ -113,16 +116,26 @@ class TendbApplyTemporaryFlowParamBuilder(builders.FlowParamBuilder):
     controller = SpiderController.spider_cluster_apply_no_slave
 
     def format_ticket_data(self):
+        resource_spec = self.ticket_data["infos"][0]["resource_spec"]
         self.ticket_data = self.ticket_data["apply_details"]
         # 填充common参数
         super().add_common_params()
-        # 修改单据类型为部署类型
-        self.ticket_data["ticket_type"] = TicketType.TENDBCLUSTER_APPLY
+        # 修改单据类型为部署类型,填充规格
+        self.ticket_data.update(resource_spec=resource_spec, ticket_type=TicketType.TENDBCLUSTER_APPLY)
+
+
+class TendbFixPointRollbackResourceParamBuilder(BaseOperateResourceParamBuilder):
+    def format(self):
+        cluster = Cluster.objects.get(id=self.ticket_data["infos"][0]["cluster_id"])
+        self.ticket_data = {"resource_spec": self.ticket_data["infos"][0]["resource_spec"]}
+        self.ticket_data.update(bk_cloud_id=cluster.bk_cloud_id)
 
 
 @builders.BuilderFactory.register(TicketType.TENDBCLUSTER_ROLLBACK_CLUSTER, is_apply=True)
 class TendbFixPointRollbackFlowBuilder(BaseTendbTicketFlowBuilder):
     serializer = TendbFixPointRollbackDetailSerializer
+    # 这里不要用batch_builder,因为下一节点是部署临时集群
+    resource_apply_builder = TendbFixPointRollbackResourceParamBuilder
 
     def get_cluster_config(self, cluster, details):
         db_config = DBConfigApi.query_conf_item(
@@ -158,7 +171,11 @@ def get_cluster_apply_spec(self, cluster, details):
         if details["ip_source"] == IpSource.MANUAL_INPUT:
             rollback_host = self.ticket.details["infos"][0]["rollback_host"]
         else:
-            rollback_host = self.ticket.details["infos"][0]["resource_spec"]
+            resource_spec = self.ticket.details["infos"][0]["resource_spec"]
+            rollback_host = {
+                "remote_hosts": resource_spec["remote_hosts"]["hosts"],
+                "spider_host": resource_spec["spider_host"]["hosts"][0],
+            }
 
         remote_machine_count = len(rollback_host["remote_hosts"])
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py
index ad740cff66..26203704d6 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py
@@ -25,7 +25,7 @@ class TendbMNTApplyDetailSerializer(TendbBaseOperateDetailSerializer):
     class MNTApplySerializer(serializers.Serializer):
         cluster_id = serializers.IntegerField(help_text=_("集群ID"))
         bk_cloud_id = serializers.IntegerField(help_text=_("云区域ID"))
-        spider_ip_list = serializers.ListField(help_text=_("运维节点信息"), child=serializers.DictField())
+        spider_ip_list = serializers.ListField(help_text=_("运维节点信息"), child=serializers.DictField(), required=False)
         resource_spec = serializers.JSONField(help_text=_("资源规格参数"), required=False)
 
     infos = serializers.ListField(help_text=_("添加spider运维节点信息"), child=MNTApplySerializer())
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py
index d9a69885d1..c460e9ae18 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py
@@ -14,7 +14,7 @@
 
 from backend.flow.engine.controller.spider import SpiderController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import HostRecycleSerializer
+from backend.ticket.builders.common.base import HostInfoSerializer, HostRecycleSerializer
 from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer
 from backend.ticket.constants import TicketType
 
@@ -22,7 +22,7 @@
 class TendbMNTDestroyDetailSerializer(TendbBaseOperateDetailSerializer):
     class MNTDestroySerializer(serializers.Serializer):
         class OldMNTSerializer(serializers.Serializer):
-            spider_ip_list = serializers.ListField(child=serializers.DictField())
+            spider_ip_list = serializers.ListField(child=HostInfoSerializer())
 
         cluster_id = serializers.IntegerField(help_text=_("集群ID"))
         old_nodes = OldMNTSerializer(help_text=_("运维节点信息"))
@@ -39,6 +39,10 @@ def validate(self, attrs):
 class TendbMNTDestroyParamBuilder(builders.FlowParamBuilder):
     controller = SpiderController.reduce_spider_mnt_scene
 
+    def format_ticket_data(self):
+        for info in self.ticket_data["infos"]:
+            info["spider_ip_list"] = info.pop("old_nodes")["spider_ip_list"]
+
 
 @builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_MNT_DESTROY, is_recycle=True)
 class TendbMNTDestroyFlowBuilder(BaseTendbTicketFlowBuilder):
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py
index fff2f43a1a..aa608826fd 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py
@@ -94,3 +94,6 @@ class TendbMNTApplyFlowBuilder(BaseTendbTicketFlowBuilder):
     resource_batch_apply_builder = TendbNodeRebalanceResourceParamBuilder
     inner_flow_name = _("TendbCluster 集群容量变更")
     need_patch_recycle_cluster_details = True
+
+    def patch_recycle_cluster_details(self):
+        super().patch_recycle_cluster_details(role="backend")
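Note: the mnt-destroy and node-rebalance changes above follow the standardized "old_nodes" convention this protocol change introduces: serializers accept the hosts to recycle nested under old_nodes, and format_ticket_data flattens them back to the flat key the flow engine expects. A minimal sketch (keys taken from the diff; the host dict is illustrative):

    info = {
        "cluster_id": 1,
        "old_nodes": {"spider_ip_list": [{"ip": "127.0.0.1", "bk_host_id": 1}]},
    }
    # TendbMNTDestroyParamBuilder.format_ticket_data does exactly this per info:
    info["spider_ip_list"] = info.pop("old_nodes")["spider_ip_list"]
    assert "old_nodes" not in info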
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py
index 07896b82b1..a3fa329cf1 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py
@@ -45,7 +45,7 @@ class TendbSpiderReduceNodesFlowParamBuilder(builders.FlowParamBuilder):
     controller = SpiderController.reduce_spider_nodes_scene
 
     def format_ticket_data(self):
-        for info in self.ticket_data:
+        for info in self.ticket_data["infos"]:
             info["spider_reduced_hosts"] = info.pop("old_nodes")["spider_reduced_hosts"]
 
 
@@ -68,10 +68,12 @@ def calc_reduce_spider(self):
             cluster = cluster_map[info["cluster_id"]]
             reduce_spider_role = info["reduce_spider_role"]
 
+            info["old_nodes"] = {}
+
+            # Collect the spiders that hold the target role
             spider_set = [
                 proxy
-                for proxy in cluster.proxyinstance_set
+                for proxy in cluster.proxyinstance_set.all()
                 if proxy.tendbclusterspiderext.spider_role == reduce_spider_role
             ]
             spider_count = len(spider_set)
@@ -82,7 +84,7 @@ def calc_reduce_spider(self):
             except_reduce_spiders = [spider for spider in spider_set if spider.machine.ip != ctl_primary_ip]
             info["old_nodes"]["spider_reduced_hosts"] = [
                 {"ip": s.machine.ip, "bk_host_id": s.machine.bk_host_id}
-                for s in except_reduce_spiders[: spider_count - reduce_spider_role]
+                for s in except_reduce_spiders[: spider_count - info["spider_reduced_to_count"]]
             ]
 
     def patch_ticket_detail(self):
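Note: the last hunk above fixes a genuine bug: the slice bound used reduce_spider_role (a role string) where a host count belongs, which would raise a TypeError on int minus str; it now trims the candidates down to spider_reduced_to_count survivors while always sparing the ctl primary. The same arithmetic on plain data (names mirror the diff; inputs are made up):

    spider_ips = ["127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"]
    ctl_primary_ip = "127.0.0.1"
    spider_reduced_to_count = 2  # spiders that must remain after the shrink

    candidates = [ip for ip in spider_ips if ip != ctl_primary_ip]
    reduced = candidates[: len(spider_ips) - spider_reduced_to_count]
    assert reduced == ["127.0.0.2", "127.0.0.3"]  # the ctl primary is never reduced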
"group": ticket.group, + "recycle_hosts": recycle_hosts, + "ip_recycle": {"ip_dest": ip_dest, "for_biz": revoke_ticket.bk_biz_id}, + "group": revoke_ticket.group, } recycle_ticket = cls.create_ticket( ticket_type=TicketType.RECYCLE_HOST, - creator=ticket.creator, - bk_biz_id=ticket.bk_biz_id, - remark=_("单据{}终止后自动发起清理机器单据").format(ticket.id), + creator=revoke_ticket.creator, + bk_biz_id=revoke_ticket.bk_biz_id, + remark=_("单据{}终止后自动发起清理机器单据").format(revoke_ticket.id), details=details, ) # 对原单据动态插入一个描述flow,关联这个回收单 Flow.objects.create( - ticket=ticket, + ticket=revoke_ticket, flow_type=FlowType.DELIVERY.value, details={"recycle_ticket": recycle_ticket.id}, flow_alias=_("申请主机清理释放"), diff --git a/dbm-ui/backend/ticket/signals.py b/dbm-ui/backend/ticket/signals.py index d1e07a7550..557a303ee8 100644 --- a/dbm-ui/backend/ticket/signals.py +++ b/dbm-ui/backend/ticket/signals.py @@ -8,8 +8,9 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ +from backend.ticket.constants import FlowType, TicketFlowStatus from backend.ticket.flow_manager.manager import TicketFlowManager -from backend.ticket.models import Flow +from backend.ticket.models import Flow, Ticket def update_ticket_status(sender, instance: Flow, **kwargs): @@ -18,4 +19,9 @@ def update_ticket_status(sender, instance: Flow, **kwargs): """ if not instance.pk: return + + # 如果是inner flow的终止,要联动回收主机 + if instance.flow_type == FlowType.INNER_FLOW and instance.status == TicketFlowStatus.TERMINATED: + Ticket.create_recycle_ticket(revoke_ticket_id=instance.ticket.id) + TicketFlowManager(instance.ticket).update_ticket_status()