diff --git a/dbm-ui/backend/db_dirty/admin.py b/dbm-ui/backend/db_dirty/admin.py
index 86f0e6ac8e..7d7e37f57e 100644
--- a/dbm-ui/backend/db_dirty/admin.py
+++ b/dbm-ui/backend/db_dirty/admin.py
@@ -15,6 +15,13 @@
 @admin.register(models.DirtyMachine)
 class DirtyMachineAdmin(admin.ModelAdmin):
-    list_display = ("ip", "bk_biz_id", "bk_host_id", "flow", "ticket")
-    list_filter = ("ip", "bk_biz_id", "bk_host_id", "flow", "ticket")
-    search_fields = ("ip", "bk_biz_id", "bk_host_id", "flow", "ticket")
+    list_display = ("ip", "bk_biz_id", "bk_host_id", "ticket", "pool")
+    list_filter = ("ip", "ticket", "pool")
+    search_fields = ("ip", "bk_biz_id", "bk_host_id")
+
+
+@admin.register(models.MachineEvent)
+class MachineEventAdmin(admin.ModelAdmin):
+    list_display = ("ip", "bk_biz_id", "bk_host_id", "event", "to", "ticket")
+    list_filter = ("ip", "bk_biz_id", "to")
+    search_fields = ("ip", "bk_biz_id", "bk_host_id")
diff --git a/dbm-ui/backend/db_dirty/constants.py b/dbm-ui/backend/db_dirty/constants.py
index 339366e28c..54e9f91f3d 100644
--- a/dbm-ui/backend/db_dirty/constants.py
+++ b/dbm-ui/backend/db_dirty/constants.py
@@ -11,4 +11,42 @@
 from django.utils.translation import ugettext_lazy as _
 
-SWAGGER_TAG = _("污点池")
+from blue_krill.data_types.enum import EnumField, StructuredEnum
+
+SWAGGER_TAG = _("主机池")
+
+
+class PoolType(str, StructuredEnum):
+    # Pools managed by DBM: dirty pool, fault pool, to-be-recycled pool
+    Dirty = EnumField("dirty", _("污点池"))
+    Fault = EnumField("fault", _("故障池"))
+    Recycle = EnumField("recycle", _("待回收池"))
+    # The resource pool is not maintained by SaaS; it is managed by the standalone resource pool service
+    Resource = EnumField("resource", _("资源池"))
+    # "Recycled" means the host has been handed back to CC for recycling and no longer flows through DBM
+    Recycled = EnumField("recycled", _("已回收"))
+    # "Apply" means the host is deployed and in use
+    APPLY = EnumField("apply", _("已部署"))
+
+
+class MachineEventType(str, StructuredEnum):
+    ImportResource = EnumField("import_resource", _("导入资源池"))
+    ApplyResource = EnumField("apply_resource", _("申请资源"))
+    ReturnResource = EnumField("return_resource", _("退回资源"))
+    ToDirty = EnumField("to_dirty", _("转入污点池"))
+    ToRecycle = EnumField("to_recycle", _("转入待回收池"))
+    ToFault = EnumField("to_fault", _("转入故障池"))
+    UndoImport = EnumField("undo_import", _("撤销导入"))
+    Recycled = EnumField("recycled", _("回收"))
+
+
+MACHINE_EVENT__POOL_MAP = {
+    MachineEventType.ToDirty: PoolType.Dirty,
+    MachineEventType.ToRecycle: PoolType.Recycle,
+    MachineEventType.ToFault: PoolType.Fault,
+    MachineEventType.ImportResource: PoolType.Resource,
+    MachineEventType.ReturnResource: PoolType.Resource,
+    MachineEventType.Recycled: PoolType.Recycled,
+    MachineEventType.UndoImport: PoolType.Recycled,
+    MachineEventType.ApplyResource: PoolType.APPLY,
+}
diff --git a/dbm-ui/backend/db_dirty/exceptions.py b/dbm-ui/backend/db_dirty/exceptions.py
new file mode 100644
index 0000000000..9c27f85dbd
--- /dev/null
+++ b/dbm-ui/backend/db_dirty/exceptions.py
@@ -0,0 +1,24 @@
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+"""
+
+from django.utils.translation import ugettext_lazy as _
+
+from backend.exceptions import AppBaseException, ErrorCode
+
+
+class DBDirtyPoolBaseException(AppBaseException):
+    MODULE_CODE = ErrorCode.DB_DIRTY_POOL_CODE
+    MESSAGE = _("主机池异常")
+
+
+class PoolTransferException(DBDirtyPoolBaseException):
+    ERROR_CODE = "001"
+    MESSAGE = _("主机池转移异常")
+    MESSAGE_TPL = _("主机池转移异常")
diff --git a/dbm-ui/backend/db_dirty/filters.py b/dbm-ui/backend/db_dirty/filters.py
index e73bec5d1d..02610f1c67 100644
--- a/dbm-ui/backend/db_dirty/filters.py
+++ b/dbm-ui/backend/db_dirty/filters.py
@@ -13,7 +13,7 @@
 from django_filters import rest_framework as filters
 from django_filters.filters import BaseInFilter, NumberFilter
 
-from backend.db_dirty.models import DirtyMachine
+from backend.db_dirty.models import DirtyMachine, MachineEvent
 
 
 class NumberInFilter(BaseInFilter, NumberFilter):
@@ -50,3 +50,29 @@ def filter_task_ids(self, queryset, name, value):
     class Meta:
         model = DirtyMachine
         fields = ["ticket_types", "ticket_ids", "task_ids", "operator", "ip"]
+
+
+class MachineEventFilter(filters.FilterSet):
+    operator = filters.CharFilter(field_name="creator", lookup_expr="icontains", label=_("操作者"))
+    bk_biz_id = filters.NumberFilter(field_name="bk_biz_id", label=_("业务"))
+    event = filters.CharFilter(field_name="event", lookup_expr="exact", label=_("事件类型"))
+    ips = filters.CharFilter(field_name="ip", method="filter_ips", label=_("过滤IP"))
+
+    def filter_ips(self, queryset, name, value):
+        return queryset.filter(ip__in=value.split(","))
+
+    class Meta:
+        model = MachineEvent
+        fields = ["operator", "bk_biz_id", "event", "ips"]
+
+
+class DirtyMachinePoolFilter(filters.FilterSet):
+    ips = filters.CharFilter(field_name="ip", method="filter_ips", label=_("过滤IP"))
+    pool = filters.CharFilter(field_name="pool", lookup_expr="exact", label=_("主机池类型"))
+
+    def filter_ips(self, queryset, name, value):
+        return queryset.filter(ip__in=value.split(","))
+
+    class Meta:
+        model = DirtyMachine
+        fields = ["ips", "pool"]
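Taken together, `MachineEventType` and `MACHINE_EVENT__POOL_MAP` form a small state machine: each event maps to the pool a host occupies afterwards. A minimal sketch of the intended lookup, using only the enums defined in `backend/db_dirty/constants.py` above:

```python
from backend.db_dirty.constants import MACHINE_EVENT__POOL_MAP, MachineEventType, PoolType

# A ToFault event parks the host in the fault pool, while UndoImport ends
# the host's lifecycle in DBM, so it maps to the terminal "recycled" state.
assert MACHINE_EVENT__POOL_MAP[MachineEventType.ToFault] == PoolType.Fault
assert MACHINE_EVENT__POOL_MAP[MachineEventType.UndoImport] == PoolType.Recycled
```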
diff --git a/dbm-ui/backend/db_dirty/handlers.py b/dbm-ui/backend/db_dirty/handlers.py
index 97d4e0d238..fc2004179a 100644
--- a/dbm-ui/backend/db_dirty/handlers.py
+++ b/dbm-ui/backend/db_dirty/handlers.py
@@ -17,10 +17,11 @@
 
 from backend import env
 from backend.components import CCApi
-from backend.components.dbresource.client import DBResourceApi
 from backend.configuration.constants import SystemSettingsEnum
 from backend.configuration.models import SystemSettings
-from backend.db_dirty.models import DirtyMachine
+from backend.db_dirty.constants import MachineEventType, PoolType
+from backend.db_dirty.exceptions import PoolTransferException
+from backend.db_dirty.models import DirtyMachine, MachineEvent
 from backend.db_meta.models import AppCache
 from backend.db_services.ipchooser.constants import IDLE_HOST_MODULE
 from backend.db_services.ipchooser.handlers.topo_handler import TopoHandler
@@ -28,8 +29,8 @@
 from backend.flow.consts import FAILED_STATES
 from backend.flow.utils.cc_manage import CcManage
 from backend.ticket.builders import BuilderFactory
+from backend.ticket.builders.common.base import fetch_apply_hosts
 from backend.ticket.models import Flow, Ticket
-from backend.utils.basic import get_target_items_from_details
 from backend.utils.batch_request import request_multi_thread
 
 logger = logging.getLogger("root")
@@ -41,28 +42,34 @@ class DBDirtyMachineHandler(object):
     """
 
     @classmethod
-    def transfer_dirty_machines(cls, bk_host_ids: List[int]):
+    def transfer_hosts_to_pool(cls, operator: str, bk_host_ids: List[int], source: PoolType, target: PoolType):
         """
-        Move dirty hosts to the to-be-recycled module and remove them from the resource pool
-        @param bk_host_ids: host ID list
+        Transfer hosts between the fault / to-be-recycled / recycled pools
+        @param operator: operator name
+        @param bk_host_ids: host ID list
+        @param source: source pool of the hosts
+        @param target: target pool of the hosts
         """
-        # Move the hosts to the to-be-recycled module
-        dirty_machines = DirtyMachine.objects.filter(bk_host_id__in=bk_host_ids)
-        bk_biz_id__host_ids = defaultdict(list)
-        for machine in dirty_machines:
-            bk_biz_id__host_ids[machine.bk_biz_id].append(machine.bk_host_id)
+        # Group the hosts by business; itertools.groupby requires input sorted by the grouping key
+        recycle_hosts = DirtyMachine.objects.filter(bk_host_id__in=bk_host_ids).order_by("bk_biz_id")
+        biz_grouped_recycle_hosts = itertools.groupby(recycle_hosts, key=lambda x: x.bk_biz_id)
 
-        for bk_biz_id, bk_host_ids in bk_biz_id__host_ids.items():
-            CcManage(int(bk_biz_id), "").recycle_host(bk_host_ids)
-
-        # Delete the dirty records and remove the hosts from the resource pool
-        # (ignore deletion errors: the hosts may not come from the resource pool)
-        dirty_machines.delete()
-        DBResourceApi.resource_delete(params={"bk_host_ids": bk_host_ids}, raise_exception=False)
+        for bk_biz_id, hosts in biz_grouped_recycle_hosts:
+            hosts = [{"bk_host_id": host.bk_host_id} for host in hosts]
+            # to-be-recycled pool ---> recycled: hand the hosts back to CC
+            if source == PoolType.Recycle and target == PoolType.Recycled:
+                CcManage(bk_biz_id, "").recycle_host([h["bk_host_id"] for h in hosts])
+                MachineEvent.host_event_trigger(bk_biz_id, hosts, event=MachineEventType.Recycled, operator=operator)
+            # fault pool ---> to-be-recycled pool
+            elif source == PoolType.Fault and target == PoolType.Recycle:
+                MachineEvent.host_event_trigger(bk_biz_id, hosts, event=MachineEventType.ToRecycle, operator=operator)
+            else:
+                raise PoolTransferException(_("{}--->{}转移不合法").format(source, target))
 
     @classmethod
     def query_dirty_machine_records(cls, bk_host_ids: List[int]):
         """
-        Query dirty-pool host records
+        Query dirty-pool host records. TODO: the dirty pool is deprecated; this code will be removed
         @param bk_host_ids: host ID list
         """
@@ -165,7 +172,7 @@ def get_module_data(data):
 
     @classmethod
     def insert_dirty_machines(cls, bk_biz_id: int, bk_host_ids: List[Dict[str, Any]], ticket: Ticket, flow: Flow):
         """
-        Import hosts into the dirty pool
+        Import hosts into the dirty pool. TODO: the dirty pool is deprecated; this code will be removed
         @param bk_biz_id: business ID
         @param bk_host_ids: host list
         @param ticket: associated ticket
         @param flow: associated flow
@@ -223,18 +230,6 @@ def insert_dirty_machines(cls, bk_biz_id: int, bk_host_ids: List[Dict[str, Any]]
             ]
         )
 
-    @classmethod
-    def remove_dirty_machines(cls, bk_host_ids: List[Dict[str, Any]]):
-        """
-        Move hosts out of the dirty pool; usually called after a retry.
-        Only the records need deleting; no module transfer is required, because:
-        1. If the retry fails again, the hosts return to the dirty pool and their module is unchanged
-        2. If the retry succeeds, the flow has already moved the hosts to the proper DB module
-        3. If handled manually, the hosts are moved to the to-be-recycled module
-        @param bk_host_ids: host list
-        """
-        DirtyMachine.objects.filter(bk_host_id__in=bk_host_ids).delete()
-
     @classmethod
     def handle_dirty_machine(cls, ticket_id, root_id, origin_tree_status, target_tree_status):
         """Handle dirty-pool hosts involved in failed executions / successful retries"""
@@ -243,7 +238,6 @@ def handle_dirty_machine(cls, ticket_id, root_id, origin_tree_status, target_tre
 
         try:
             ticket = Ticket.objects.get(id=ticket_id)
-            flow = Flow.objects.get(flow_obj_id=root_id)
             # Non-deployment tickets need no handling
             if ticket.ticket_type not in BuilderFactory.apply_ticket_type:
                 return
@@ -251,20 +245,19 @@ def handle_dirty_machine(cls, ticket_id, root_id, origin_tree_status, target_tre
             return
 
-        # If the origin state is failed, this is a retry: remove the hosts from the dirty pool
-        bk_host_ids = get_target_items_from_details(
-            obj=ticket.details, match_keys=["host_id", "bk_host_id", "bk_host_ids"]
-        )
+        hosts = fetch_apply_hosts(ticket.details)
+        bk_host_ids = [h["bk_host_id"] for h in hosts]
         if not bk_host_ids:
             return
 
+        # If the origin state is failed, this is a retry: just delete the dirty records
        if origin_tree_status in FAILED_STATES:
             logger.info(_("【污点池】主机列表:{} 将从污点池挪出").format(bk_host_ids))
-            DBDirtyMachineHandler.remove_dirty_machines(bk_host_ids)
+            DirtyMachine.objects.filter(bk_host_id__in=bk_host_ids).delete()
 
         # If the target state is failed, the execution failed: move the hosts into the dirty pool
         if target_tree_status in FAILED_STATES:
-            logger.info(_("【污点池】单据-{}:任务-{}执行失败,主机列表:{}挪到污点池").format(ticket_id, root_id, bk_host_ids))
-            DBDirtyMachineHandler.insert_dirty_machines(
-                bk_biz_id=ticket.bk_biz_id, bk_host_ids=bk_host_ids, ticket=ticket, flow=flow
-            )
+            logger.info(_("【污点池】主机列表:{} 移入污点池").format(bk_host_ids))
+            MachineEvent.host_event_trigger(ticket.bk_biz_id, hosts, MachineEventType.ToDirty, ticket.creator, ticket)
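`transfer_hosts_to_pool` only accepts the two transitions below; any other `(source, target)` pair raises `PoolTransferException`. A usage sketch with illustrative host IDs:

```python
from backend.db_dirty.constants import PoolType
from backend.db_dirty.handlers import DBDirtyMachineHandler

# fault pool ---> to-be-recycled pool: records a ToRecycle event
DBDirtyMachineHandler.transfer_hosts_to_pool(
    operator="admin", bk_host_ids=[101, 102], source=PoolType.Fault, target=PoolType.Recycle
)
# to-be-recycled pool ---> recycled: hands the hosts back to CC and records Recycled
DBDirtyMachineHandler.transfer_hosts_to_pool(
    operator="admin", bk_host_ids=[101, 102], source=PoolType.Recycle, target=PoolType.Recycled
)
```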
diff --git a/dbm-ui/backend/db_dirty/migrations/0003_auto_20240925_1526.py b/dbm-ui/backend/db_dirty/migrations/0003_auto_20240925_1526.py
new file mode 100644
index 0000000000..fa08cd3818
--- /dev/null
+++ b/dbm-ui/backend/db_dirty/migrations/0003_auto_20240925_1526.py
@@ -0,0 +1,126 @@
+# Generated by Django 3.2.25 on 2024-09-25 07:26
+
+import django.db.models.deletion
+from django.db import migrations, models
+
+from backend.db_dirty.constants import MachineEventType, PoolType
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("ticket", "0012_alter_ticket_remark"),
+        ("db_dirty", "0002_alter_dirtymachine_options"),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name="dirtymachine",
+            name="flow",
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="bk_cpu",
+            field=models.IntegerField(default=0, help_text="cpu"),
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="bk_disk",
+            field=models.IntegerField(default=0, help_text="磁盘"),
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="bk_mem",
+            field=models.IntegerField(default=0, help_text="内存"),
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="city",
+            field=models.CharField(blank=True, default="", help_text="城市", max_length=128, null=True),
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="device_class",
+            field=models.CharField(blank=True, default="", help_text="机型", max_length=128, null=True),
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="os_name",
+            field=models.CharField(blank=True, default="", help_text="操作系统", max_length=128, null=True),
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="pool",
+            field=models.CharField(
+                choices=PoolType.get_choices(),
+                default="dirty",
+                help_text="池类型",
+                max_length=128,
+            ),
+            preserve_default=False,
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="rack_id",
+            field=models.CharField(blank=True, default="", help_text="机架", max_length=128, null=True),
+        ),
+        migrations.AddField(
+            model_name="dirtymachine",
+            name="sub_zone",
+            field=models.CharField(blank=True, default="", help_text="园区", max_length=128, null=True),
+        ),
+        migrations.AlterField(
+            model_name="dirtymachine",
+            name="ticket",
+            field=models.ForeignKey(
+                blank=True,
+                help_text="关联单据",
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                to="ticket.ticket",
+            ),
+        ),
+        migrations.CreateModel(
+            name="MachineEvent",
+            fields=[
+                ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+                ("creator", models.CharField(max_length=64, verbose_name="创建人")),
+                ("create_at", models.DateTimeField(auto_now_add=True, verbose_name="创建时间")),
+                ("updater", models.CharField(max_length=64, verbose_name="修改人")),
+                ("update_at", models.DateTimeField(auto_now=True, verbose_name="更新时间")),
+                ("bk_biz_id", models.IntegerField(default=0, help_text="业务ID")),
+                ("ip", models.CharField(help_text="主机IP", max_length=128)),
+                ("bk_host_id", models.PositiveBigIntegerField(help_text="主机ID")),
+                (
+                    "event",
+                    models.CharField(
+                        choices=MachineEventType.get_choices(),
+                        help_text="事件类型",
+                        max_length=128,
+                    ),
+                ),
+                (
+                    "to",
+                    models.CharField(
+                        blank=True,
+                        choices=PoolType.get_choices(),
+                        help_text="资源流向",
+                        max_length=128,
+                        null=True,
+                    ),
+                ),
+                (
+                    "ticket",
+                    models.ForeignKey(
+                        blank=True,
+                        help_text="关联单据",
+                        null=True,
+                        on_delete=django.db.models.deletion.CASCADE,
+                        to="ticket.ticket",
+                    ),
+                ),
+            ],
+            options={
+                "verbose_name": "机器事件记录",
+                "verbose_name_plural": "机器事件记录",
+            },
+        ),
+    ]
diff --git a/dbm-ui/backend/db_dirty/models.py b/dbm-ui/backend/db_dirty/models.py
index adaa0e3a9e..f083086e6e 100644
--- a/dbm-ui/backend/db_dirty/models.py
+++ b/dbm-ui/backend/db_dirty/models.py
@@ -8,26 +8,130 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 specific language governing permissions and limitations under the License.
 """
+from collections import defaultdict
+from typing import Tuple
 
 from django.db import models
 from django.utils.translation import ugettext_lazy as _
 
 from backend.bk_web.constants import LEN_MIDDLE
 from backend.bk_web.models import AuditedModel
-from backend.ticket.models import Flow, Ticket
+from backend.db_dirty.constants import MACHINE_EVENT__POOL_MAP, MachineEventType, PoolType
+from backend.db_services.dbresource.handlers import ResourceHandler
+from backend.ticket.models import Ticket
 
 
 class DirtyMachine(AuditedModel):
     """
-    Dirty host records: hosts applied from the resource pool whose deployment failed and was left unhandled
+    Host pool: dirty pool, fault pool and to-be-recycled pool
     """
 
     bk_biz_id = models.IntegerField(default=0, help_text=_("业务ID"))
     bk_host_id = models.PositiveBigIntegerField(primary_key=True, default=0, help_text=_("主机ID"))
     bk_cloud_id = models.IntegerField(default=0, help_text=_("主机云区域"))
     ip = models.CharField(max_length=LEN_MIDDLE, help_text=_("主机IP"))
-    flow = models.ForeignKey(Flow, on_delete=models.CASCADE, help_text=_("关联任务"))
-    ticket = models.ForeignKey(Ticket, on_delete=models.CASCADE, help_text=_("关联单据"))
+    city = models.CharField(max_length=LEN_MIDDLE, default="", blank=True, null=True, help_text=_("城市"))
+    sub_zone = models.CharField(max_length=LEN_MIDDLE, default="", blank=True, null=True, help_text=_("园区"))
+    rack_id = models.CharField(max_length=LEN_MIDDLE, default="", blank=True, null=True, help_text=_("机架"))
+    device_class = models.CharField(max_length=LEN_MIDDLE, default="", blank=True, null=True, help_text=_("机型"))
+    os_name = models.CharField(max_length=LEN_MIDDLE, default="", blank=True, null=True, help_text=_("操作系统"))
+    bk_cpu = models.IntegerField(default=0, help_text=_("cpu"))
+    bk_mem = models.IntegerField(default=0, help_text=_("内存"))
+    bk_disk = models.IntegerField(default=0, help_text=_("磁盘"))
+
+    ticket = models.ForeignKey(Ticket, on_delete=models.CASCADE, help_text=_("关联单据"), null=True, blank=True)
+
+    pool = models.CharField(help_text=_("池类型"), max_length=LEN_MIDDLE, choices=PoolType.get_choices())
 
     class Meta:
         verbose_name = verbose_name_plural = _("污点池机器(DirtyMachine)")
+
+    @classmethod
+    def host_fields(cls):
+        non_host_fields = ["bk_biz_id", "pool", "ticket", *AuditedModel.AUDITED_FIELDS]
+        fields = [field.name for field in cls._meta.fields if field.name not in non_host_fields]
+        return fields
+
+    @classmethod
+    def hosts_pool_transfer(cls, bk_biz_id, hosts, pool, operator="", ticket=None):
+        """Transfer hosts into a host pool"""
+        hosts = [{field: host.get(field) for field in cls.host_fields()} for host in hosts]
+        host_ids = [host["bk_host_id"] for host in hosts]
+
+        # Entering the dirty/fault pool means the hosts are managed by a pool for the first time
+        if pool in [PoolType.Fault, PoolType.Dirty]:
+            hosts_pool = [
+                cls(bk_biz_id=bk_biz_id, pool=pool, ticket=ticket, creator=operator, updater=operator, **host)
+                for host in hosts
+            ]
+            cls.objects.bulk_create(hosts_pool)
+        # The to-be-recycled pool is only ever entered from the fault pool
+        elif pool == PoolType.Recycle:
+            cls.objects.filter(bk_host_id__in=host_ids).update(pool=pool, ticket=ticket)
+        # Recycled hosts only come from the to-be-recycled pool: drop the pool record.
+        # Re-importing into the resource pool also drops the pool record.
+        elif pool in [PoolType.Recycled, PoolType.Resource]:
+            cls.objects.filter(bk_host_id__in=host_ids).delete()
+
+
+class MachineEvent(AuditedModel):
+    """
+    Machine events: records how hosts flow between pools
+    """
+
+    bk_biz_id = models.IntegerField(default=0, help_text=_("业务ID"))
+    ip = models.CharField(max_length=LEN_MIDDLE, help_text=_("主机IP"))
+    bk_host_id = models.PositiveBigIntegerField(help_text=_("主机ID"))
+    event = models.CharField(help_text=_("事件类型"), max_length=LEN_MIDDLE, choices=MachineEventType.get_choices())
+    to = models.CharField(
+        help_text=_("资源流向"), max_length=LEN_MIDDLE, choices=PoolType.get_choices(), null=True, blank=True
+    )
+    ticket = models.ForeignKey(Ticket, on_delete=models.CASCADE, help_text=_("关联单据"), null=True, blank=True)
+
+    class Meta:
+        verbose_name = verbose_name_plural = _("机器事件记录")
+
+    @classmethod
+    def hosts_can_return(cls, bk_host_ids) -> Tuple[bool, str]:
+        """Check whether the hosts can be returned to the resource pool"""
+        host_events = cls.objects.filter(bk_host_id__in=bk_host_ids).order_by("id")
+
+        grouped_events = defaultdict(list)
+        for event in host_events:
+            grouped_events[event.bk_host_id].append(event)
+
+        # A host cannot be returned if its most recent event is not an import
+        for host_id, events in grouped_events.items():
+            if events and events[-1].event != MachineEventType.ImportResource:
+                return False, _("主机经历过流转事件: {}").format(MachineEventType.get_choice_label(events[-1].event))
+
+        return True, ""
+
+    @classmethod
+    def host_event_trigger(cls, bk_biz_id, hosts, event, operator="", ticket=None, standard=False):
+        """Trigger a host event and the matching pool transfer"""
+        pool = MACHINE_EVENT__POOL_MAP.get(event)
+        # If the host info is not standardized, query CC to standardize it
+        if not standard:
+            hosts = ResourceHandler.standardized_resource_host(hosts, bk_biz_id)
+        # Host pool transfer
+        if pool:
+            DirtyMachine.hosts_pool_transfer(bk_biz_id, hosts, pool, operator, ticket)
+        # Moving into the dirty pool does not record a machine event
+        if event == MachineEventType.ToDirty:
+            return
+        # Record the events
+        events = [
+            MachineEvent(
+                bk_biz_id=bk_biz_id,
+                ip=host["ip"],
+                bk_host_id=host["bk_host_id"],
+                event=event,
+                to=pool,
+                ticket=ticket,
+                creator=operator,
+                updater=operator,
+            )
+            for host in hosts
+        ]
+        MachineEvent.objects.bulk_create(events)
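`MachineEvent.host_event_trigger` is the single entry point that both moves hosts between pools (via `hosts_pool_transfer`) and writes the event log. A sketch of the failure path used by `handle_dirty_machine`, with illustrative host data:

```python
from backend.db_dirty.constants import MachineEventType
from backend.db_dirty.models import MachineEvent

hosts = [{"bk_host_id": 101, "ip": "127.0.0.1"}]  # illustrative
# With the default standard=False, the hosts are first standardized through CC.
MachineEvent.host_event_trigger(bk_biz_id=3, hosts=hosts, event=MachineEventType.ToDirty, operator="admin")
# ToDirty creates DirtyMachine rows with pool="dirty" but, by design,
# skips writing a MachineEvent row (see the early return above).
```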
diff --git a/dbm-ui/backend/db_dirty/serializers.py b/dbm-ui/backend/db_dirty/serializers.py
index 2b9caccd27..934002fafa 100644
--- a/dbm-ui/backend/db_dirty/serializers.py
+++ b/dbm-ui/backend/db_dirty/serializers.py
@@ -12,8 +12,12 @@
 from django.utils.translation import ugettext_lazy as _
 from rest_framework import serializers
 
+from backend.db_dirty.constants import PoolType
 from backend.db_dirty.mock import DIRTY_MACHINE_LIST
+from backend.db_dirty.models import DirtyMachine, MachineEvent
+from backend.db_meta.models import AppCache
 from backend.ticket.constants import TicketType
+from backend.ticket.models import Ticket
 
 
 class QueryDirtyMachineSerializer(serializers.Serializer):
@@ -23,9 +27,6 @@
     ticket_type = serializers.ChoiceField(help_text=_("过滤的单据类型"), choices=TicketType.get_choices(), required=False)
     operator = serializers.CharField(help_text=_("操作人"), required=False)
 
-    limit = serializers.IntegerField(help_text=_("分页限制"), required=False, default=10)
-    offset = serializers.IntegerField(help_text=_("分页起始"), required=False, default=0)
-
     def validate(self, attrs):
         if "ip_list" in attrs:
             attrs["ip_list"] = attrs["ip_list"].split(",")
@@ -40,7 +41,54 @@ class Meta:
 
 class TransferDirtyMachineSerializer(serializers.Serializer):
     bk_host_ids = serializers.ListField(child=serializers.IntegerField(), help_text=_("待转移的主机ID列表"))
+    source = serializers.ChoiceField(help_text=_("主机来源"), choices=PoolType.get_choices())
+    target = serializers.ChoiceField(help_text=_("主机去向"), choices=PoolType.get_choices())
 
 
 class DeleteDirtyMachineSerializer(serializers.Serializer):
     bk_host_ids = serializers.ListField(child=serializers.IntegerField(), help_text=_("待删除的污点池记录主机ID"))
+
+
+class ListMachineEventSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = MachineEvent
+        fields = "__all__"
+
+    @property
+    def biz_map(self):
+        if not hasattr(self, "_biz_map"):
+            biz_ids = [event.bk_biz_id for event in self.instance]
+            biz_map = {biz.bk_biz_id: biz for biz in AppCache.objects.filter(bk_biz_id__in=biz_ids)}
+            setattr(self, "_biz_map", biz_map)
+        return self._biz_map
+
+    @property
+    def ticket_cluster_map(self):
+        if not hasattr(self, "_ticket_cluster_map"):
+            ticket_ids = [event.ticket.id for event in self.instance if event.ticket]
+            tickets = Ticket.objects.filter(id__in=ticket_ids)
+            ticket_cluster_map = {ticket.id: ticket.details.get("clusters", {}).values() for ticket in tickets}
+            setattr(self, "_ticket_cluster_map", ticket_cluster_map)
+        return self._ticket_cluster_map
+
+    def to_representation(self, instance):
+        biz, ticket_data = self.biz_map[instance.bk_biz_id], self.ticket_cluster_map.get(instance.ticket_id, [])
+        instance = super().to_representation(instance)
+        instance.update(bk_biz_name=biz.bk_biz_name, db_app_abbr=biz.db_app_abbr, clusters=ticket_data)
+        return instance
+
+
+class ListMachineEventResponseSerializer(serializers.Serializer):
+    class Meta:
+        swagger_schema_fields = {"example": {}}
+
+
+class ListMachinePoolSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = DirtyMachine
+        fields = "__all__"
+
+
+class ListMachinePoolResponseSerializer(serializers.Serializer):
+    class Meta:
+        swagger_schema_fields = {"example": {}}
diff --git a/dbm-ui/backend/db_dirty/views.py b/dbm-ui/backend/db_dirty/views.py
index 42234f6326..f18f346568 100644
--- a/dbm-ui/backend/db_dirty/views.py
+++ b/dbm-ui/backend/db_dirty/views.py
@@ -19,11 +19,15 @@
 from backend.bk_web.pagination import AuditedLimitOffsetPagination
 from backend.bk_web.swagger import common_swagger_auto_schema
 from backend.db_dirty.constants import SWAGGER_TAG
-from backend.db_dirty.filters import DirtyMachineFilter
+from backend.db_dirty.filters import DirtyMachineFilter, DirtyMachinePoolFilter, MachineEventFilter
 from backend.db_dirty.handlers import DBDirtyMachineHandler
-from backend.db_dirty.models import DirtyMachine
+from backend.db_dirty.models import DirtyMachine, MachineEvent
 from backend.db_dirty.serializers import (
     DeleteDirtyMachineSerializer,
+    ListMachineEventResponseSerializer,
+    ListMachineEventSerializer,
+    ListMachinePoolResponseSerializer,
+    ListMachinePoolSerializer,
     QueryDirtyMachineResponseSerializer,
     QueryDirtyMachineSerializer,
     TransferDirtyMachineSerializer,
@@ -35,14 +39,14 @@
 
 
 class DBDirtyMachineViewSet(viewsets.SystemViewSet):
-    pagination_class = None
+    pagination_class = AuditedLimitOffsetPagination
     filter_class = None
     action_permission_map = {("query_operation_list",): []}
     default_permission_class = [ResourceActionPermission([ActionEnum.DIRTY_POLL_MANAGE])]
 
     @common_swagger_auto_schema(
-        operation_summary=_("查询污点池列表"),
+        operation_summary=_("[TODO待删除]查询污点池列表"),
         responses={status.HTTP_200_OK: QueryDirtyMachineResponseSerializer()},
         tags=[SWAGGER_TAG],
     )
@@ -80,23 +84,7 @@ def query_operation_list(self, request):
         return self.paginator.get_paginated_response(data=dirty_machine_list)
 
     @common_swagger_auto_schema(
-        operation_summary=_("将污点池主机转移至待回收模块"),
-        request_body=TransferDirtyMachineSerializer(),
-        tags=[SWAGGER_TAG],
-    )
-    @action(
-        detail=False,
-        methods=["POST"],
-        url_path="transfer_dirty_machines",
-        serializer_class=TransferDirtyMachineSerializer,
-    )
-    def transfer_dirty_machines(self, request):
-        bk_host_ids = self.params_validate(self.get_serializer_class())["bk_host_ids"]
-        DBDirtyMachineHandler.transfer_dirty_machines(bk_host_ids)
-        return Response()
-
-    @common_swagger_auto_schema(
-        operation_summary=_("删除污点池记录"),
+        operation_summary=_("[TODO待删除]删除污点池记录"),
         request_body=DeleteDirtyMachineSerializer(),
         tags=[SWAGGER_TAG],
     )
@@ -108,5 +96,38 @@ def transfer_dirty_machines(self, request):
     )
     def delete_dirty_records(self, request):
         bk_host_ids = self.params_validate(self.get_serializer_class())["bk_host_ids"]
-        DBDirtyMachineHandler.remove_dirty_machines(bk_host_ids)
+        DirtyMachine.objects.filter(bk_host_id__in=bk_host_ids).delete()
         return Response()
+
+    @common_swagger_auto_schema(
+        operation_summary=_("将主机转移至待回收/故障池模块"),
+        request_body=TransferDirtyMachineSerializer(),
+        tags=[SWAGGER_TAG],
+    )
+    @action(detail=False, methods=["POST"], serializer_class=TransferDirtyMachineSerializer)
+    def transfer_hosts_to_pool(self, request):
+        data = self.params_validate(self.get_serializer_class())
+        DBDirtyMachineHandler.transfer_hosts_to_pool(operator=request.user.username, **data)
+        return Response()
+
+    @common_swagger_auto_schema(
+        operation_summary=_("机器事件列表"),
+        responses={status.HTTP_200_OK: ListMachineEventResponseSerializer()},
+        tags=[SWAGGER_TAG],
+    )
+    @action(detail=False, methods=["GET"], filter_class=MachineEventFilter, queryset=MachineEvent.objects.all())
+    def list_machine_events(self, request):
+        events_qs = self.paginate_queryset(self.filter_queryset(self.get_queryset()))
+        events_data = ListMachineEventSerializer(events_qs, many=True).data
+        return self.paginator.get_paginated_response(data=events_data)
+
+    @common_swagger_auto_schema(
+        operation_summary=_("主机池查询"),
+        responses={status.HTTP_200_OK: ListMachinePoolResponseSerializer()},
+        tags=[SWAGGER_TAG],
+    )
+    @action(detail=False, methods=["GET"], filter_class=DirtyMachinePoolFilter, queryset=DirtyMachine.objects.all())
+    def query_machine_pool(self, request):
+        machine_qs = self.paginate_queryset(self.filter_queryset(self.get_queryset()))
+        machine_data = ListMachinePoolSerializer(machine_qs, many=True).data
+        return self.paginator.get_paginated_response(data=machine_data)
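`list_machine_events` pairs the new pagination with `ListMachineEventSerializer`, whose `to_representation` enriches every row with business and cluster info resolved in bulk. A sketch of the same call outside the viewset:

```python
from backend.db_dirty.models import MachineEvent
from backend.db_dirty.serializers import ListMachineEventSerializer

events = MachineEvent.objects.all()[:10]
data = ListMachineEventSerializer(events, many=True).data
# Each item carries bk_biz_name / db_app_abbr / clusters in addition
# to the raw model fields.
```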
diff --git a/dbm-ui/backend/db_meta/migrations/0044_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py b/dbm-ui/backend/db_meta/migrations/0044_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py
new file mode 100644
index 0000000000..c6fe2d2ee4
--- /dev/null
+++ b/dbm-ui/backend/db_meta/migrations/0044_merge_0043_auto_20241014_1042_0043_auto_20241015_2128.py
@@ -0,0 +1,13 @@
+# Generated by Django 3.2.25 on 2024-11-12 02:36
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("db_meta", "0043_auto_20241014_1042"),
+        ("db_meta", "0043_auto_20241015_2128"),
+    ]
+
+    operations = []
diff --git a/dbm-ui/backend/db_meta/models/cluster.py b/dbm-ui/backend/db_meta/models/cluster.py
index dde22ac8dd..cefa73f29e 100644
--- a/dbm-ui/backend/db_meta/models/cluster.py
+++ b/dbm-ui/backend/db_meta/models/cluster.py
@@ -17,7 +17,7 @@
 from django.core.cache import cache
 from django.core.exceptions import ObjectDoesNotExist
 from django.db import models
-from django.db.models import Count, Q, QuerySet
+from django.db.models import Q, QuerySet
 from django.forms import model_to_dict
 from django.utils.translation import ugettext_lazy as _
 
@@ -101,29 +101,6 @@ def simple_desc(self):
             ],
         )
 
-    @property
-    def extra_desc(self):
-        """Append extra info; not suitable for bulk serialization"""
-
-        simple_desc = self.simple_desc
-
-        # Append per-role deployment counts
-        simple_desc["proxy_count"] = self.proxyinstance_set.all().count()
-        for storage in (
-            self.storageinstance_set.values("instance_role")
-            .annotate(cnt=Count("machine__ip", distinct=True))
-            .order_by()
-        ):
-            simple_desc["{}_count".format(storage["instance_role"])] = storage["cnt"]
-
-        return simple_desc
-
-    @classmethod
-    def get_cluster_id_immute_domain_map(cls, cluster_ids: List[int]) -> Dict[int, str]:
-        """Map cluster IDs to their immutable domains"""
-        clusters = cls.objects.filter(id__in=cluster_ids).only("id", "immute_domain")
-        return {cluster.id: cluster.immute_domain for cluster in clusters}
-
     @classmethod
     def is_exclusive(cls, cluster_id, ticket_type=None, **kwargs):
         if not ticket_type:
@@ -347,13 +324,9 @@ def get_partition_port(self):
             return self.storageinstance_set.first().port
         elif self.cluster_type == ClusterType.TenDBHA:
             return self.proxyinstance_set.first().port
-        # TODO: for tendbcluster, is the port the spider master's?
         elif self.cluster_type == ClusterType.TenDBCluster:
-            return (
-                self.proxyinstance_set.filter(tendbclusterspiderext__spider_role=TenDBClusterSpiderRole.SPIDER_MASTER)
-                .first()
-                .port
-            )
+            role = TenDBClusterSpiderRole.SPIDER_MASTER
+            return self.proxyinstance_set.filter(tendbclusterspiderext__spider_role=role).first().port
 
     def tendbcluster_ctl_primary_address(self) -> str:
         """
@@ -404,7 +377,6 @@ def get_cluster_stats(cls, bk_biz_id, cluster_types) -> dict:
         cluster_stats = {}
         for cluster_type in cluster_types:
             cluster_stats.update(json.loads(cache.get(f"{CACHE_CLUSTER_STATS}_{bk_biz_id}_{cluster_type}", "{}")))
-
         return cluster_stats
 
     def is_dbha_disabled(self) -> bool:
@@ -429,6 +401,20 @@ def enable_dbha(self):
         ClusterDBHAExt.objects.filter(cluster=self).delete()
         self.refresh_from_db()
 
+    @classmethod
+    def get_cluster_related_machines(cls, cluster_ids: List[int]) -> List:
+        """
+        Query all hosts related to the given clusters, i.e. the hosts their instances live on
+        """
+        from backend.db_meta.models import Machine
+
+        clusters = Cluster.objects.filter(id__in=cluster_ids)
+        host_ids = set(clusters.values_list("storageinstance__machine__bk_host_id", flat=True)) | set(
+            clusters.values_list("proxyinstance__machine__bk_host_id", flat=True)
+        )
+        machines = Machine.objects.filter(bk_host_id__in=host_ids)
+        return machines
+
     @classmethod
     def get_cluster_id__primary_address_map(cls, cluster_ids: List[int]) -> Dict[int, str]:
         """
diff --git a/dbm-ui/backend/db_meta/models/instance.py b/dbm-ui/backend/db_meta/models/instance.py
index 0531384ce7..29ddd041d4 100644
--- a/dbm-ui/backend/db_meta/models/instance.py
+++ b/dbm-ui/backend/db_meta/models/instance.py
@@ -109,33 +109,6 @@ def find_insts_by_addresses(cls, addresses: List[Union[str, Dict]], divider: str
         )
         return cls.objects.select_related("machine").filter(address_filters)
 
-    @classmethod
-    def filter_by_ips(cls, bk_biz_id: int, ips: List[str]):
-        """Look up instances by a list of IPs"""
-        instances = []
-        unique_ip_roles = set()
-        for inst in cls.objects.filter(bk_biz_id=bk_biz_id, machine__ip__in=ips):
-            ip_role = IP_PORT_DIVIDER.join([inst.machine.ip, inst.instance_role])
-            if ip_role in unique_ip_roles:
-                continue
-
-            # At present an instance belongs to at most one cluster, so this loop runs no more than once
-            unique_ip_roles.add(ip_role)
-            for cluster in inst.cluster.all():
-                instances.append(
-                    {
-                        "ip": inst.machine.ip,
-                        "bk_host_id": inst.machine.bk_host_id,
-                        "bk_cloud_id": inst.machine.bk_cloud_id,
-                        "spec_id": inst.machine.spec_id,
-                        "spec_config": inst.machine.spec_config,
-                        "role": inst.instance_role,
-                        "cluster": cluster.extra_desc,
-                    }
-                )
-
-        return instances
-
 
 class StorageInstance(InstanceMixin, AuditedModel):
     version = models.CharField(max_length=64, default="", help_text=_("版本号"), blank=True, null=True)
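`get_cluster_related_machines` collapses storage and proxy instances down to the distinct hosts backing a set of clusters. A sketch with illustrative cluster IDs:

```python
from backend.db_meta.models import Cluster

machines = Cluster.get_cluster_related_machines(cluster_ids=[1, 2])
bk_host_ids = [machine.bk_host_id for machine in machines]
```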
diff --git a/dbm-ui/backend/db_services/dbbase/constants.py b/dbm-ui/backend/db_services/dbbase/constants.py
index 1e15e48228..5c49ec4d49 100644
--- a/dbm-ui/backend/db_services/dbbase/constants.py
+++ b/dbm-ui/backend/db_services/dbbase/constants.py
@@ -10,6 +10,7 @@
 """
 from django.utils.translation import ugettext_lazy as _
 
+from backend.db_dirty.constants import PoolType
 from blue_krill.data_types.enum import EnumField, StructuredEnum
 
 ES_DEFAULT_PORT = 9200
@@ -25,10 +26,12 @@
 DORIS_DEFAULT_HTTP_PORT = 8030
 DORIS_DEFAULT_QUERY_PORT = 9030
 
-
 IP_PORT_DIVIDER = ":"
 SPACE_DIVIDER = " "
 
+# Host destination enum (alias of the pool types)
+IpDest = PoolType
+
 
 class IpSource(str, StructuredEnum):
     """Host source enum"""
diff --git a/dbm-ui/backend/db_services/dbbase/resources/query.py b/dbm-ui/backend/db_services/dbbase/resources/query.py
index 2e34865a8d..9913905c5f 100644
--- a/dbm-ui/backend/db_services/dbbase/resources/query.py
+++ b/dbm-ui/backend/db_services/dbbase/resources/query.py
@@ -393,6 +393,8 @@ def _list_clusters(
             "exact_domain": Q(immute_domain__in=query_params.get("exact_domain", "").split(",")),
             # domain
             "domain": build_q_for_domain_by_cluster(domains=query_params.get("domain", "").split(",")),
+            # tags
+            "tags": Q(tags__in=query_params.get("tags", "").split(",")),
         }
         filter_params_map.update(inner_filter_params_map)
@@ -402,13 +404,8 @@
             if query_params.get(param):
                 query_filters &= filter_params_map[param]
 
-        # Filter by tags with AND semantics, which requires chaining extra filter() calls
-        cluster_queryset = Cluster.objects.filter(query_filters)
-        for tag_id in query_params.get("tag_ids", "").split(","):
-            cluster_queryset = cluster_queryset.filter(tags__id=tag_id)
-
-        # The "one" side of a one-to-many join yields duplicate rows; deduplicate
-        cluster_queryset = cluster_queryset.distinct()
+        # The "one" side of a one-to-many join yields duplicate rows; deduplicate
+        cluster_queryset = Cluster.objects.filter(query_filters).distinct()
 
         # Instance filtering
         def filter_instance_func(_query_params, _cluster_queryset, _proxy_queryset, _storage_queryset):
diff --git a/dbm-ui/backend/db_services/dbresource/exceptions.py b/dbm-ui/backend/db_services/dbresource/exceptions.py
index 1728f4c16e..dabc70dd0f 100644
--- a/dbm-ui/backend/db_services/dbresource/exceptions.py
+++ b/dbm-ui/backend/db_services/dbresource/exceptions.py
@@ -40,3 +40,9 @@ class SpecFilterClassDoesNotExistException(ResourcePoolBaseException):
     ERROR_CODE = "003"
     MESSAGE = _("规格筛选类不存在")
     MESSAGE_TPL = _("规格筛选类不存在")
+
+
+class ResourceReturnException(ResourcePoolBaseException):
+    ERROR_CODE = "005"
+    MESSAGE = _("资源池退回异常")
+    MESSAGE_TPL = _("资源池退回异常")
diff --git a/dbm-ui/backend/db_services/dbresource/handlers.py b/dbm-ui/backend/db_services/dbresource/handlers.py
index 3303bf5f1f..f6259b027d 100644
--- a/dbm-ui/backend/db_services/dbresource/handlers.py
+++ b/dbm-ui/backend/db_services/dbresource/handlers.py
@@ -19,6 +19,7 @@
 from backend.db_meta.enums.spec import SpecClusterType, SpecMachineType
 from backend.db_meta.models import Spec
 from backend.db_services.dbresource.exceptions import SpecOperateException
+from backend.db_services.ipchooser.query.resource import ResourceQueryHelper
 
 
 class ClusterSpecFilter(object):
@@ -411,3 +412,20 @@ def spec_resource_count(cls, bk_biz_id: int, bk_cloud_id: int, spec_ids: List[in
         spec_apply_count = DBResourceApi.apply_count(params=spec_count_params)
         spec_apply_count = {k.split("_")[0]: v for k, v in spec_apply_count.items()}
         return spec_apply_count
+
+    @classmethod
+    def standardized_resource_host(cls, hosts, bk_biz_id=None):
+        """Standardize host info: map CC field names to the resource pool's field names"""
+        host_ids = [host["bk_host_id"] for host in hosts]
+        hosts = ResourceQueryHelper.search_cc_hosts(role_host_ids=host_ids, bk_biz_id=bk_biz_id)
+        for host in hosts:
+            host.update(
+                bk_biz_id=bk_biz_id,
+                ip=host.get("bk_host_innerip"),
+                city=host.get("idc_city_name"),
+                host_id=host.get("bk_host_id"),
+                os_name=host.get("bk_os_name"),
+                os_type=host.get("bk_os_type"),
+                device_class=host.get("svr_device_class"),
+            )
+        return hosts
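`standardized_resource_host` is what lets `host_event_trigger(standard=False)` accept bare `{"bk_host_id": ...}` dicts: it re-reads the hosts from CC and renames the CC fields to the resource pool's vocabulary. A sketch with illustrative IDs:

```python
from backend.db_services.dbresource.handlers import ResourceHandler

hosts = [{"bk_host_id": 101}, {"bk_host_id": 102}]  # illustrative
hosts = ResourceHandler.standardized_resource_host(hosts, bk_biz_id=3)
# Each host dict now carries ip / city / os_name / device_class keys mapped
# from bk_host_innerip / idc_city_name / bk_os_name / svr_device_class.
```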
diff --git a/dbm-ui/backend/db_services/dbresource/serializers.py b/dbm-ui/backend/db_services/dbresource/serializers.py
index 54ab5b90d3..a34ab81f49 100644
--- a/dbm-ui/backend/db_services/dbresource/serializers.py
+++ b/dbm-ui/backend/db_services/dbresource/serializers.py
@@ -15,6 +15,7 @@
 
 from backend import env
 from backend.constants import INT_MAX
+from backend.db_dirty.constants import MachineEventType
 from backend.db_meta.enums import InstanceRole
 from backend.db_meta.enums.spec import SpecClusterType, SpecMachineType
 from backend.db_meta.models import Spec
@@ -43,7 +44,7 @@ class HostInfoSerializer(serializers.Serializer):
         resource_type = serializers.CharField(help_text=_("专属DB"), allow_blank=True, allow_null=True)
         bk_biz_id = serializers.IntegerField(help_text=_("机器当前所属的业务id "), default=env.DBA_APP_BK_BIZ_ID)
     hosts = serializers.ListSerializer(help_text=_("主机"), child=HostInfoSerializer())
-    labels = serializers.DictField(help_text=_("标签信息"), required=False)
+    labels = serializers.ListField(help_text=_("标签"), child=serializers.CharField(), required=False)
 
 
 class ResourceApplySerializer(serializers.Serializer):
@@ -53,7 +54,7 @@ class HostDetailSerializer(serializers.Serializer):
         spec = serializers.DictField(help_text=_("cpu&mem参数"), required=False)
         storage_spec = serializers.ListField(help_text=_("磁盘参数"), child=serializers.DictField(), required=False)
         location_spec = serializers.DictField(help_text=_("位置匹配参数"), required=False)
-        labels = serializers.DictField(help_text=_("标签"), required=False)
+        labels = serializers.ListField(help_text=_("标签"), required=False, child=serializers.CharField())
         affinity = serializers.CharField(help_text=_("亲和性"), required=False)
         count = serializers.IntegerField(help_text=_("数量"))
@@ -90,7 +91,7 @@ class ResourceListSerializer(serializers.Serializer):
     spec_id = serializers.IntegerField(help_text=_("过滤的规格ID"), required=False)
     agent_status = serializers.BooleanField(help_text=_("agent状态"), required=False)
-    labels = serializers.DictField(help_text=_("标签信息"), required=False)
+    labels = serializers.CharField(help_text=_("标签列表id"), required=False)
 
     limit = serializers.IntegerField(help_text=_("单页数量"))
     offset = serializers.IntegerField(help_text=_("偏移量"))
@@ -150,6 +151,7 @@ def validate(self, attrs):
                 "mem",
                 "disk",
                 "bk_cloud_ids",
+                "labels",
             ],
         )
         return attrs
@@ -161,6 +163,8 @@ class Meta:
 
 
 class ListDBAHostsSerializer(QueryHostsBaseSer):
+    bk_biz_id = serializers.IntegerField(help_text=_("业务ID"), required=False, default=env.DBA_APP_BK_BIZ_ID)
+
     def validate(self, attrs):
         attrs = super().validate(attrs)
         if not attrs.get("conditions"):
@@ -184,12 +188,14 @@ class ResourceConfirmSerializer(serializers.Serializer):
 
 
 class ResourceDeleteSerializer(serializers.Serializer):
+    bk_biz_id = serializers.IntegerField(help_text=_("资源专用业务"), default=env.DBA_APP_BK_BIZ_ID, required=False)
     bk_host_ids = serializers.ListField(help_text=_("主机ID列表"), child=serializers.IntegerField())
+    event = serializers.ChoiceField(help_text=_("删除事件(移入故障池/撤销导入)"), choices=MachineEventType.get_choices())
 
 
 class ResourceUpdateSerializer(serializers.Serializer):
     bk_host_ids = serializers.ListField(help_text=_("主机ID列表"), child=serializers.IntegerField())
-    labels = serializers.DictField(help_text=_("Labels"), required=False)
+    labels = serializers.ListField(help_text=_("标签"), required=False, child=serializers.CharField())
     for_biz = serializers.IntegerField(help_text=_("专用业务ID"), required=False)
     resource_type = serializers.CharField(help_text=_("专属DB"), allow_blank=True, allow_null=True)
     storage_device = serializers.JSONField(help_text=_("磁盘挂载点信息"), required=False)
diff --git a/dbm-ui/backend/db_services/dbresource/views/resource.py b/dbm-ui/backend/db_services/dbresource/views/resource.py
index d1ea8e5a37..e29926bb33 100644
--- a/dbm-ui/backend/db_services/dbresource/views/resource.py
+++ b/dbm-ui/backend/db_services/dbresource/views/resource.py
@@ -8,7 +8,7 @@
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 """
-
+import itertools
 import time
 from collections import defaultdict
 from typing import Dict, List
@@ -21,18 +21,18 @@
 from backend import env
 from backend.bk_web import viewsets
 from backend.bk_web.swagger import common_swagger_auto_schema
-from backend.components import CCApi
 from backend.components.dbresource.client import DBResourceApi
 from backend.components.hcm.client import HCMApi
-from backend.configuration.constants import SystemSettingsEnum
-from backend.configuration.models import SystemSettings
-from backend.db_meta.models import AppCache
+from backend.db_dirty.constants import MachineEventType
+from backend.db_dirty.models import MachineEvent
+from backend.db_meta.models import AppCache, Tag
 from backend.db_services.dbresource.constants import (
     GSE_AGENT_RUNNING_CODE,
     RESOURCE_IMPORT_EXPIRE_TIME,
     RESOURCE_IMPORT_TASK_FIELD,
     SWAGGER_TAG,
 )
+from backend.db_services.dbresource.exceptions import ResourceReturnException
 from backend.db_services.dbresource.handlers import ResourceHandler
 from backend.db_services.dbresource.serializers import (
     GetDiskTypeResponseSerializer,
@@ -65,6 +65,7 @@
 from backend.flow.consts import FAILED_STATES, SUCCEED_STATES
 from backend.flow.engine.controller.base import BaseController
 from backend.flow.models import FlowTree
+from backend.flow.utils.cc_manage import CcManage
 from backend.iam_app.dataclass import ResourceEnum
 from backend.iam_app.dataclass.actions import ActionEnum
 from backend.iam_app.handlers.drf_perm.base import ResourceActionPermission
@@ -96,39 +97,39 @@ class DBResourceViewSet(viewsets.SystemViewSet):
     }
     default_permission_class = [ResourceActionPermission([ActionEnum.RESOURCE_MANAGE])]
 
+    @staticmethod
+    def _format_resource_fields(data, _cloud_info, _biz_infos, _tag_infos):
+        data.update(
+            {
+                "bk_cloud_name": _cloud_info[str(data["bk_cloud_id"])]["bk_cloud_name"],
+                "bk_host_innerip": data["ip"],
+                "bk_mem": data.pop("dram_cap"),
+                "bk_cpu": data.pop("cpu_num"),
+                "bk_disk": data.pop("total_storage_cap"),
+                "resource_type": data.pop("rs_type"),
+                "for_biz": {
+                    "bk_biz_id": data["dedicated_biz"],
+                    "bk_biz_name": _biz_infos.get(data["dedicated_biz"]),
+                },
+                "agent_status": int((data.pop("gse_agent_status_code") == GSE_AGENT_RUNNING_CODE)),
+                "labels": [{"id": _tag, "name": _tag_infos.get(int(_tag))} for _tag in data.pop("labels") or []],
+            }
+        )
+        return data
+
     @common_swagger_auto_schema(
         operation_summary=_("资源池资源列表"),
         request_body=ResourceListSerializer(),
         responses={status.HTTP_200_OK: ResourceListResponseSerializer()},
         tags=[SWAGGER_TAG],
     )
-    @action(
-        detail=False, methods=["POST"], url_path="list", serializer_class=ResourceListSerializer, pagination_class=None
-    )
+    @action(detail=False, methods=["POST"], url_path="list", serializer_class=ResourceListSerializer)
     @Permission.decorator_external_permission_field(
         param_field=lambda d: None,
         actions=[ActionEnum.RESOURCE_POLL_MANAGE],
         resource_meta=None,
     )
     def resource_list(self, request):
-        def _format_resource_fields(data, _cloud_info, _biz_infos):
-            data.update(
-                {
-                    "bk_cloud_name": _cloud_info[str(data["bk_cloud_id"])]["bk_cloud_name"],
-                    "bk_host_innerip": data["ip"],
-                    "bk_mem": data.pop("dram_cap"),
-                    "bk_cpu": data.pop("cpu_num"),
-                    "bk_disk": data.pop("total_storage_cap"),
-                    "resource_type": data.pop("rs_type"),
-                    "for_biz": {
-                        "bk_biz_id": data["dedicated_biz"],
-                        "bk_biz_name": _biz_infos.get(data["dedicated_biz"]),
-                    },
-                    "agent_status": int((data.pop("gse_agent_status_code") == GSE_AGENT_RUNNING_CODE)),
-                }
-            )
-            return data
-
         resource_data = DBResourceApi.resource_list(params=self.params_validate(self.get_serializer_class()))
         if not resource_data["details"]:
             return Response({"count": 0, "results": []})
@@ -137,9 +138,13 @@ def _format_resource_fields(data, _cloud_info, _biz_infos):
         cloud_info = ResourceQueryHelper.search_cc_cloud(get_cache=True)
         for_biz_ids = [data["dedicated_biz"] for data in resource_data["details"] if data["dedicated_biz"]]
         for_biz_infos = AppCache.batch_get_app_attr(bk_biz_ids=for_biz_ids, attr_name="bk_biz_name")
+        # Fetch the tag info referenced by the labels
+        label_ids = itertools.chain(*[data["labels"] for data in resource_data["details"] if data["labels"]])
+        label_ids = [int(label_id) for label_id in label_ids if label_id.isdigit()]
+        tag_infos = {tag.id: tag.value for tag in Tag.objects.filter(id__in=label_ids)}
         # Format the resource pool fields
         for data in resource_data.get("details") or []:
-            _format_resource_fields(data, cloud_info, for_biz_infos)
+            self._format_resource_fields(data, cloud_info, for_biz_infos, tag_infos)
 
         resource_data["results"] = resource_data.pop("details")
         return Response(resource_data)
@@ -152,11 +157,10 @@ def _format_resource_fields(data, _cloud_info, _biz_infos):
     @action(detail=False, methods=["GET"], url_path="list_dba_hosts", serializer_class=ListDBAHostsSerializer)
     def list_dba_hosts(self, request):
         params = self.params_validate(self.get_serializer_class())
+        bk_biz_id = params.pop("bk_biz_id")
 
         # Query the meta of the DBA idle-host module and build the node_list for the idle-host query
-        scope_list: ScopeList = [
-            {"scope_id": env.DBA_APP_BK_BIZ_ID, "scope_type": "biz", "bk_biz_id": env.DBA_APP_BK_BIZ_ID}
-        ]
+        scope_list: ScopeList = [{"scope_id": bk_biz_id, "scope_type": "biz", "bk_biz_id": bk_biz_id}]
         trees: List[Dict] = TopoHandler.trees(all_scope=True, mode=ModeType.IDLE_ONLY.value, scope_list=scope_list)
         node_list: ScopeList = [
             {"instance_id": trees[0]["instance_id"], "meta": trees[0]["meta"], "object_id": "module"}
@@ -365,22 +369,24 @@ def resource_confirm(self, request):
     )
     @action(detail=False, methods=["POST"], url_path="delete", serializer_class=ResourceDeleteSerializer)
     def resource_delete(self, request):
-        validated_data = self.params_validate(self.get_serializer_class())
-        # Delete the hosts from the resource pool
-        resp = DBResourceApi.resource_delete(params=validated_data)
-        # Move hosts still in the resource pool module to the idle module; skip hosts in other modules
-        move_idle_hosts: List[int] = []
-        resource_topo = SystemSettings.get_setting_value(key=SystemSettingsEnum.MANAGE_TOPO.value)
-        for topo in CCApi.find_host_biz_relations({"bk_host_id": validated_data["bk_host_ids"]}):
-            if (
-                topo["bk_set_id"] == resource_topo["set_id"]
-                and topo["bk_module_id"] == resource_topo["resource_module_id"]
-            ):
-                move_idle_hosts.append(topo["bk_host_id"])
-
-        if move_idle_hosts:
-            CCApi.transfer_host_to_idlemodule({"bk_biz_id": env.DBA_APP_BK_BIZ_ID, "bk_host_id": move_idle_hosts})
-
+        params = self.params_validate(self.get_serializer_class())
+        operator = request.user.username
+        bk_host_ids = params["bk_host_ids"]
+        bk_biz_id = params["bk_biz_id"] or env.DBA_APP_BK_BIZ_ID
+
+        # An undo-import must first check whether the hosts can still be returned
+        if params["event"] == MachineEventType.UndoImport:
+            ok, message = MachineEvent.hosts_can_return(bk_host_ids)
+            if not ok:
+                raise ResourceReturnException(message)
+        # Move the hosts from the resource pool module to the business idle module
+        CcManage(bk_biz_id, "").transfer_host_to_idlemodule(bk_biz_id=bk_biz_id, bk_host_ids=bk_host_ids)
+
+        # Delete the hosts from the resource pool
+        resp = DBResourceApi.resource_delete(params={"bk_host_ids": bk_host_ids})
+        # Record the machine event (undo import / move to fault pool)
+        hosts = [{"bk_host_id": host} for host in bk_host_ids]
+        MachineEvent.host_event_trigger(bk_biz_id, hosts, event=params["event"], operator=operator)
         return Response(resp)
 
     @common_swagger_auto_schema(
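The rewritten `resource_delete` drives both removal paths through machine events; for `undo_import` it first applies a returnability guard. A sketch of that guard, with illustrative host IDs:

```python
from backend.db_dirty.models import MachineEvent
from backend.db_services.dbresource.exceptions import ResourceReturnException

ok, message = MachineEvent.hosts_can_return([101, 102])
if not ok:
    # e.g. the latest event is apply_resource rather than import_resource
    raise ResourceReturnException(message)
```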
diff --git a/dbm-ui/backend/db_services/ipchooser/constants.py b/dbm-ui/backend/db_services/ipchooser/constants.py
index dbcf2f0f31..aec9d9fa46 100644
--- a/dbm-ui/backend/db_services/ipchooser/constants.py
+++ b/dbm-ui/backend/db_services/ipchooser/constants.py
@@ -45,6 +45,9 @@ class CommonEnum(EnhanceEnum):
         "idc_name",
         "idc_city_id",
         "idc_city_name",
+        "sub_zone",
+        "rack_id",
+        "svr_device_class",
         "operator",
     ]
diff --git a/dbm-ui/backend/db_services/ipchooser/query/resource.py b/dbm-ui/backend/db_services/ipchooser/query/resource.py
index 4b399181c7..8127b3e292 100644
--- a/dbm-ui/backend/db_services/ipchooser/query/resource.py
+++ b/dbm-ui/backend/db_services/ipchooser/query/resource.py
@@ -241,20 +241,12 @@ def fill_agent_status(cc_hosts, fill_key="status"):
         return ResourceQueryHelper.query_agent_status_from_nodeman(cc_hosts, fill_key)
 
-    @staticmethod
-    def fill_cloud_name(cc_hosts):
+    @classmethod
+    def fill_cloud_name(cls, cc_hosts):
         if not cc_hosts:
             return
 
-        # Fill in cloud area names
-        resp = CCApi.search_cloud_area({"page": {"start": 0, "limit": 1000}}, use_admin=True)
-
-        cloud_map = (
-            {cloud_info["bk_cloud_id"]: cloud_info["bk_cloud_name"] for cloud_info in resp["info"]}
-            if resp.get("info")
-            else {}
-        )
-
+        cloud_map = {int(cloud): info["bk_cloud_name"] for cloud, info in cls.search_cc_cloud().items()}
         for host in cc_hosts:
             host["bk_cloud_name"] = cloud_map.get(host["bk_cloud_id"], host["bk_cloud_id"])
diff --git a/dbm-ui/backend/db_services/redis/autofix/bill.py b/dbm-ui/backend/db_services/redis/autofix/bill.py
index 513e733f50..139b43e20a 100644
--- a/dbm-ui/backend/db_services/redis/autofix/bill.py
+++ b/dbm-ui/backend/db_services/redis/autofix/bill.py
@@ -68,6 +68,7 @@ def generate_autofix_ticket(fault_clusters: QuerySet):
                 "instance_type": fault_machine["instance_type"],
                 "spec_config": fault_obj.spec_config,
                 "cluster_type": cluster.cluster_type,
+                "bk_host_id": fault_obj.bk_host_id,
             }
             if fault_machine["instance_type"] in [MachineType.TWEMPROXY.value, MachineType.PREDIXY.value]:
                 redis_proxies.append(fault_info)
diff --git a/dbm-ui/backend/db_services/tag/constants.py b/dbm-ui/backend/db_services/tag/constants.py
index 03be4ecf3b..d67f495a00 100644
--- a/dbm-ui/backend/db_services/tag/constants.py
+++ b/dbm-ui/backend/db_services/tag/constants.py
@@ -10,8 +10,16 @@
 """
 from django.utils.translation import gettext_lazy as _
 
+from backend.db_meta.models import Cluster, Machine
 from blue_krill.data_types.enum import EnumField, StructuredEnum
 
 
 class TagResourceType(str, StructuredEnum):
-    DB_RESOURCE = EnumField("db_resource", _("资源池"))
+    DB_RESOURCE = EnumField("resource", _("资源池"))
+    CLUSTER = EnumField("cluster", _("集群"))
+
+
+TAG_RELATED_RESOURCE_DISPLAY_FIELD = {
+    TagResourceType.CLUSTER: Cluster.immute_domain.field.name,
+    TagResourceType.DB_RESOURCE: Machine.ip.field.name,
+}
diff --git a/dbm-ui/backend/db_services/tag/filters.py b/dbm-ui/backend/db_services/tag/filters.py
new file mode 100644
index 0000000000..07118a040a
--- /dev/null
+++ b/dbm-ui/backend/db_services/tag/filters.py
@@ -0,0 +1,27 @@
+# -*- coding:utf-8 -*-
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+"""
+
+from django.utils.translation import ugettext_lazy as _
+from django_filters import rest_framework as filters
+
+from backend.db_meta.models import Tag
+
+
+class TagListFilter(filters.FilterSet):
+    key = filters.CharFilter(field_name="key", lookup_expr="icontains", label=_("键"))
+    value = filters.CharFilter(field_name="value", lookup_expr="icontains", label=_("值"))
+
+    class Meta:
+        model = Tag
+        fields = {
+            "bk_biz_id": ["exact"],
+            "type": ["exact"],
+        }
diff --git a/dbm-ui/backend/db_services/tag/handlers.py b/dbm-ui/backend/db_services/tag/handlers.py
index 8a0a7aa794..afb770cd91 100644
--- a/dbm-ui/backend/db_services/tag/handlers.py
+++ b/dbm-ui/backend/db_services/tag/handlers.py
@@ -8,13 +8,14 @@
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 """
+from collections import defaultdict
 from typing import Dict, List
 
 from django.db.models import ManyToManyRel
 from django.utils.translation import gettext_lazy as _
 
 from backend.db_meta.models import Tag
-from backend.db_services.tag.constants import TagResourceType
+from backend.db_services.tag.constants import TAG_RELATED_RESOURCE_DISPLAY_FIELD, TagResourceType
 from backend.exceptions import ValidationError
 
@@ -49,30 +50,37 @@ def query_related_resources(cls, ids: List[int], resource_type: str = None):
         """
         Query the resources related to the given tags
         """
-        # 1. Query resources related through foreign keys
-        data = []
-        for tag_id in ids:
-            info = {"id": tag_id, "related_resources": []}
-            for field in Tag._meta.get_fields():
-                if isinstance(field, ManyToManyRel) and (field.name == resource_type or resource_type is None):
-                    related_objs = field.related_model.objects.prefetch_related("tags").filter(tags__id=tag_id)
-                    info["related_resources"].append(
-                        {
-                            "resource_type": field.name,
-                            "count": related_objs.count(),
-                        }
-                    )
-
-            # 2. Query resources related through third-party services (e.g. the resource pool)
-            if resource_type == TagResourceType.DB_RESOURCE.value or resource_type is None:
-                info["related_resources"].append(
-                    {
-                        "resource_type": TagResourceType.DB_RESOURCE.value,
-                        # TODO request the resource pool API for the count
-                        "count": 0,
-                    }
-                )
-            data.append(info)
+        if not resource_type:
+            return []
+
+        data: List[Dict] = []
+        # 1. Query resources related inside DBM
+        for field in Tag._meta.get_fields():
+            # Skip relations that do not match the requested resource type
+            if not isinstance(field, ManyToManyRel) or (resource_type and field.name != resource_type):
+                continue
+
+            # Query the related resources and aggregate them by tag
+            tag__resource_list = defaultdict(list)
+            related_objs = field.related_model.objects.prefetch_related("tags").filter(tags__in=ids)
+            for obj in related_objs:
+                for tag in obj.tags.all():
+                    tag__resource_list[tag.id].append(obj)
+
+            # Fill in the related resource info, displayed by the field mapped for this resource type
+            display_field = TAG_RELATED_RESOURCE_DISPLAY_FIELD[resource_type]
+            for tag_id in ids:
+                related_objs = tag__resource_list[tag_id]
+                related_resources = [{"id": obj.pk, "display": getattr(obj, display_field)} for obj in related_objs]
+                data.append({"id": tag_id, "related_resources": related_resources})
+
+        # 2. Query resources related through third-party services (e.g. the resource pool; more may follow)
+        if resource_type == TagResourceType.DB_RESOURCE.value:
+            # The resource pool aggregates counts by tag
+            data = [{"id": tag_id, "ip_count": 0} for tag_id in ids]
+
         return data
 
     @classmethod
diff --git a/dbm-ui/backend/db_services/tag/mock.py b/dbm-ui/backend/db_services/tag/mock.py
new file mode 100644
index 0000000000..ced76d5296
--- /dev/null
+++ b/dbm-ui/backend/db_services/tag/mock.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+"""
+
+
+RELATED_RESOURCE_DATA = [
+    {"id": 1, "related_resources": [{"id": 15, "display": "127.0.0.1"}, {"id": 16, "display": "127.0.0.2"}]},
+    {"id": 2, "related_resources": [{"id": 15, "display": "127.0.0.1"}, {"id": 16, "display": "127.0.0.2"}]},
+]
diff --git a/dbm-ui/backend/db_services/tag/serializers.py b/dbm-ui/backend/db_services/tag/serializers.py
index 564c001e98..a22fd43790 100644
--- a/dbm-ui/backend/db_services/tag/serializers.py
+++ b/dbm-ui/backend/db_services/tag/serializers.py
@@ -14,6 +14,8 @@
 
 from backend.bk_web.serializers import AuditedSerializer
 from backend.db_meta.models import Tag
+from backend.db_services.tag import mock
+from backend.db_services.tag.constants import TagResourceType
 
 
 class TagSerializer(AuditedSerializer, serializers.ModelSerializer):
@@ -48,4 +50,9 @@ class DeleteTagsSerializer(serializers.Serializer):
 
 class QueryRelatedResourceSerializer(serializers.Serializer):
     ids = serializers.ListSerializer(child=serializers.IntegerField(help_text=_("标签 ID")), help_text=_("标签 ID 列表"))
-    resource_type = serializers.CharField(help_text=_("资源类型"), required=False)
+    resource_type = serializers.ChoiceField(help_text=_("资源类型"), choices=TagResourceType.get_choices())
+
+
+class RelatedResourceResponseSerializer(serializers.Serializer):
+    class Meta:
+        swagger_schema_fields = {"example": mock.RELATED_RESOURCE_DATA}
diff --git a/dbm-ui/backend/db_services/tag/views.py b/dbm-ui/backend/db_services/tag/views.py
index a79a98a6c9..abf5f4107d 100644
--- a/dbm-ui/backend/db_services/tag/views.py
+++ b/dbm-ui/backend/db_services/tag/views.py
@@ -19,6 +19,7 @@
 from backend.bk_web.viewsets import AuditedModelViewSet
 from backend.db_meta.models import Tag
 from backend.db_services.tag import serializers
+from backend.db_services.tag.filters import TagListFilter
 from backend.db_services.tag.handlers import TagHandler
 
 SWAGGER_TAG = _("标签")
@@ -41,8 +42,9 @@ class TagViewSet(AuditedModelViewSet):
 
     queryset = Tag.objects.all()
     serializer_class = serializers.TagSerializer
-    filter_backends = [filters.SearchFilter, DjangoFilterBackend]
-    filter_fields = ("bk_biz_id", "key", "value", "type")
+    filter_backends = [DjangoFilterBackend, filters.OrderingFilter]
+    filter_class = TagListFilter
+    ordering_fields = ["create_at", "creator"]
 
     @common_swagger_auto_schema(
         operation_summary=_("查询标签关联资源"), request_body=serializers.QueryRelatedResourceSerializer(), tags=[SWAGGER_TAG]
insert_host_event(params, data, kwargs, global_data):
+    """导入资源池成功后,记录主机事件"""
+    bk_biz_id, hosts, operator = global_data["bk_biz_id"], global_data["hosts"], global_data["operator"]
+    ticket = Ticket.objects.filter(id=global_data.get("ticket_id", 0)).first()
+    event = MachineEventType.ReturnResource if global_data.get("return_resource") else MachineEventType.ImportResource
+    # 资源池返回的主机以host_id标识,这里补充bk_host_id字段,统一机器事件的主机格式
+    hosts = [{"bk_host_id": host["host_id"], **host} for host in hosts]
+    MachineEvent.host_event_trigger(bk_biz_id, hosts, event=event, operator=operator, ticket=ticket, standard=True)
 
 
 class ImportResourceInitStepFlow(object):
@@ -78,6 +90,7 @@ def machine_init_flow(self):
                 "api_import_path": DBResourceApi.__module__,
                 "api_import_module": "DBResourceApi",
                 "api_call_func": "resource_import",
+                "success_callback_path": f"{insert_host_event.__module__}.{insert_host_event.__name__}",
             },
         )
 
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_fake_sql_semantic_check.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_fake_sql_semantic_check.py
index 8ca20600b1..d1c7ed1557 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_fake_sql_semantic_check.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_fake_sql_semantic_check.py
@@ -14,6 +14,7 @@
 from django.utils.translation import ugettext as _
 
 from backend.flow.engine.bamboo.scene.common.builder import Builder
+from backend.flow.plugins.components.collections.common.pause import PauseComponent
 from backend.flow.plugins.components.collections.mysql.fake_semantic_check import FakeSemanticCheckComponent
 
 logger = logging.getLogger("flow")
@@ -42,7 +43,7 @@ def fake_semantic_check(self):
         fake_semantic_check.add_act(act_name=_("串行1"), act_component_code=FakeSemanticCheckComponent.code, kwargs={})
         fake_semantic_check.add_act(act_name=_("串行2"), act_component_code=FakeSemanticCheckComponent.code, kwargs={})
         fake_semantic_check.add_act(act_name=_("串行3"), act_component_code=FakeSemanticCheckComponent.code, kwargs={})
-
+        fake_semantic_check.add_act(act_name=_("人工确认卸载实例"), act_component_code=PauseComponent.code, kwargs={})
         parallel_acts = [
             {
                 "act_name": _("并行1"),
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
index 7d21403d1f..b981566c42 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
@@ -234,7 +234,7 @@ def migrate_cluster_flow(self, use_for_upgrade=False):
                 logger.error("cluster {} backup info not exists".format(cluster_model.id))
                 raise TendbGetBackupInfoFailedException(message=_("获取集群 {} 的备份信息失败".format(cluster_id)))
             cluster["backupinfo"] = backup_info
-            cluster["new_master_ip"] = self.data["new_master_ip"]
+            cluster["new_master_ip"] = self.data["new_master_ip"]
             cluster["new_slave_ip"] = self.data["new_slave_ip"]
             cluster["new_master_port"] = master_model.port
             cluster["new_slave_port"] = master_model.port
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_proxy_scale.py b/dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_proxy_scale.py
index 9ebee4d866..a963bde1fb 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_proxy_scale.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_proxy_scale.py
@@ -213,7 +213,7 @@ def redis_proxy_scale_up_flow(self):
         redis_pipeline.run_pipeline()
 
     @staticmethod
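+    # 去除双下划线前缀改为公开方法,便于在流程之外(如单据构造阶段)复用待下架proxy IP的计算逻辑
-    def __calc_scale_down_ips(bk_biz_id, proxy_ips, 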
target_proxy_count): + def calc_scale_down_ips(bk_biz_id, proxy_ips, target_proxy_count): # 统计proxy的idc情况 idc_ips = defaultdict(list) max_count = 0 @@ -262,7 +262,7 @@ def __scale_down_cluster_info( raise Exception("proxy ip {} not in cluster {}".format(ip, cluster_name)) else: # 根据数量缩容 - scale_down_ips = cls.__calc_scale_down_ips(bk_biz_id, proxy_ips, target_proxy_count) + scale_down_ips = cls.calc_scale_down_ips(bk_biz_id, proxy_ips, target_proxy_count) return { "proxy_port": proxy_port, diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_reduce_nodes.py b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_reduce_nodes.py index 111997d7b4..92c715ac92 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_reduce_nodes.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_reduce_nodes.py @@ -91,6 +91,7 @@ def __calc_reduce_spiders( if spider_reduced_hosts: return [{"ip": host["ip"]} for host in spider_reduced_hosts] + # TODO: 这块逻辑放到单据,需要提前拿到待下架机器 # 计算合理的待下架的spider节点列表 ctl_primary = cluster.tendbcluster_ctl_primary_address() diff --git a/dbm-ui/backend/flow/plugins/components/collections/common/exec_clear_machine.py b/dbm-ui/backend/flow/plugins/components/collections/common/exec_clear_machine.py index 3d9a5912e2..799be61d9a 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/common/exec_clear_machine.py +++ b/dbm-ui/backend/flow/plugins/components/collections/common/exec_clear_machine.py @@ -65,7 +65,7 @@ def _execute(self, data, parent_data) -> bool: "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", "script_content": base64_encode(db_type_script_map[global_data["db_type"]]), - "script_language": os_script_language_map[global_data["os_name"]], + "script_language": os_script_language_map[global_data["os_type"]], "target_server": {"ip_list": exec_ips}, } self.log_debug("[{}] ready start task with body {}".format(node_name, body)) diff --git a/dbm-ui/backend/flow/plugins/components/collections/mysql/fake_semantic_check.py b/dbm-ui/backend/flow/plugins/components/collections/mysql/fake_semantic_check.py index ef083bf1e0..3c2e25afa9 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/mysql/fake_semantic_check.py +++ b/dbm-ui/backend/flow/plugins/components/collections/mysql/fake_semantic_check.py @@ -22,6 +22,7 @@ class FakeSemanticCheck(BaseService): """模拟语义执行,仅用作测试""" def _execute(self, data, parent_data, callback=None) -> bool: + return True kwargs = data.get_one_of_inputs("kwargs") root_id = kwargs.get("root_id") diff --git a/dbm-ui/backend/flow/utils/cc_manage.py b/dbm-ui/backend/flow/utils/cc_manage.py index 54b5530500..7a105a8e63 100644 --- a/dbm-ui/backend/flow/utils/cc_manage.py +++ b/dbm-ui/backend/flow/utils/cc_manage.py @@ -229,7 +229,7 @@ def transfer_host_to_idlemodule( transfer_host_ids = list(set(transfer_host_ids)) if transfer_host_ids: resp = CCApi.transfer_host_to_idlemodule( - {"bk_biz_id": bk_biz_id, "bk_host_id": transfer_host_ids}, raw=True + {"bk_biz_id": bk_biz_id, "bk_host_id": transfer_host_ids}, raw=True, use_admin=True ) if resp.get("result"): return diff --git a/dbm-ui/backend/flow/utils/clear_machine_script.py b/dbm-ui/backend/flow/utils/clear_machine_script.py index 5ae8575750..3a984dbf85 100644 --- a/dbm-ui/backend/flow/utils/clear_machine_script.py +++ b/dbm-ui/backend/flow/utils/clear_machine_script.py @@ -9,9 +9,10 @@ """ from backend.configuration.constants import DBType +from backend.db_services.ipchooser.constants import BkOsTypeCode from backend.flow.consts 
import DBA_ROOT_USER, WINDOW_SYSTEM_JOB_USER -os_script_language_map = {"linux": 1, "window": 5} +os_script_language_map = {BkOsTypeCode.LINUX: 1, BkOsTypeCode.WINDOWS: 5} mysql_clear_machine_script = """ echo "clear mysql crontab...." diff --git a/dbm-ui/backend/ticket/builders/__init__.py b/dbm-ui/backend/ticket/builders/__init__.py index 54b21521d5..de36e9df34 100644 --- a/dbm-ui/backend/ticket/builders/__init__.py +++ b/dbm-ui/backend/ticket/builders/__init__.py @@ -19,10 +19,14 @@ from rest_framework import serializers from backend import env -from backend.configuration.constants import SystemSettingsEnum +from backend.components.dbresource.client import DBResourceApi +from backend.configuration.constants import DBType, SystemSettingsEnum from backend.configuration.models import DBAdministrator, SystemSettings +from backend.db_dirty.constants import MachineEventType, PoolType +from backend.db_dirty.models import DirtyMachine, MachineEvent from backend.db_meta.models import AppCache, Cluster from backend.db_services.dbbase.constants import IpSource +from backend.flow.engine.controller.base import BaseController from backend.iam_app.dataclass.actions import ActionEnum from backend.ticket.constants import TICKET_EXPIRE_DEFAULT_CONFIG, FlowRetryType, FlowType, TicketType from backend.ticket.models import Flow, Ticket, TicketFlowsConfig @@ -260,6 +264,83 @@ def patch_affinity_location(cls, cluster, resource_spec, roles=None): resource_spec[role]["location_spec"] = {"city": cluster.region, "sub_zone_ids": []} +class RecycleParamBuilder(FlowParamBuilder): + """ + 回收主机流程 参数构建器 + 职责:获取单据中的下架机器,并走回收流程 + """ + + controller_map = {DBType.MySQL.value: "MySQLController.mysql_machine_clear_scene"} + + def __init__(self, ticket: Ticket): + super().__init__(ticket) + self.ip_dest = self.ticket_data["ip_recycle"]["ip_dest"] + assert self.ip_dest is not None + + def build_controller_info(self) -> dict: + db_type = self.ticket_data["db_type"] + class_name, flow_name = self.controller_map[db_type].split(".") + module = importlib.import_module(f"backend.flow.engine.controller.{db_type}") + self.controller = getattr(getattr(module, class_name), flow_name) + return super().build_controller_info() + + def format_ticket_data(self): + self.ticket_data = { + "clear_hosts": self.ticket_data["recycle_hosts"], + "ip_dest": self.ip_dest, + # 一批机器的操作系统类型一致,任取一个即可 + "os_name": self.ticket_data["recycle_hosts"][0]["os_name"], + "os_type": self.ticket_data["recycle_hosts"][0]["os_type"], + "db_type": self.ticket.group, + } + self.add_common_params() + + def post_callback(self): + # 转移到故障池,记录机器事件(如果是资源池则资源导入后会记录) + ticket_data = self.ticket.current_flow().details["ticket_data"] + if ticket_data["ip_dest"] != PoolType.Fault: + return + + event = MachineEventType.ToFault + bk_biz_id, recycle_hosts, operator = self.ticket.bk_biz_id, ticket_data["clear_hosts"], self.ticket.creator + MachineEvent.host_event_trigger(bk_biz_id, recycle_hosts, event, operator, self.ticket, standard=True) + + +class ReImportResourceParamBuilder(FlowParamBuilder): + """ + 资源重导入流程 参数构造器 - 此流程目前仅用于回收后使用 + 职责:获取单据中下架的机器,并走资源池导入流程 + """ + + controller = BaseController.import_resource_init_step + + def __init__(self, ticket: Ticket): + super().__init__(ticket) + + def format_ticket_data(self): + recycle_hosts = self.ticket_data["recycle_hosts"] + self.ticket_data = { + "ticket_id": self.ticket.id, + "for_biz": self.ticket_data["ip_recycle"]["for_biz"], + "resource_type": self.ticket.group, + "os_type": recycle_hosts[0]["bk_os_type"], + "hosts": 
recycle_hosts, + "operator": self.ticket.creator, + # 标记为退回 + "return_resource": True, + } + self.add_common_params() + + def pre_callback(self): + # 在run的时候才会生成task id,此时要更新到资源池参数里面 + flow = self.ticket.current_flow() + flow.update_details(task_id=flow.flow_obj_id) + # 添加导入记录 + hosts = flow.details["ticket_data"]["hosts"] + import_record = {"task_id": flow.flow_obj_id, "operator": self.ticket.creator, "hosts": hosts} + DBResourceApi.import_operation_create(params=import_record) + + class TicketFlowBuilder: """ 单据流程构建器 @@ -271,12 +352,18 @@ class TicketFlowBuilder: serializer = None alarm_transform_serializer = None - # 默认的参数构造器 + # 默认任务参数构造器 inner_flow_name: str = "" inner_flow_builder: FlowParamBuilder = None + # 默认暂停参数构造器 pause_node_builder: PauseParamBuilder = PauseParamBuilder + # 默认审批参数构造器 itsm_flow_builder: ItsmParamBuilder = ItsmParamBuilder - + # 默认主机回收参数构造器 + recycle_flow_builder: RecycleParamBuilder = RecycleParamBuilder + # 默认资源重导入参数构造器 + import_resource_flow_builder: ReImportResourceParamBuilder = ReImportResourceParamBuilder + # 默认资源申请参数构造器 # resource_apply_builder和resource_batch_apply_builder只能存在其一,表示是资源池单次申请还是批量申请 resource_apply_builder: ResourceApplyParamBuilder = None resource_batch_apply_builder: ResourceApplyParamBuilder = None @@ -337,6 +424,11 @@ def need_resource_pool(self): """是否存在资源池接入""" return self.ticket.details.get("ip_source") == IpSource.RESOURCE_POOL + @property + def need_recycle(self): + """是否回收主机""" + return self.ticket.details.get("ip_recycle", {}).get("ip_dest") + def custom_ticket_flows(self): return [] @@ -373,12 +465,10 @@ def init_ticket_flows(self): # 判断并添加资源申请节点 if self.need_resource_pool: - if not self.resource_apply_builder: flow_type, resource_builder = FlowType.RESOURCE_BATCH_APPLY, self.resource_batch_apply_builder else: flow_type, resource_builder = FlowType.RESOURCE_APPLY, self.resource_apply_builder - flows.append( Flow( ticket=self.ticket, @@ -405,8 +495,29 @@ def init_ticket_flows(self): # 如果使用资源池,则在最后需要进行资源交付 if self.need_resource_pool: - flow_type = FlowType.RESOURCE_DELIVERY if self.resource_apply_builder else FlowType.RESOURCE_BATCH_DELIVERY - flows.append(Flow(ticket=self.ticket, flow_type=flow_type)) + flows.append(Flow(ticket=self.ticket, flow_type=FlowType.RESOURCE_DELIVERY, flow_alias=_("资源交付"))) + + # 判断并添加主机清理节点 + if self.need_recycle: + flows.append( + Flow( + ticket=self.ticket, + flow_type=FlowType.HOST_RECYCLE.value, + details=self.recycle_flow_builder(self.ticket).get_params(), + flow_alias=_("原主机清理释放"), + ), + ) + + # 判断并添加资源重导入节点 + if self.need_recycle == PoolType.Resource: + flows.append( + Flow( + ticket=self.ticket, + flow_type=FlowType.HOST_IMPORT_RESOURCE.value, + details=self.import_resource_flow_builder(self.ticket).get_params(), + flow_alias=_("原主机回收到资源池"), + ), + ) Flow.objects.bulk_create(flows) return list(Flow.objects.filter(ticket=self.ticket)) @@ -456,12 +567,12 @@ class BuilderFactory: registry = {} # 部署类单据集合 apply_ticket_type = [] + # 回收类单据集合 + recycle_ticket_type = [] # 敏感类单据集合 sensitive_ticket_type = [] # 单据与集群状态的映射 ticket_type__cluster_phase = {} - # 部署类单据和集群类型的映射 - ticket_type__cluster_type = {} # 单据和权限动作/资源类型的映射 ticket_type__iam_action = {} @@ -473,7 +584,6 @@ def register(cls, ticket_type: str, **kwargs) -> Callable: @param kwargs: 单据注册的额外信息,主要是将单据归为不同的集合中,目前有这几种类型 1. is_apply: bool ---- 表示单据是否是部署类单据(类似集群的部署,扩容,替换等) 2. phase: ClusterPhase ---- 表示单据与集群状态的映射 - 3. cluster_type: ClusterType ---- 表示单据与集群类型的映射 4. 
action: ActionMeta ---- 表示单据与权限动作的映射 """ @@ -489,12 +599,12 @@ def inner_wrapper(wrapped_class: TicketFlowBuilder) -> TicketFlowBuilder: if kwargs.get("is_apply") and kwargs.get("is_apply") not in cls.apply_ticket_type: cls.apply_ticket_type.append(ticket_type) + if kwargs.get("is_recycle") and kwargs.get("is_recycle") not in cls.recycle_ticket_type: + cls.recycle_ticket_type.append(ticket_type) if kwargs.get("is_sensitive") and kwargs.get("is_sensitive") not in cls.sensitive_ticket_type: cls.sensitive_ticket_type.append(ticket_type) if kwargs.get("phase"): cls.ticket_type__cluster_phase[ticket_type] = kwargs["phase"] - if kwargs.get("cluster_type"): - cls.ticket_type__cluster_type[ticket_type] = kwargs["cluster_type"] if hasattr(ActionEnum, ticket_type) or kwargs.get("iam"): # 单据类型和权限动作默认一一对应,如果是特殊指定的则通过iam参数传递 cls.ticket_type__iam_action[ticket_type] = getattr(ActionEnum, ticket_type, None) or kwargs.get("iam") diff --git a/dbm-ui/backend/ticket/builders/common/base.py b/dbm-ui/backend/ticket/builders/common/base.py index 8d46cb7830..c2118dd28e 100644 --- a/dbm-ui/backend/ticket/builders/common/base.py +++ b/dbm-ui/backend/ticket/builders/common/base.py @@ -8,6 +8,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ +import itertools import operator import re from collections import defaultdict @@ -19,10 +20,12 @@ from django.utils.translation import gettext_lazy as _ from rest_framework import serializers -from backend.configuration.constants import MASTER_DOMAIN_INITIAL_VALUE, AffinityEnum +from backend.configuration.constants import MASTER_DOMAIN_INITIAL_VALUE, PLAT_BIZ_ID, AffinityEnum from backend.db_meta.enums import AccessLayer, ClusterPhase, ClusterType, InstanceInnerRole, InstanceStatus from backend.db_meta.enums.comm import SystemTagEnum from backend.db_meta.models import Cluster, ExtraProcessInstance, Machine, ProxyInstance, Spec, StorageInstance +from backend.db_services.dbbase.constants import IpDest +from backend.db_services.dbresource.handlers import ResourceHandler from backend.db_services.ipchooser.query.resource import ResourceQueryHelper from backend.db_services.mysql.cluster.handlers import ClusterServiceHandler from backend.db_services.mysql.dumper.handlers import DumperHandler @@ -63,6 +66,18 @@ def fetch_host_ids(details: Dict[str, Any]) -> List[int]: return [item for item in targets if isinstance(item, int)] +def fetch_apply_hosts(details: Dict[str, Any]) -> List[Dict]: + role_hosts = get_target_items_from_details(details, match_keys=["nodes"]) + hosts = list(itertools.chain(*[h for hosts in role_hosts for h in hosts.values()])) + return hosts + + +def fetch_recycle_hosts(details: Dict[str, Any]) -> List[Dict]: + role_hosts = get_target_items_from_details(details, match_keys=["old_nodes"]) + hosts = list(itertools.chain(*[h for hosts in role_hosts for h in hosts.values()])) + return hosts + + def remove_useless_spec(attrs: Dict[str, Any]) -> Dict[str, Any]: # 只保存有意义的规格资源申请 real_resource_spec = {} @@ -114,8 +129,11 @@ class InstanceInfoSerializer(HostInfoSerializer): port = serializers.IntegerField(help_text=_("端口号")) -class MultiInstanceHostInfoSerializer(HostInfoSerializer): - instance_num = serializers.IntegerField +class HostRecycleSerializer(serializers.Serializer): + """主机回收信息""" + + for_biz = serializers.IntegerField(help_text=_("目标业务"), required=False, default=PLAT_BIZ_ID) + ip_dest = 
serializers.ChoiceField(help_text=_("机器流向"), choices=IpDest.get_choices(), default=IpDest.Fault) class SkipToRepresentationMixin(object): @@ -231,12 +249,10 @@ def validate_instance_related_clusters( def validate_duplicate_cluster_name(cls, bk_biz_id, ticket_type, cluster_name): """校验是否存在重复集群名""" - from backend.ticket.builders import BuilderFactory - - cluster_type = BuilderFactory.ticket_type__cluster_type.get(ticket_type, ticket_type) - if Cluster.objects.filter(bk_biz_id=bk_biz_id, cluster_type=cluster_type, name=cluster_name).exists(): + cluster_types = TicketType.get_cluster_type_by_ticket(ticket_type) + if Cluster.objects.filter(bk_biz_id=bk_biz_id, cluster_type__in=cluster_types, name=cluster_name).exists(): raise serializers.ValidationError( - _("业务{}下已经存在同类型: {}, 同名: {} 集群,请重新命名").format(bk_biz_id, cluster_type, cluster_name) + _("业务{}下已经存在同类型: {}, 同名: {} 集群,请重新命名").format(bk_biz_id, cluster_types, cluster_name) ) @classmethod @@ -439,6 +455,8 @@ class BaseTicketFlowBuilderPatchMixin(object): need_patch_cluster_details: bool = True need_patch_spec_details: bool = True need_patch_instance_details: bool = False + need_patch_recycle_host_details: bool = False + need_patch_recycle_cluster_details: bool = False def patch_cluster_details(self): """补充集群信息""" @@ -449,9 +467,7 @@ def patch_cluster_details(self): clusters = { cluster.id: { **cluster.to_dict(), - "bk_cloud_name": cloud_info.get(str(cluster.to_dict().get("bk_cloud_id")), {}).get( - "bk_cloud_name", "" - ), + "bk_cloud_name": cloud_info.get(str(cluster.bk_cloud_id), {}).get("bk_cloud_name", ""), } for cluster in Cluster.objects.filter(id__in=cluster_ids) } @@ -475,6 +491,21 @@ def patch_instance_details(self): instances = {inst.id: inst.simple_desc for inst in StorageInstance.objects.filter(id__in=instance_ids)} self.ticket.details["instances"] = instances + def patch_recycle_host_details(self): + """补充回收主机信息,在回收类单据一定调用此方法""" + bk_biz_id = self.ticket.bk_biz_id + recycle_hosts = fetch_recycle_hosts(self.ticket.details) + if not recycle_hosts: + return + self.ticket.details["recycle_hosts"] = ResourceHandler.standardized_resource_host(recycle_hosts, bk_biz_id) + + def patch_recycle_cluster_details(self): + """补充集群下架后回收主机信息,在下架类单据一定调用此方法""" + bk_biz_id = self.ticket.bk_biz_id + recycle_hosts = Cluster.get_cluster_related_machines(fetch_cluster_ids(self.ticket.details)) + recycle_hosts = [{"bk_host_id": host.bk_host_id} for host in recycle_hosts] + self.ticket.details["recycle_hosts"] = ResourceHandler.standardized_resource_host(recycle_hosts, bk_biz_id) + def patch_ticket_detail(self): if self.need_patch_cluster_details: self.patch_cluster_details() @@ -482,6 +513,10 @@ def patch_ticket_detail(self): self.patch_spec_details() if self.need_patch_instance_details: self.patch_instance_details() + if self.need_patch_recycle_host_details: + self.patch_recycle_host_details() + if self.need_patch_recycle_cluster_details: + self.patch_recycle_cluster_details() self.ticket.save(update_fields=["details", "update_at", "remark"]) diff --git a/dbm-ui/backend/ticket/builders/common/bigdata.py b/dbm-ui/backend/ticket/builders/common/bigdata.py index 6a4c6565e3..2c648a8c84 100644 --- a/dbm-ui/backend/ticket/builders/common/bigdata.py +++ b/dbm-ui/backend/ticket/builders/common/bigdata.py @@ -26,6 +26,7 @@ BaseOperateResourceParamBuilder, BigDataTicketFlowBuilderPatchMixin, CommonValidate, + HostRecycleSerializer, InfluxdbTicketFlowBuilderPatchMixin, format_bigdata_resource_spec, ) @@ -180,12 +181,15 @@ def validate(self, attrs): class 
BigDataReplaceDetailSerializer(BigDataSingleClusterOpsDetailsSerializer): - ip_source = serializers.ChoiceField(help_text=_("主机来源"), choices=IpSource.get_choices()) old_nodes = serializers.DictField(help_text=_("旧节点信息集合"), child=serializers.ListField(help_text=_("节点信息"))) new_nodes = serializers.DictField( help_text=_("新节点信息集合"), child=serializers.ListField(help_text=_("节点信息")), required=False ) resource_spec = serializers.JSONField(help_text=_("规格类型"), required=False) + ip_source = serializers.ChoiceField( + help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL + ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): # 校验替换前后角色类型和数量一致 diff --git a/dbm-ui/backend/ticket/builders/common/recycle.py b/dbm-ui/backend/ticket/builders/common/recycle.py new file mode 100644 index 0000000000..a65d4aaed9 --- /dev/null +++ b/dbm-ui/backend/ticket/builders/common/recycle.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +import logging + +from django.utils.translation import ugettext_lazy as _ +from rest_framework import serializers + +from backend.db_services.dbbase.constants import IpDest +from backend.db_services.dbresource.handlers import ResourceHandler +from backend.ticket import builders +from backend.ticket.builders import RecycleParamBuilder, ReImportResourceParamBuilder, TicketFlowBuilder +from backend.ticket.builders.common.base import HostRecycleSerializer +from backend.ticket.constants import FlowType, TicketType +from backend.ticket.models import Flow + +logger = logging.getLogger("root") + + +class RecycleHostDetailSerializer(serializers.Serializer): + recycle_hosts = serializers.JSONField(help_text=_("机器回收信息")) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收流向")) + + +class RecycleHostResourceParamBuilder(ReImportResourceParamBuilder): + def format_ticket_data(self): + # 导入资源的类型设置为预设的group + group = self.ticket_data["group"] + super().format_ticket_data() + self.ticket_data["resource_type"] = group + + +class RecycleHostParamBuilder(RecycleParamBuilder): + def format_ticket_data(self): + group = self.ticket_data["group"] + super().format_ticket_data() + self.ticket_data["db_type"] = group + + +@builders.BuilderFactory.register(TicketType.RECYCLE_HOST) +class RecycleHostFlowBuilder(TicketFlowBuilder): + serializer = RecycleHostDetailSerializer + import_resource_flow_builder = RecycleHostResourceParamBuilder + recycle_flow_builder = RecycleHostParamBuilder + # 此单据不属于任何db,暂定为common + group = "common" + + def init_ticket_flows(self): + # 主机清理 + flows = [ + Flow( + ticket=self.ticket, + flow_type=FlowType.HOST_RECYCLE.value, + details=self.recycle_flow_builder(self.ticket).get_params(), + ), + ] + # 导入资源池 + if self.ticket.details["ip_recycle"]["ip_dest"] == IpDest.Resource: + flows.append( + Flow( + ticket=self.ticket, + 
flow_type=FlowType.HOST_IMPORT_RESOURCE.value, + details=self.import_resource_flow_builder(self.ticket).get_params(), + ), + ) + + Flow.objects.bulk_create(flows) + return list(Flow.objects.filter(ticket=self.ticket)) + + def patch_ticket_detail(self): + recycle_hosts = self.ticket.details["recycle_hosts"] + self.ticket.update_details(recycle_hosts=ResourceHandler.standardized_resource_host(recycle_hosts)) diff --git a/dbm-ui/backend/ticket/builders/doris/doris_destroy.py b/dbm-ui/backend/ticket/builders/doris/doris_destroy.py index 472255c192..5cde8bfa96 100644 --- a/dbm-ui/backend/ticket/builders/doris/doris_destroy.py +++ b/dbm-ui/backend/ticket/builders/doris/doris_destroy.py @@ -15,6 +15,7 @@ from backend.db_meta.enums import ClusterPhase from backend.flow.engine.controller.doris import DorisController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.common.bigdata import BaseDorisTicketFlowBuilder, BigDataTakeDownDetailSerializer from backend.ticket.constants import TicketType @@ -22,7 +23,7 @@ class DorisDestroyDetailSerializer(BigDataTakeDownDetailSerializer): - pass + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) class DorisDestroyFlowParamBuilder(builders.FlowParamBuilder): @@ -34,3 +35,4 @@ class DorisDestroyFlowBuilder(BaseDorisTicketFlowBuilder): serializer = DorisDestroyDetailSerializer inner_flow_builder = DorisDestroyFlowParamBuilder inner_flow_name = _("DORIS集群删除") + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/doris/doris_shrink.py b/dbm-ui/backend/ticket/builders/doris/doris_shrink.py index 5b30b16dbf..55aebe0dd1 100644 --- a/dbm-ui/backend/ticket/builders/doris/doris_shrink.py +++ b/dbm-ui/backend/ticket/builders/doris/doris_shrink.py @@ -19,6 +19,7 @@ from backend.flow.engine.controller.doris import DorisController from backend.ticket import builders from backend.ticket.builders.common import constants +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.common.bigdata import BaseDorisTicketFlowBuilder, BigDataSingleClusterOpsDetailsSerializer from backend.ticket.constants import TicketType @@ -32,7 +33,8 @@ class NodesSerializer(serializers.Serializer): cold = serializers.ListField(help_text=_("cold信息列表"), child=serializers.DictField()) observer = serializers.ListField(help_text=_("observer信息列表"), child=serializers.DictField()) - nodes = NodesSerializer(help_text=_("nodes节点列表")) + old_nodes = NodesSerializer(help_text=_("nodes节点列表")) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): super().validate(attrs) @@ -109,6 +111,7 @@ class DorisShrinkFlowParamBuilder(builders.FlowParamBuilder): controller = DorisController.doris_shrink_scene def format_ticket_data(self): + self.ticket_data["nodes"] = self.ticket_data.pop("old_nodes") super().format_ticket_data() @@ -117,3 +120,4 @@ class DorisShrinkFlowBuilder(BaseDorisTicketFlowBuilder): serializer = DorisShrinkDetailSerializer inner_flow_builder = DorisShrinkFlowParamBuilder inner_flow_name = _("Doris集群缩容") + need_patch_recycle_host_details = True diff --git a/dbm-ui/backend/ticket/builders/es/es_destroy.py b/dbm-ui/backend/ticket/builders/es/es_destroy.py index c87b0ca6ef..5b0ec8c7d1 100644 --- a/dbm-ui/backend/ticket/builders/es/es_destroy.py +++ b/dbm-ui/backend/ticket/builders/es/es_destroy.py @@ -29,8 +29,9 @@ class EsDestroyFlowParamBuilder(builders.FlowParamBuilder): controller = 
EsController.es_destroy_scene -@builders.BuilderFactory.register(TicketType.ES_DESTROY, phase=ClusterPhase.DESTROY) +@builders.BuilderFactory.register(TicketType.ES_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True) class EsDestroyFlowBuilder(BaseEsTicketFlowBuilder): serializer = EsDestroyDetailSerializer inner_flow_builder = EsDestroyFlowParamBuilder inner_flow_name = _("ES集群删除") + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/es/es_replace.py b/dbm-ui/backend/ticket/builders/es/es_replace.py index 29842b6d3e..e3dbafd06a 100644 --- a/dbm-ui/backend/ticket/builders/es/es_replace.py +++ b/dbm-ui/backend/ticket/builders/es/es_replace.py @@ -46,7 +46,7 @@ def post_callback(self): next_flow.save(update_fields=["details"]) -@builders.BuilderFactory.register(TicketType.ES_REPLACE, is_apply=True) +@builders.BuilderFactory.register(TicketType.ES_REPLACE, is_apply=True, is_recycle=True) class EsReplaceFlowBuilder(BaseEsTicketFlowBuilder): serializer = EsReplaceDetailSerializer inner_flow_builder = EsReplaceFlowParamBuilder diff --git a/dbm-ui/backend/ticket/builders/es/es_shrink.py b/dbm-ui/backend/ticket/builders/es/es_shrink.py index 9bc091f147..664d007b11 100644 --- a/dbm-ui/backend/ticket/builders/es/es_shrink.py +++ b/dbm-ui/backend/ticket/builders/es/es_shrink.py @@ -18,6 +18,7 @@ from backend.db_meta.models import Cluster from backend.flow.engine.controller.es import EsController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.common.bigdata import BaseEsTicketFlowBuilder, BigDataSingleClusterOpsDetailsSerializer from backend.ticket.constants import TicketType @@ -31,7 +32,8 @@ class NodesSerializer(serializers.Serializer): cold = serializers.ListField(help_text=_("cold信息列表"), child=serializers.DictField()) client = serializers.ListField(help_text=_("client信息列表"), child=serializers.DictField()) - nodes = NodesSerializer(help_text=_("nodes节点列表")) + old_nodes = NodesSerializer(help_text=_("nodes节点列表")) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): super().validate(attrs) @@ -68,11 +70,13 @@ class EsShrinkFlowParamBuilder(builders.FlowParamBuilder): controller = EsController.es_shrink_scene def format_ticket_data(self): + self.ticket_data["nodes"] = self.ticket_data.pop("old_nodes") super().format_ticket_data() -@builders.BuilderFactory.register(TicketType.ES_SHRINK) +@builders.BuilderFactory.register(TicketType.ES_SHRINK, is_recycle=True) class EsShrinkFlowBuilder(BaseEsTicketFlowBuilder): serializer = EsShrinkDetailSerializer inner_flow_builder = EsShrinkFlowParamBuilder inner_flow_name = _("ES集群缩容") + need_patch_recycle_host_details = True diff --git a/dbm-ui/backend/ticket/builders/hdfs/hdfs_destroy.py b/dbm-ui/backend/ticket/builders/hdfs/hdfs_destroy.py index d6b4cc3567..e20c718d18 100644 --- a/dbm-ui/backend/ticket/builders/hdfs/hdfs_destroy.py +++ b/dbm-ui/backend/ticket/builders/hdfs/hdfs_destroy.py @@ -29,8 +29,9 @@ class HdfsDestroyFlowParamBuilder(builders.FlowParamBuilder): controller = HdfsController.hdfs_destroy_scene -@builders.BuilderFactory.register(TicketType.HDFS_DESTROY, phase=ClusterPhase.DESTROY) +@builders.BuilderFactory.register(TicketType.HDFS_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True) class HdfsDestroyFlowBuilder(BaseHdfsTicketFlowBuilder): serializer = HdfsDestroyDetailSerializer inner_flow_builder = HdfsDestroyFlowParamBuilder inner_flow_name = _("HDFS 集群删除") + 
need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/hdfs/hdfs_replace.py b/dbm-ui/backend/ticket/builders/hdfs/hdfs_replace.py index e042901523..61c312f5ce 100644 --- a/dbm-ui/backend/ticket/builders/hdfs/hdfs_replace.py +++ b/dbm-ui/backend/ticket/builders/hdfs/hdfs_replace.py @@ -49,7 +49,7 @@ class HdfsResourceParamBuilder(BigDataReplaceResourceParamBuilder): pass -@builders.BuilderFactory.register(TicketType.HDFS_REPLACE, is_apply=True) +@builders.BuilderFactory.register(TicketType.HDFS_REPLACE, is_apply=True, is_recycle=True) class HdfsReplaceFlowBuilder(BaseHdfsTicketFlowBuilder): serializer = HdfsReplaceDetailSerializer inner_flow_builder = HdfsReplaceFlowParamBuilder diff --git a/dbm-ui/backend/ticket/builders/hdfs/hdfs_shrink.py b/dbm-ui/backend/ticket/builders/hdfs/hdfs_shrink.py index c3ef3b4c1b..e762a7679f 100644 --- a/dbm-ui/backend/ticket/builders/hdfs/hdfs_shrink.py +++ b/dbm-ui/backend/ticket/builders/hdfs/hdfs_shrink.py @@ -18,6 +18,7 @@ from backend.db_meta.models import Cluster from backend.flow.engine.controller.hdfs import HdfsController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.common.bigdata import BaseHdfsTicketFlowBuilder, BigDataSingleClusterOpsDetailsSerializer from backend.ticket.constants import TicketType @@ -29,7 +30,8 @@ class HdfsShrinkDetailSerializer(BigDataSingleClusterOpsDetailsSerializer): class NodesSerializer(serializers.Serializer): datanode = serializers.ListField(help_text=_("broker信息列表"), child=serializers.DictField()) - nodes = NodesSerializer(help_text=_("nodes节点信息")) + old_nodes = NodesSerializer(help_text=_("nodes节点信息")) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): super().validate(attrs) @@ -65,11 +67,13 @@ class HdfsShrinkFlowParamBuilder(builders.FlowParamBuilder): controller = HdfsController.hdfs_shrink_scene def format_ticket_data(self): + self.ticket_data["nodes"] = self.ticket_data.pop("old_nodes") super().format_ticket_data() -@builders.BuilderFactory.register(TicketType.HDFS_SHRINK) +@builders.BuilderFactory.register(TicketType.HDFS_SHRINK, is_recycle=True) class HdfsShrinkFlowBuilder(BaseHdfsTicketFlowBuilder): serializer = HdfsShrinkDetailSerializer inner_flow_builder = HdfsShrinkFlowParamBuilder inner_flow_name = _("HDFS 集群缩容") + need_patch_recycle_host_details = True diff --git a/dbm-ui/backend/ticket/builders/kafka/kafka_destroy.py b/dbm-ui/backend/ticket/builders/kafka/kafka_destroy.py index f597019187..a16dee5035 100644 --- a/dbm-ui/backend/ticket/builders/kafka/kafka_destroy.py +++ b/dbm-ui/backend/ticket/builders/kafka/kafka_destroy.py @@ -30,8 +30,9 @@ class KafkaDestroyFlowParamBuilder(builders.FlowParamBuilder): controller = KafkaController.kafka_destroy_scene -@builders.BuilderFactory.register(TicketType.KAFKA_DESTROY, phase=ClusterPhase.DESTROY) +@builders.BuilderFactory.register(TicketType.KAFKA_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True) class KafkaDestroyFlowBuilder(BaseKafkaTicketFlowBuilder): serializer = KafkaDestroyDetailSerializer inner_flow_builder = KafkaDestroyFlowParamBuilder inner_flow_name = _("Kafka 集群销毁") + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/kafka/kafka_replace.py b/dbm-ui/backend/ticket/builders/kafka/kafka_replace.py index 10ceaf3011..09fb572d9f 100644 --- a/dbm-ui/backend/ticket/builders/kafka/kafka_replace.py +++ b/dbm-ui/backend/ticket/builders/kafka/kafka_replace.py @@ 
-40,7 +40,7 @@ class KafkaReplaceResourceParamBuilder(BigDataReplaceResourceParamBuilder): pass -@builders.BuilderFactory.register(TicketType.KAFKA_REPLACE, is_apply=True) +@builders.BuilderFactory.register(TicketType.KAFKA_REPLACE, is_apply=True, is_recycle=True) class KafkaReplaceFlowBuilder(BaseKafkaTicketFlowBuilder): serializer = KafkaReplaceDetailSerializer inner_flow_builder = KafkaReplaceFlowParamBuilder diff --git a/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py b/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py index 8f8b4b0ad4..6df48ff964 100644 --- a/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py +++ b/dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py @@ -18,6 +18,7 @@ from backend.db_meta.models import Cluster from backend.flow.engine.controller.kafka import KafkaController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.common.bigdata import BaseKafkaTicketFlowBuilder, BigDataSingleClusterOpsDetailsSerializer from backend.ticket.constants import TicketType @@ -30,7 +31,8 @@ class KafkaShrinkDetailSerializer(BigDataSingleClusterOpsDetailsSerializer): class NodesSerializer(serializers.Serializer): broker = serializers.ListField(help_text=_("broker信息列表"), child=serializers.DictField()) - nodes = NodesSerializer(help_text=_("nodes节点信息")) + old_nodes = NodesSerializer(help_text=_("nodes节点信息")) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): super().validate(attrs) @@ -67,11 +69,13 @@ class KafkaShrinkFlowParamBuilder(builders.FlowParamBuilder): controller = KafkaController.kafka_shrink_scene def format_ticket_data(self): + self.ticket_data["nodes"] = self.ticket_data.pop("old_nodes") super().format_ticket_data() -@builders.BuilderFactory.register(TicketType.KAFKA_SHRINK) +@builders.BuilderFactory.register(TicketType.KAFKA_SHRINK, is_recycle=True) class KafkaShrinkFlowBuilder(BaseKafkaTicketFlowBuilder): serializer = KafkaShrinkDetailSerializer inner_flow_builder = KafkaShrinkFlowParamBuilder inner_flow_name = _("Kafka 集群缩容") + need_patch_recycle_host_details = True diff --git a/dbm-ui/backend/ticket/builders/mysql/base.py b/dbm-ui/backend/ticket/builders/mysql/base.py index 008cc5e994..0d813cc98c 100644 --- a/dbm-ui/backend/ticket/builders/mysql/base.py +++ b/dbm-ui/backend/ticket/builders/mysql/base.py @@ -9,7 +9,7 @@ specific language governing permissions and limitations under the License. 
""" import re -from typing import Any, Dict, List, Union +from typing import Dict, List, Union from django.utils.translation import ugettext as _ from rest_framework import serializers @@ -31,6 +31,7 @@ fetch_cluster_ids, ) from backend.ticket.constants import TicketType +from backend.utils.basic import get_target_items_from_details class BaseMySQLTicketFlowBuilder(MySQLTicketFlowBuilderPatchMixin, TicketFlowBuilder): @@ -85,21 +86,6 @@ class MySQLBaseOperateDetailSerializer(SkipToRepresentationMixin, serializers.Se ClusterDBHAStatusFlags.BackendMasterUnavailable: MASTER_UNAVAILABLE_WHITELIST, } - @classmethod - def fetch_obj_by_keys(cls, obj_dict: Dict, keys: List[str]): - """从给定的字典中提取key值""" - objs: List[Any] = [] - for key in keys: - if key not in obj_dict: - continue - - if isinstance(obj_dict[key], list): - objs.extend(obj_dict[key]) - else: - objs.append(obj_dict[key]) - - return objs - def validate_cluster_can_access(self, attrs): """校验集群状态是否可以提单""" clusters = Cluster.objects.filter(id__in=fetch_cluster_ids(details=attrs)) @@ -123,8 +109,8 @@ def validate_cluster_can_access(self, attrs): def validate_hosts_clusters_in_same_cloud_area(self, attrs, host_key: List[str], cluster_key: List[str]): """校验新增机器和集群是否在同一云区域下""" for info in attrs["infos"]: - host_infos = self.fetch_obj_by_keys(info, host_key) - cluster_ids = self.fetch_obj_by_keys(info, cluster_key) + host_infos = get_target_items_from_details(info, host_key) + cluster_ids = get_target_items_from_details(info, cluster_key) if not CommonValidate.validate_hosts_clusters_in_same_cloud_area(host_infos, cluster_ids): raise serializers.ValidationError(_("请保证所选集群{}与新增机器{}在同一云区域下").format(cluster_ids, host_infos)) @@ -132,7 +118,7 @@ def validate_instance_role(self, attrs, instance_key: List[str], role: Union[Acc """校验实例的角色类型是否一致""" inst_list: List[Dict] = [] for info in attrs["infos"]: - inst_list.extend(self.fetch_obj_by_keys(info, instance_key)) + inst_list.extend(get_target_items_from_details(info, instance_key)) if not CommonValidate.validate_instance_role(inst_list, role): raise serializers.ValidationError(_("请保证实例f{}的角色类型为{}").format(inst_list, role)) @@ -148,8 +134,8 @@ def validate_instance_related_clusters( """校验实例的关联集群是否一致""" # TODO: 貌似这里只能循环校验,数据量大可能会带来性能问题 for info in attrs["infos"]: - inst = self.fetch_obj_by_keys(info, instance_key)[0] - cluster_ids = self.fetch_obj_by_keys(info, cluster_key) + inst = get_target_items_from_details(info, instance_key)[0] + cluster_ids = get_target_items_from_details(info, cluster_key) if not CommonValidate.validate_instance_related_clusters(inst, cluster_ids, role): raise serializers.ValidationError(_("请保证所选实例{}的关联集群为{}").format(inst, cluster_ids)) diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_add_slave.py b/dbm-ui/backend/ticket/builders/mysql/mysql_add_slave.py index 373fed42d7..81c7daf57f 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_add_slave.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_add_slave.py @@ -12,11 +12,13 @@ from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from backend.configuration.constants import AffinityEnum from backend.db_meta.enums import ClusterType +from backend.db_meta.models import StorageInstance from backend.db_services.dbbase.constants import IpSource from backend.flow.engine.controller.mysql import MySQLController from backend.ticket import builders -from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer +from 
backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer, fetch_cluster_ids from backend.ticket.builders.common.constants import MySQLBackupSource from backend.ticket.builders.mysql.base import BaseMySQLHATicketFlowBuilder, MySQLBaseOperateDetailSerializer from backend.ticket.constants import TicketType @@ -65,6 +67,31 @@ def format_ticket_data(self): class MysqlAddSlaveResourceParamBuilder(BaseOperateResourceParamBuilder): + @classmethod + def patch_slave_subzone(cls, ticket_data): + cluster_ids = fetch_cluster_ids(ticket_data) + masters = ( + StorageInstance.objects.select_related("machine") + .prefetch_related("cluster") + .filter(cluster__in=cluster_ids) + ) + cluster_id__master_map = {master.cluster.first().id: master for master in masters} + for info in ticket_data["infos"]: + resource_spec = info["resource_spec"]["new_slave"] + master_subzone_id = cluster_id__master_map[info["cluster_ids"][0]].machine.bk_sub_zone_id + # 同城跨园区,要求slave和master在不同subzone + if resource_spec["affinity"] == AffinityEnum.CROS_SUBZONE: + resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=False) + # 同城同园区,要求slave和master在一个subzone + elif resource_spec["affinity"] in [AffinityEnum.SAME_SUBZONE, AffinityEnum.SAME_SUBZONE_CROSS_SWTICH]: + resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=True) + + def format(self): + # 补充城市和亲和性 + self.patch_info_affinity_location() + # 新申请的slave需要根据master来保证在同一园区/不同园区 + self.patch_slave_subzone(self.ticket_data) + def post_callback(self): next_flow = self.ticket.next_flow() ticket_data = next_flow.details["ticket_data"] diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py b/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py index 39a1981708..ba8a836d11 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py @@ -14,9 +14,10 @@ from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from backend.db_services.dbbase.constants import IpSource from backend.flow.engine.controller.mysql import MySQLController from backend.ticket import builders -from backend.ticket.builders.common.base import HostInfoSerializer +from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer from backend.ticket.builders.common.constants import MySQLBackupSource, RollbackBuildClusterType from backend.ticket.builders.common.field import DBTimezoneField from backend.ticket.builders.mysql.base import ( @@ -33,6 +34,7 @@ class FixPointRollbackSerializer(serializers.Serializer): cluster_id = serializers.IntegerField(help_text=_("集群ID")) target_cluster_id = serializers.IntegerField(help_text=_("回档集群ID"), default=False) rollback_host = HostInfoSerializer(help_text=_("备份新机器"), default=False) + resource_spec = serializers.JSONField(help_text=_("资源规格"), required=False) backup_source = serializers.ChoiceField(help_text=_("备份源"), choices=MySQLBackupSource.get_choices()) rollback_time = DBTimezoneField( help_text=_("回档时间"), required=False, allow_blank=True, allow_null=True, default="" @@ -49,6 +51,7 @@ class FixPointRollbackSerializer(serializers.Serializer): help_text=_("回档集群类型"), choices=RollbackBuildClusterType.get_choices() ) ignore_check_db = serializers.BooleanField(help_text=_("是否忽略业务库"), required=False, default=False) + ip_source = serializers.ChoiceField(help_text=_("机器来源"), 
choices=IpSource.get_choices(), required=False) infos = serializers.ListSerializer(help_text=_("定点构造信息"), child=FixPointRollbackSerializer()) @classmethod @@ -112,9 +115,15 @@ def build_controller_info(self) -> dict: return super().build_controller_info() +class MysqlFixPointRollbackResourceParamBuilder(BaseOperateResourceParamBuilder): + def format(self): + self.patch_info_affinity_location() + + @builders.BuilderFactory.register(TicketType.MYSQL_ROLLBACK_CLUSTER) class MysqlFixPointRollbackFlowBuilder(BaseMySQLTicketFlowBuilder): serializer = MySQLFixPointRollbackDetailSerializer inner_flow_builder = MySQLFixPointRollbackFlowParamBuilder + resource_batch_apply_builder = MysqlFixPointRollbackResourceParamBuilder inner_flow_name = _("定点构造执行") retry_type = FlowRetryType.MANUAL_RETRY diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py b/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py index 48155291de..50c7d7775a 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py @@ -18,6 +18,7 @@ from backend.flow.engine.controller.tbinlogdumper import TBinlogDumperController from backend.iam_app.dataclass.actions import ActionEnum from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.mysql.base import BaseMySQLHATicketFlowBuilder, MySQLClustersTakeDownDetailsSerializer from backend.ticket.builders.tbinlogdumper.dumper_reduce_nodes import TbinlogdumperReduceNodesFlowParamBuilder from backend.ticket.constants import FlowRetryType, FlowType, TicketType @@ -25,7 +26,7 @@ class MysqlHADestroyDetailSerializer(MySQLClustersTakeDownDetailsSerializer): - pass + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) class MysqlHADestroyFlowParamBuilder(builders.FlowParamBuilder): @@ -41,7 +42,7 @@ def format_ticket_data(self): @builders.BuilderFactory.register( - TicketType.MYSQL_HA_DESTROY, phase=ClusterPhase.DESTROY, iam=ActionEnum.MYSQL_DESTROY + TicketType.MYSQL_HA_DESTROY, phase=ClusterPhase.DESTROY, iam=ActionEnum.MYSQL_DESTROY, is_recycle=True ) class MysqlHaDestroyFlowBuilder(BaseMySQLHATicketFlowBuilder): """Mysql下架流程的构建基类""" @@ -51,6 +52,7 @@ class MysqlHaDestroyFlowBuilder(BaseMySQLHATicketFlowBuilder): inner_flow_name = _("MySQL高可用销毁执行") dumper_flow_builder = MysqlDumperDestroyParamBuilder retry_type = FlowRetryType.MANUAL_RETRY + need_patch_recycle_cluster_details = True def cluster_dumper_destroy(self): cluster_ids = self.ticket.details["cluster_ids"] diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py index c0ad45184b..854b070384 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py @@ -12,11 +12,16 @@ from django.utils.translation import gettext_lazy as _ from rest_framework import serializers -from backend.db_meta.enums import ClusterType +from backend.db_meta.enums import ClusterType, InstanceInnerRole +from backend.db_meta.models import Cluster from backend.db_services.dbbase.constants import IpSource from backend.flow.engine.controller.mysql import MySQLController from backend.ticket import builders -from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer +from backend.ticket.builders.common.base import ( + BaseOperateResourceParamBuilder, + HostInfoSerializer, + HostRecycleSerializer, +) 
from backend.ticket.builders.common.constants import MySQLBackupSource from backend.ticket.builders.mysql.base import MySQLBaseOperateDetailSerializer from backend.ticket.builders.mysql.mysql_master_slave_switch import ( @@ -36,6 +41,7 @@ class MigrateClusterInfoSerializer(serializers.Serializer): ip_source = serializers.ChoiceField( help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) infos = serializers.ListField(help_text=_("迁移主从信息"), child=MigrateClusterInfoSerializer()) backup_source = serializers.ChoiceField( help_text=_("备份源"), choices=MySQLBackupSource.get_choices(), default=MySQLBackupSource.REMOTE @@ -62,6 +68,10 @@ class MysqlMigrateClusterParamBuilder(MysqlMasterSlaveSwitchParamBuilder): controller = MySQLController.mysql_migrate_remote_scene def format_ticket_data(self): + for info in self.ticket_data["infos"]: + info["old_master_ip"] = info["old_nodes"]["old_master"][0]["ip"] + info["old_slave_ip"] = info["old_nodes"]["old_slave"][0]["ip"] + if self.ticket_data["ip_source"] == IpSource.RESOURCE_POOL: return @@ -71,19 +81,39 @@ def format_ticket_data(self): class MysqlMigrateClusterResourceParamBuilder(BaseOperateResourceParamBuilder): + def format(self): + self.patch_info_affinity_location(roles=["backend_group"]) + def post_callback(self): next_flow = self.ticket.next_flow() ticket_data = next_flow.details["ticket_data"] for info in ticket_data["infos"]: - info["bk_new_master"], info["bk_new_slave"] = info.pop("new_master")[0], info.pop("new_slave")[0] + backend = info.pop("backend_group")[0] + info["bk_new_master"], info["bk_new_slave"] = backend["master"], backend["slave"] info["new_master_ip"], info["new_slave_ip"] = info["bk_new_master"]["ip"], info["bk_new_slave"]["ip"] - next_flow.save(update_fields=["details"]) -@builders.BuilderFactory.register(TicketType.MYSQL_MIGRATE_CLUSTER, is_apply=True) +@builders.BuilderFactory.register(TicketType.MYSQL_MIGRATE_CLUSTER, is_apply=True, is_recycle=True) class MysqlMigrateClusterFlowBuilder(MysqlMasterSlaveSwitchFlowBuilder): serializer = MysqlMigrateClusterDetailSerializer inner_flow_builder = MysqlMigrateClusterParamBuilder inner_flow_name = TicketType.get_choice_label(TicketType.MYSQL_MIGRATE_CLUSTER) resource_batch_apply_builder = MysqlMigrateClusterResourceParamBuilder + need_patch_recycle_host_details = True + + @staticmethod + def get_old_master_slave_host(info): + # 同机关联情况下,任取一台集群 + cluster = Cluster.objects.get(id=info["cluster_ids"][0]) + master = cluster.storageinstance_set.get(instance_inner_role=InstanceInnerRole.MASTER) + slave = cluster.storageinstance_set.get(instance_inner_role=InstanceInnerRole.SLAVE, is_stand_by=True) + # 补充下架的机器信息 + info["old_nodes"] = {"old_master": [master.machine.simple_desc], "old_slave": [slave.machine.simple_desc]} + return info + + def patch_ticket_detail(self): + # mysql主从迁移会下架掉master和slave(stand by) + for info in self.ticket.details["infos"]: + self.get_old_master_slave_host(info) + super().patch_ticket_detail() diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py index a9268aec56..76c10f84e5 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py @@ -26,6 +26,7 @@ BaseOperateResourceParamBuilder, DisplayInfoSerializer, HostInfoSerializer, + HostRecycleSerializer, fetch_cluster_ids, ) from 
diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py
index a9268aec56..76c10f84e5 100644
--- a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py
+++ b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_upgrade.py
@@ -26,6 +26,7 @@
     BaseOperateResourceParamBuilder,
     DisplayInfoSerializer,
     HostInfoSerializer,
+    HostRecycleSerializer,
     fetch_cluster_ids,
 )
 from backend.ticket.builders.common.constants import MySQLBackupSource
@@ -34,6 +35,7 @@
     MysqlMasterSlaveSwitchFlowBuilder,
     MysqlMasterSlaveSwitchParamBuilder,
 )
+from backend.ticket.builders.mysql.mysql_migrate_cluster import MysqlMigrateClusterFlowBuilder
 from backend.ticket.constants import TicketType
@@ -56,6 +58,7 @@ class ReadOnlySlaveSerializer(serializers.Serializer):
     ip_source = serializers.ChoiceField(
         help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT
     )
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))
     backup_source = serializers.ChoiceField(help_text=_("备份源"), choices=MySQLBackupSource.get_choices())
     infos = serializers.ListField(help_text=_("添加信息"), child=InfoSerializer())
     force = serializers.BooleanField(help_text=_("是否强制执行"), required=False, default=False)
@@ -130,12 +133,13 @@ def post_callback(self):
         super().post_callback()


-@builders.BuilderFactory.register(TicketType.MYSQL_MIGRATE_UPGRADE, is_apply=True)
+@builders.BuilderFactory.register(TicketType.MYSQL_MIGRATE_UPGRADE, is_apply=True, is_recycle=True)
 class MysqlMigrateUpgradeFlowBuilder(MysqlMasterSlaveSwitchFlowBuilder):
     serializer = MysqlMigrateUpgradeDetailSerializer
     inner_flow_builder = MysqlMigrateUpgradeParamBuilder
     inner_flow_name = TicketType.get_choice_label(TicketType.MYSQL_MIGRATE_UPGRADE)
     resource_batch_apply_builder = MysqlMigrateUpgradeResourceParamBuilder
+    need_patch_recycle_host_details = True

     def patch_ticket_detail(self):
         """mysql_master -> backend_group"""
@@ -145,7 +149,6 @@ def patch_ticket_detail(self):

         resource_spec = {}
         cluster_ids = list(itertools.chain(*[infos["cluster_ids"] for infos in self.ticket.details["infos"]]))
-
         id_cluster_map = Cluster.objects.prefetch_related(
             "storageinstance_set", "storageinstance_set__machine"
         ).in_bulk(cluster_ids, field_name="id")
@@ -170,5 +173,7 @@ def patch_ticket_detail(self):
                 "affinity": AffinityEnum.NONE.value,
             }
             info["resource_spec"] = resource_spec
+            # 补充下架机器的信息
+            MysqlMigrateClusterFlowBuilder.get_old_master_slave_host(info)

         self.ticket.save(update_fields=["details"])
diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_add.py b/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_add.py
index 55be8aa48a..f22692d077 100644
--- a/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_add.py
+++ b/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_add.py
@@ -61,6 +61,11 @@ def format_ticket_data(self):


 class MysqlProxyAddResourceParamBuilder(BaseOperateResourceParamBuilder):
+    def format(self):
+        self.patch_info_affinity_location(roles=["new_proxy"])
+        for info in self.ticket_data["infos"]:
+            info["resource_spec"]["new_proxy"]["group_count"] = 2
+
     def post_callback(self):
         next_flow = self.ticket.next_flow()
         ticket_data = next_flow.details["ticket_data"]
diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_switch.py b/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_switch.py
index 48dd6053f9..306ea8d37e 100644
--- a/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_switch.py
+++ b/dbm-ui/backend/ticket/builders/mysql/mysql_proxy_switch.py
@@ -19,7 +19,7 @@
 from backend.ticket.builders.common.base import (
     BaseOperateResourceParamBuilder,
     DisplayInfoSerializer,
-    HostInfoSerializer,
+    HostRecycleSerializer,
     InstanceInfoSerializer,
 )
 from backend.ticket.builders.mysql.base import (
@@ -32,14 +32,17 @@ class MysqlProxySwitchDetailSerializer(MySQLBaseOperateDetailSerializer):
     class SwitchInfoSerializer(DisplayInfoSerializer):
+        class OldProxySerializer(serializers.Serializer):
+            origin_proxy = serializers.ListSerializer(child=InstanceInfoSerializer())
+
         cluster_ids = serializers.ListField(help_text=_("集群ID列表"), child=serializers.IntegerField())
-        origin_proxy = InstanceInfoSerializer(help_text=_("旧Proxy实例信息"))
-        target_proxy = HostInfoSerializer(help_text=_("新Proxy机器信息"), required=False)
-        resource_spec = serializers.JSONField(help_text=_("资源规格"), required=False)
+        old_nodes = OldProxySerializer(help_text=_("旧Proxy实例信息"))
+        resource_spec = serializers.JSONField(help_text=_("资源规格"))

     ip_source = serializers.ChoiceField(
-        help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT
+        help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.RESOURCE_POOL
     )
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))
     force = serializers.BooleanField(help_text=_("是否强制替换"), required=False, default=False)
     infos = serializers.ListField(help_text=_("替换信息"), child=SwitchInfoSerializer())
@@ -74,12 +77,13 @@ class MysqlProxySwitchParamBuilder(builders.FlowParamBuilder):

     def format_ticket_data(self):
         for info in self.ticket_data["infos"]:
-            info["origin_proxy_ip"] = info["origin_proxy"]
-            if self.ticket_data["ip_source"] == IpSource.MANUAL_INPUT:
-                info["target_proxy_ip"] = info["target_proxy"]
+            info["origin_proxy_ip"] = info["old_nodes"]["origin_proxy"]


 class MysqlProxySwitchResourceParamBuilder(BaseOperateResourceParamBuilder):
+    def format(self):
+        self.patch_info_affinity_location(roles=["target_proxy"])
+
     def post_callback(self):
         next_flow = self.ticket.next_flow()
         ticket_data = next_flow.details["ticket_data"]
@@ -92,9 +96,12 @@ def post_callback(self):

 @builders.BuilderFactory.register(TicketType.MYSQL_PROXY_SWITCH, is_apply=True)
 class MysqlProxySwitchFlowBuilder(BaseMySQLHATicketFlowBuilder):
+    need_patch_recycle_host_details = True
+    retry_type = FlowRetryType.MANUAL_RETRY
     serializer = MysqlProxySwitchDetailSerializer
-    inner_flow_builder = MysqlProxySwitchParamBuilder
+    inner_flow_name = _("替换PROXY执行")
+    inner_flow_builder = MysqlProxySwitchParamBuilder
+    resource_batch_apply_builder = MysqlProxySwitchResourceParamBuilder
-    retry_type = FlowRetryType.MANUAL_RETRY
     pause_node_builder = MySQLBasePauseParamBuilder
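The same serializer move recurs in nearly every file below: hosts slated for teardown migrate from ad-hoc top-level fields into a nested old_nodes object, so the recycle stage can find them uniformly. A self-contained sketch of that shape (the field set is hypothetical; requires djangorestframework, configured minimally here):

    from django.conf import settings

    if not settings.configured:
        settings.configure()  # minimal setup so DRF fields validate standalone

    from rest_framework import serializers

    class HostSerializer(serializers.Serializer):  # stand-in for InstanceInfoSerializer
        ip = serializers.IPAddressField()
        bk_host_id = serializers.IntegerField()

    class OldProxySerializer(serializers.Serializer):
        origin_proxy = serializers.ListSerializer(child=HostSerializer())

    payload = {"origin_proxy": [{"ip": "10.0.0.3", "bk_host_id": 13}]}
    s = OldProxySerializer(data=payload)
    assert s.is_valid(), s.errors
    # Downstream code then reads info["old_nodes"]["origin_proxy"] the same way everywhere.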
diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py b/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py
index 568d483a43..f236495e73 100644
--- a/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py
+++ b/dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py
@@ -12,23 +12,40 @@
 from django.utils.translation import gettext_lazy as _
 from rest_framework import serializers

+from backend.configuration.constants import AffinityEnum
 from backend.db_meta.enums import ClusterType, InstanceInnerRole
+from backend.db_meta.models import StorageInstance
+from backend.db_services.dbbase.constants import IpSource
 from backend.flow.engine.controller.mysql import MySQLController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import HostInfoSerializer, InstanceInfoSerializer
+from backend.ticket.builders.common.base import (
+    BaseOperateResourceParamBuilder,
+    HostInfoSerializer,
+    HostRecycleSerializer,
+    InstanceInfoSerializer,
+)
 from backend.ticket.builders.common.constants import MySQLBackupSource
 from backend.ticket.builders.mysql.base import BaseMySQLHATicketFlowBuilder, MySQLBaseOperateDetailSerializer
 from backend.ticket.constants import TicketType
+from backend.utils.basic import get_target_items_from_details


 class MysqlRestoreSlaveDetailSerializer(MySQLBaseOperateDetailSerializer):
     class RestoreInfoSerializer(serializers.Serializer):
-        old_slave = InstanceInfoSerializer(help_text=_("旧从库 IP"))
-        new_slave = HostInfoSerializer(help_text=_("新从库 IP"))
+        class OldSlaveSerializer(serializers.Serializer):
+            old_slave = serializers.ListSerializer(child=InstanceInfoSerializer())
+
+        old_nodes = OldSlaveSerializer(help_text=_("旧从库信息"))
+        new_slave = HostInfoSerializer(help_text=_("新从库 IP"), required=False)
+        resource_spec = serializers.JSONField(help_text=_("资源规格"), required=False)
         cluster_ids = serializers.ListField(help_text=_("集群ID列表"), child=serializers.IntegerField())

     backup_source = serializers.ChoiceField(help_text=_("备份源"), choices=MySQLBackupSource.get_choices())
     infos = serializers.ListField(help_text=_("集群重建信息"), child=RestoreInfoSerializer())
+    ip_source = serializers.ChoiceField(
+        help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT
+    )
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))

     def validate(self, attrs):
         # 校验集群是否可用,集群类型为高可用
@@ -59,12 +76,57 @@ class MysqlRestoreSlaveParamBuilder(builders.FlowParamBuilder):

     def format_ticket_data(self):
         self.ticket_data["add_slave_only"] = False
         for info in self.ticket_data["infos"]:
-            info["old_slave_ip"], info["new_slave_ip"] = info["old_slave"]["ip"], info["new_slave"]["ip"]
-            info["bk_old_slave"], info["bk_new_slave"] = info.pop("old_slave"), info.pop("new_slave")
+            old_slave = info["old_nodes"]["old_slave"][0]
+            info["old_slave_ip"], info["bk_old_slave"] = old_slave["ip"], old_slave
+
+        if self.ticket_data["ip_source"] == IpSource.RESOURCE_POOL:
+            return
+
+        for info in self.ticket_data["infos"]:
+            new_slave = info.pop("new_slave")
+            info["new_slave_ip"], info["bk_new_slave"] = new_slave["ip"], new_slave
+
+
+class MysqlRestoreSlaveResourceParamBuilder(BaseOperateResourceParamBuilder):
+    @classmethod
+    def patch_slave_subzone(cls, ticket_data):
+        # TODO: 后续改造为,尽量与原slave一致,不一致再满足亲和性
+        slave_host_ids = get_target_items_from_details(ticket_data, match_keys=["bk_host_id"])
+        slaves = StorageInstance.objects.prefetch_related("as_receiver__ejector__machine", "machine").filter(
+            machine__bk_host_id__in=slave_host_ids, cluster_type=ClusterType.TenDBCluster
+        )
+        slave_host_map = {slave.machine.bk_host_id: slave for slave in slaves}
+        for info in ticket_data["infos"]:
+            resource_spec = info["resource_spec"]["new_slave"]
+            slave = slave_host_map[info["old_nodes"]["old_slave"][0]["bk_host_id"]]
+            master_subzone_id = slave.as_receiver.get().ejector.machine.bk_sub_zone_id
+            # 同城跨园区,要求slave和master在不同subzone
+            if resource_spec["affinity"] == AffinityEnum.CROS_SUBZONE:
+                resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=False)
+            # 同城同园区,要求slave和master在一个subzone
+            elif resource_spec["affinity"] in [AffinityEnum.SAME_SUBZONE, AffinityEnum.SAME_SUBZONE_CROSS_SWTICH]:
+                resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=True)
+
+    def format(self):
+        # 补充亲和性和城市信息
+        super().patch_info_affinity_location(roles=["new_slave"])
+        # 补充slave园区申请
+        self.patch_slave_subzone(self.ticket_data)
+
+    def post_callback(self):
+        next_flow = self.ticket.next_flow()
+        ticket_data = next_flow.details["ticket_data"]
+        for info in ticket_data["infos"]:
+            info["bk_old_slave"], info["bk_new_slave"] = info.pop("old_slave"), info.pop("new_slave")[0]
+            info["old_slave_ip"], info["new_slave_ip"] = info["bk_old_slave"]["ip"], info["bk_new_slave"]["ip"]
+
+        next_flow.save(update_fields=["details"])


-@builders.BuilderFactory.register(TicketType.MYSQL_RESTORE_SLAVE, is_apply=True)
+@builders.BuilderFactory.register(TicketType.MYSQL_RESTORE_SLAVE, is_apply=True, is_recycle=True)
 class MysqlRestoreSlaveFlowBuilder(BaseMySQLHATicketFlowBuilder):
     serializer = MysqlRestoreSlaveDetailSerializer
     inner_flow_builder = MysqlRestoreSlaveParamBuilder
+    resource_batch_apply_builder = MysqlRestoreSlaveResourceParamBuilder
     inner_flow_name = _("Slave重建执行")
+    need_patch_recycle_host_details = True
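patch_slave_subzone above boils down to a two-branch rule; a standalone sketch (string constants stand in for the AffinityEnum members, and include_or_exclue deliberately keeps the spelling the codebase uses):

    # Stand-ins for the AffinityEnum members referenced in the hunk above.
    CROS_SUBZONE = "CROS_SUBZONE"
    SAME_SUBZONE = "SAME_SUBZONE"
    SAME_SUBZONE_CROSS_SWTICH = "SAME_SUBZONE_CROSS_SWTICH"

    def pin_subzone(resource_spec, master_subzone_id):
        """Cross-subzone affinity excludes the master's subzone; same-subzone affinity requires it."""
        if resource_spec["affinity"] == CROS_SUBZONE:
            resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=False)
        elif resource_spec["affinity"] in (SAME_SUBZONE, SAME_SUBZONE_CROSS_SWTICH):
            resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=True)
        return resource_spec

    spec = {"affinity": CROS_SUBZONE, "location_spec": {"city": "sz", "sub_zone_ids": []}}
    assert pin_subzone(spec, 7)["location_spec"]["include_or_exclue"] is False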
diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_single_destroy.py b/dbm-ui/backend/ticket/builders/mysql/mysql_single_destroy.py
index d61ecabaef..9aeb70c981 100644
--- a/dbm-ui/backend/ticket/builders/mysql/mysql_single_destroy.py
+++ b/dbm-ui/backend/ticket/builders/mysql/mysql_single_destroy.py
@@ -15,12 +15,13 @@
 from backend.flow.engine.controller.mysql import MySQLController
 from backend.iam_app.dataclass.actions import ActionEnum
 from backend.ticket import builders
+from backend.ticket.builders.common.base import HostRecycleSerializer
 from backend.ticket.builders.mysql.base import BaseMySQLSingleTicketFlowBuilder, MySQLClustersTakeDownDetailsSerializer
 from backend.ticket.constants import TicketType


 class MysqlSingleDestroyDetailSerializer(MySQLClustersTakeDownDetailsSerializer):
-    pass
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))


 class MysqlSingleDestroyFlowParamBuilder(builders.FlowParamBuilder):
@@ -28,9 +29,10 @@

 @builders.BuilderFactory.register(
-    TicketType.MYSQL_SINGLE_DESTROY, phase=ClusterPhase.DESTROY, iam=ActionEnum.MYSQL_DESTROY
+    TicketType.MYSQL_SINGLE_DESTROY, phase=ClusterPhase.DESTROY, iam=ActionEnum.MYSQL_DESTROY, is_recycle=True
 )
 class MysqlSingleDestroyFlowBuilder(BaseMySQLSingleTicketFlowBuilder):
     serializer = MysqlSingleDestroyDetailSerializer
     inner_flow_builder = MysqlSingleDestroyFlowParamBuilder
     inner_flow_name = _("MySQL单节点销毁执行")
+    need_patch_recycle_cluster_details = True
diff --git a/dbm-ui/backend/ticket/builders/pulsar/pulsar_destroy.py b/dbm-ui/backend/ticket/builders/pulsar/pulsar_destroy.py
index 4ad2f64a4a..e30fb5d15b 100644
--- a/dbm-ui/backend/ticket/builders/pulsar/pulsar_destroy.py
+++ b/dbm-ui/backend/ticket/builders/pulsar/pulsar_destroy.py
@@ -30,8 +30,9 @@ class PulsarDestroyFlowParamBuilder(builders.FlowParamBuilder):
     controller = PulsarController.pulsar_destroy_scene


-@builders.BuilderFactory.register(TicketType.PULSAR_DESTROY, phase=ClusterPhase.DESTROY)
+@builders.BuilderFactory.register(TicketType.PULSAR_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True)
 class PulsarDestroyFlowBuilder(BasePulsarTicketFlowBuilder):
     serializer = PulsarDestroyDetailSerializer
     inner_flow_builder = PulsarDestroyFlowParamBuilder
     inner_flow_name = _("Pulsar 集群删除")
+    need_patch_recycle_cluster_details = True
diff --git a/dbm-ui/backend/ticket/builders/pulsar/pulsar_replace.py b/dbm-ui/backend/ticket/builders/pulsar/pulsar_replace.py
index 5787caa0fb..e7c5713564 100644
--- a/dbm-ui/backend/ticket/builders/pulsar/pulsar_replace.py
+++ b/dbm-ui/backend/ticket/builders/pulsar/pulsar_replace.py
@@ -39,7 +39,7 @@ class PulsarReplaceResourceParamBuilder(BigDataReplaceResourceParamBuilder):
     pass


-@builders.BuilderFactory.register(TicketType.PULSAR_REPLACE, is_apply=True)
+@builders.BuilderFactory.register(TicketType.PULSAR_REPLACE, is_apply=True, is_recycle=True)
 class PulsarReplaceFlowBuilder(BasePulsarTicketFlowBuilder):
     serializer = PulsarReplaceDetailSerializer
     inner_flow_builder = PulsarReplaceFlowParamBuilder
diff --git a/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py b/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py
index 64d7596892..34168ce09f 100644
--- a/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py
+++ b/dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py
@@ -15,9 +15,9 @@
 from backend.db_meta.enums import InstanceRole
 from backend.db_meta.models import Cluster
-from backend.db_services.dbbase.constants import IpSource
 from backend.flow.engine.controller.pulsar import PulsarController
 from backend.ticket import builders
+from backend.ticket.builders.common.base import HostRecycleSerializer
 from backend.ticket.builders.common.bigdata import (
     BasePulsarTicketFlowBuilder,
     BigDataSingleClusterOpsDetailsSerializer,
@@ -28,12 +28,13 @@

 class PulsarShrinkDetailSerializer(BigDataSingleClusterOpsDetailsSerializer):
-    ip_source = serializers.ChoiceField(help_text=_("主机来源"), choices=IpSource.get_choices())
-
     class NodesSerializer(serializers.Serializer):
         broker = serializers.ListField(help_text=_("broker信息列表"), child=serializers.DictField())
         bookkeeper = serializers.ListField(help_text=_("bookkeeper信息列表"), child=serializers.DictField())

+    old_nodes = serializers.JSONField(help_text=_("节点列表信息"), required=False)
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))
+
     def validate(self, attrs):
         super().validate(attrs)
@@ -75,11 +76,13 @@ class PulsarShrinkFlowParamBuilder(builders.FlowParamBuilder):
     controller = PulsarController.pulsar_shrink_scene

     def format_ticket_data(self):
+        self.ticket_data["nodes"] = self.ticket_data.pop("old_nodes")
         super().format_ticket_data()


-@builders.BuilderFactory.register(TicketType.PULSAR_SHRINK)
+@builders.BuilderFactory.register(TicketType.PULSAR_SHRINK, is_recycle=True)
 class PulsarShrinkFlowBuilder(BasePulsarTicketFlowBuilder):
     serializer = PulsarShrinkDetailSerializer
     inner_flow_builder = PulsarShrinkFlowParamBuilder
     inner_flow_name = _("Pulsar 集群缩容")
+    need_patch_recycle_host_details = True
diff --git a/dbm-ui/backend/ticket/builders/redis/redis_destroy.py b/dbm-ui/backend/ticket/builders/redis/redis_destroy.py
index 09407f4327..3ed66c9ea4 100644
--- a/dbm-ui/backend/ticket/builders/redis/redis_destroy.py
+++ b/dbm-ui/backend/ticket/builders/redis/redis_destroy.py
@@ -14,7 +14,7 @@
 from backend.db_meta.enums import ClusterPhase
 from backend.flow.engine.controller.redis import RedisController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import SkipToRepresentationMixin
+from backend.ticket.builders.common.base import HostRecycleSerializer, SkipToRepresentationMixin
 from backend.ticket.builders.redis.base import (
     BaseRedisInstanceTicketFlowBuilder,
     BaseRedisTicketFlowBuilder,
@@ -25,7 +25,7 @@

 class RedisDestroyDetailSerializer(RedisSingleOpsBaseDetailSerializer):
-    pass
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))


 class RedisDestroyFlowParamBuilder(builders.FlowParamBuilder):
@@ -59,9 +59,10 @@ class RedisInstanceDestroyFlowParamBuilder(builders.FlowParamBuilder):
     controller = RedisController.fake_scene


-@builders.BuilderFactory.register(TicketType.REDIS_INSTANCE_DESTROY, phase=ClusterPhase.DESTROY)
+@builders.BuilderFactory.register(TicketType.REDIS_INSTANCE_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True)
 class RedisInstanceCloseFlowBuilder(BaseRedisInstanceTicketFlowBuilder):
     serializer = RedisInstanceDestroyDetailSerializer
     inner_flow_builder = RedisInstanceDestroyFlowParamBuilder
     inner_flow_name = _("下架集群")
     pause_node_builder = RedisBasePauseParamBuilder
+    need_patch_recycle_cluster_details = True
diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_autofix.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_autofix.py
index 88702b9913..25a581d280 100644
--- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_autofix.py
+++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_autofix.py
@@ -18,7 +18,7 @@
 from backend.db_services.dbbase.constants import IpSource
 from backend.flow.engine.controller.redis import RedisController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import SkipToRepresentationMixin
+from backend.ticket.builders.common.base import HostRecycleSerializer, SkipToRepresentationMixin
 from backend.ticket.builders.redis.redis_toolbox_cut_off import (
     RedisClusterCutOffFlowBuilder,
     RedisClusterCutOffResourceParamBuilder,
@@ -42,6 +42,7 @@ class HostInfoSerializer(serializers.Serializer):
     ip_source = serializers.ChoiceField(
         help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL.value
     )
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))
     infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer())

@@ -85,7 +86,7 @@ def post_callback(self):
         super().post_callback()


-@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_AUTOFIX, is_apply=True)
+@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_AUTOFIX, is_apply=True, is_recycle=True)
 class RedisClusterAutofixFlowBuilder(RedisClusterCutOffFlowBuilder):
     serializer = RedisClusterAutofixDetailSerializer
     alarm_transform_serializer = RedisClusterAutofixAlarmTransformSerializer
@@ -95,6 +96,8 @@ class RedisClusterAutofixFlowBuilder(RedisClusterCutOffFlowBuilder):
     default_need_itsm = True
     default_need_manual_confirm = False

+    need_patch_recycle_host_details = True
+
     @property
     def need_itsm(self):
         return True
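Every builder touched here follows the same two-part convention: is_recycle=True at registration, plus a need_patch_recycle_host_details or need_patch_recycle_cluster_details class flag. A toy sketch of that flag-plus-registry idea (illustrative only; the real BuilderFactory internals are outside this diff):

    # Toy registry; names here are illustrative, not the real BuilderFactory API.
    RECYCLE_BUILDERS = {}

    def register(ticket_type, is_recycle=False):
        def wrapper(cls):
            if is_recycle:
                # recycle-enabled tickets are tracked so a recycle stage can be appended
                RECYCLE_BUILDERS[ticket_type] = cls
            return cls
        return wrapper

    @register("REDIS_CLUSTER_AUTOFIX", is_recycle=True)
    class AutofixBuilder:
        # Mirrors the flag set in the hunk above: the builder promises to
        # patch ticket details with the hosts to recycle before the flow runs.
        need_patch_recycle_host_details = True

    assert "REDIS_CLUSTER_AUTOFIX" in RECYCLE_BUILDERS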
""" import itertools +from collections import defaultdict from django.utils.translation import ugettext_lazy as _ from rest_framework import serializers @@ -21,6 +22,7 @@ from backend.ticket.builders.common.base import ( BaseOperateResourceParamBuilder, DisplayInfoSerializer, + HostRecycleSerializer, SkipToRepresentationMixin, ) from backend.ticket.builders.redis.base import BaseRedisTicketFlowBuilder, ClusterValidateMixin @@ -34,6 +36,7 @@ class InfoSerializer(DisplayInfoSerializer): class HostInfoSerializer(serializers.Serializer): ip = serializers.IPAddressField() spec_id = serializers.IntegerField() + bk_host_id = serializers.IntegerField() cluster_ids = serializers.ListField(help_text=_("集群列表"), child=serializers.IntegerField()) bk_cloud_id = serializers.IntegerField(help_text=_("云区域ID")) @@ -42,7 +45,10 @@ class HostInfoSerializer(serializers.Serializer): redis_slave = serializers.ListField(help_text=_("slave列表"), child=HostInfoSerializer(), required=False) resource_spec = serializers.JSONField(required=False, help_text=_("资源申请信息(前端不用传递,后台渲染)")) - ip_source = serializers.ChoiceField(help_text=_("主机来源"), choices=IpSource.get_choices()) + ip_source = serializers.ChoiceField( + help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL + ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer()) @@ -50,6 +56,7 @@ class RedisClusterCutOffParamBuilder(builders.FlowParamBuilder): controller = RedisController.redis_cluster_cutoff_scene def format_ticket_data(self): + self.ticket_data.pop("old_nodes") super().format_ticket_data() @@ -92,21 +99,23 @@ def post_callback(self): super().post_callback() -@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_CUTOFF, is_apply=True) +@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_CUTOFF, is_apply=True, is_recycle=True) class RedisClusterCutOffFlowBuilder(BaseRedisTicketFlowBuilder): serializer = RedisClusterCutOffDetailSerializer inner_flow_builder = RedisClusterCutOffParamBuilder inner_flow_name = _("整机替换") resource_batch_apply_builder = RedisClusterCutOffResourceParamBuilder + need_patch_recycle_host_details = True - def patch_ticket_detail(self): - """redis_master -> backend_group""" + def patch_resource_and_old_nodes(self): cluster_ids = list(itertools.chain(*[infos["cluster_ids"] for infos in self.ticket.details["infos"]])) - id__cluster = {cluster.id: cluster for cluster in Cluster.objects.filter(id__in=cluster_ids)} + cluster_map = {cluster.id: cluster for cluster in Cluster.objects.filter(id__in=cluster_ids)} + old_nodes = defaultdict(list) + for info in self.ticket.details["infos"]: resource_spec = {} # 取第一个cluster即可,即使是多集群,也是单机多实例的情况 - cluster = id__cluster[info["cluster_ids"][0]] + cluster = cluster_map[info["cluster_ids"][0]] for role in [ InstanceRole.REDIS_MASTER.value, InstanceRole.REDIS_PROXY.value, @@ -117,21 +126,35 @@ def patch_ticket_detail(self): if not role_hosts: continue - if role in [InstanceRole.REDIS_MASTER.value, InstanceRole.REDIS_PROXY.value]: - # 如果替换角色是master,则是master/slave成对替换 - resource_role = "backend_group" if role == InstanceRole.REDIS_MASTER.value else role - resource_spec[resource_role] = { + old_nodes[role].extend(role_hosts) + + # 如果是proxy,则至少跨两个机房 + if role == InstanceRole.REDIS_PROXY.value: + resource_spec[role] = { + "spec_id": info[role][0]["spec_id"], + "count": len(role_hosts), + "location_spec": {"city": cluster.region, "sub_zone_ids": []}, + "affinity": 
cluster.disaster_tolerance_level, + } + resource_spec[role].update(group_count=2) + # 如果替换角色是master,则是master/slave成对替换 + elif role == InstanceRole.REDIS_MASTER.value: + resource_spec["backend_group"] = { "spec_id": info[role][0]["spec_id"], "count": len(role_hosts), "location_spec": {"city": cluster.region, "sub_zone_ids": []}, "affinity": cluster.disaster_tolerance_level, } - # 如果是proxy,则至少跨两个机房 - if role == InstanceRole.REDIS_PROXY.value: - resource_spec[resource_role].update(group_count=2) + # 因为是成对替换,所以要把slave加入old nodes + redis_masters = StorageInstance.objects.prefetch_related("as_ejector__receiver", "machine").filter( + cluster=cluster, machine__ip__in=[host["ip"] for host in role_hosts] + ) + for master in redis_masters: + slave = master.as_ejector.get().receiver.machine + old_nodes[InstanceRole.REDIS_SLAVE].append({"ip": slave.ip, "bk_host_id": slave.bk_host_id}) + # 如果是替换slave, 需要和当前集群中的配对的 master 不同机房 elif role == InstanceRole.REDIS_SLAVE.value: - # 如果是替换slave, 需要和当前集群中的配对的 master 不同机房 - redis_slaves = StorageInstance.objects.prefetch_related("as_receiver", "machine").filter( + redis_slaves = StorageInstance.objects.prefetch_related("as_receiver__ejector", "machine").filter( cluster=cluster, machine__ip__in=[host["ip"] for host in role_hosts] ) ip__redis_slave = {slave.machine.ip: slave for slave in redis_slaves} @@ -147,7 +170,11 @@ def patch_ticket_detail(self): }, } - info["resource_spec"] = resource_spec + info.update(resource_spec=resource_spec, old_nodes=old_nodes) self.ticket.save(update_fields=["details"]) + + def patch_ticket_detail(self): + """redis_master -> backend_group""" + self.patch_resource_and_old_nodes() super().patch_ticket_detail() diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py index 3138903c13..ffa368c28e 100644 --- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py +++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_datastruct_task_delete.py @@ -8,16 +8,20 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
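The role branch in patch_resource_and_old_nodes yields two request shapes: proxies are requested as individual hosts spread over at least two groups, masters as backend_group pairs (which is also why the paired slaves are appended to old_nodes). Reduced to plain dicts with invented values:

    def build_spec(role, spec_id, count, city, affinity):
        # Values are invented; real ones come from info/cluster in the hunk above.
        base = {
            "spec_id": spec_id,
            "count": count,
            "location_spec": {"city": city, "sub_zone_ids": []},
            "affinity": affinity,
        }
        if role == "proxy":
            base["group_count"] = 2  # proxies must span at least two zones
            return {"proxy": base}
        return {"backend_group": base}  # masters are replaced as master/slave pairs

    assert build_spec("proxy", 1, 2, "sz", "CROS_SUBZONE")["proxy"]["group_count"] == 2
    assert "backend_group" in build_spec("redis_master", 1, 2, "sz", "CROS_SUBZONE")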
""" +import itertools +import operator +from functools import reduce +from django.db.models import Q from django.utils.translation import ugettext_lazy as _ from rest_framework import serializers from backend.db_meta.enums import DestroyedStatus -from backend.db_meta.models import Cluster +from backend.db_meta.models import Cluster, Machine from backend.db_services.redis.rollback.models import TbTendisRollbackTasks from backend.flow.engine.controller.redis import RedisController from backend.ticket import builders -from backend.ticket.builders.common.base import SkipToRepresentationMixin +from backend.ticket.builders.common.base import HostRecycleSerializer, SkipToRepresentationMixin from backend.ticket.builders.redis.base import BaseRedisTicketFlowBuilder, RedisBasePauseParamBuilder from backend.ticket.constants import TicketType @@ -39,28 +43,53 @@ def validate(self, attr): raise serializers.ValidationError(_("目标集群{}不存在,请确认.").format(attr["cluster_id"])) # 判断构造实例是否存在 - if not TbTendisRollbackTasks.objects.filter( + tasks = TbTendisRollbackTasks.objects.filter( related_rollback_bill_id=attr.get("related_rollback_bill_id"), prod_cluster=prod_cluster.immute_domain, bk_cloud_id=attr.get("bk_cloud_id"), destroyed_status=DestroyedStatus.NOT_DESTROYED, - ).exists(): + ) + if not tasks.exists(): raise serializers.ValidationError(_("集群{}: 没有找到未销毁的实例.").format(prod_cluster.immute_domain)) # 填写域名 attr["prod_cluster"] = prod_cluster.immute_domain + # 填写构造任务,patch函数用 + attr["datastruct_tasks"] = tasks return attr infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer()) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) class RedisDataStructureTaskDeleteParamBuilder(builders.FlowParamBuilder): controller = RedisController.redis_data_structure_task_delete -@builders.BuilderFactory.register(TicketType.REDIS_DATA_STRUCTURE_TASK_DELETE) +@builders.BuilderFactory.register(TicketType.REDIS_DATA_STRUCTURE_TASK_DELETE, is_recycle=True) class RedisDataStructureTaskDeleteFlowBuilder(BaseRedisTicketFlowBuilder): serializer = RedisDataStructureTaskDeleteDetailSerializer inner_flow_builder = RedisDataStructureTaskDeleteParamBuilder inner_flow_name = _("Redis 销毁构造实例") pause_node_builder = RedisBasePauseParamBuilder + need_patch_recycle_host_details = True + + def patch_datastruct_delete_nodes(self): + drop_machine_filters = [] + for info in self.ticket.details["infos"]: + tasks = info.pop("datastruct_tasks") + instances = itertools.chain(*[task.temp_instance_range for task in tasks]) + filters = [ + Q(bk_biz_id=tasks[0].bk_biz_id, bk_cloud_id=tasks[0].bk_cloud_id, ip=instance.split(":")[0]) + for instance in instances + ] + drop_machine_filters.extend(filters) + + drop_machines = Machine.objects.filter(reduce(operator.or_, drop_machine_filters)) + self.ticket.details["old_nodes"]["datastruct_hosts"] = [ + {"ip": host.ip, "bk_host_id": host.bk_host_id} for host in drop_machines + ] + + def patch_ticket_detail(self): + self.patch_datastruct_delete_nodes() + super().patch_ticket_detail() diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py index d5181c954c..b04e62a070 100644 --- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py +++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py @@ -13,9 +13,15 @@ from rest_framework import serializers from backend.db_meta.models import Cluster +from 
diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py
index d5181c954c..b04e62a070 100644
--- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py
+++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_proxy_scale_down.py
@@ -13,9 +13,15 @@
 from rest_framework import serializers

 from backend.db_meta.models import Cluster
+from backend.flow.engine.bamboo.scene.redis.redis_proxy_scale import RedisProxyScaleFlow
 from backend.flow.engine.controller.redis import RedisController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import HostInfoSerializer, SkipToRepresentationMixin, fetch_cluster_ids
+from backend.ticket.builders.common.base import (
+    HostInfoSerializer,
+    HostRecycleSerializer,
+    SkipToRepresentationMixin,
+    fetch_cluster_ids,
+)
 from backend.ticket.builders.redis.base import BaseRedisTicketFlowBuilder, ClusterValidateMixin
 from backend.ticket.constants import SwitchConfirmType, TicketType

@@ -24,17 +30,21 @@ class ProxyScaleDownDetailSerializer(SkipToRepresentationMixin, ClusterValidateM
     """proxy缩容"""

     class InfoSerializer(serializers.Serializer):
+        class OldProxySerializer(serializers.Serializer):
+            proxy_reduced_hosts = serializers.ListSerializer(
+                help_text=_("缩容指定主机"), child=HostInfoSerializer(), required=False
+            )
+
         cluster_id = serializers.IntegerField(help_text=_("集群ID"))
         target_proxy_count = serializers.IntegerField(help_text=_("目标proxy数量"), min_value=2, required=False)
         proxy_reduce_count = serializers.IntegerField(help_text=_("缩容proxy数量"), required=False)
-        proxy_reduced_hosts = serializers.ListSerializer(
-            help_text=_("缩容指定主机"), child=HostInfoSerializer(), required=False
-        )
+        old_nodes = OldProxySerializer(help_text=_("缩容指定proxy"), required=False)
         online_switch_type = serializers.ChoiceField(
             help_text=_("切换类型"), choices=SwitchConfirmType.get_choices(), default=SwitchConfirmType.NO_CONFIRM
         )

     infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer())
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))

     def validate(self, attrs):
         clusters = Cluster.objects.filter(id__in=fetch_cluster_ids(attrs)).prefetch_related("proxyinstance_set")
@@ -47,6 +57,8 @@ def validate(self, attrs):
                 info["target_proxy_count"] = cluster.proxyinstance_set.count() - len(info["proxy_reduced_hosts"])
             if info["target_proxy_count"] < 2:
                 raise serializers.ValidationError(_("请保证集群{}缩容后proxy数量不小于2").format(cluster.immute_domain))
+            # 提前存入proxy信息用于后续patch
+            info.update(proxy_insts=cluster.proxyinstance_set.all(), bk_cloud_id=cluster.bk_cloud_id)

         return attrs

@@ -58,8 +70,29 @@ def format_ticket_data(self):
         super().format_ticket_data()


-@builders.BuilderFactory.register(TicketType.REDIS_PROXY_SCALE_DOWN)
+@builders.BuilderFactory.register(TicketType.REDIS_PROXY_SCALE_DOWN, is_recycle=True)
 class ProxyScaleDownFlowBuilder(BaseRedisTicketFlowBuilder):
     serializer = ProxyScaleDownDetailSerializer
     inner_flow_builder = ProxyScaleDownParamBuilder
     inner_flow_name = _("Proxy缩容")
+    need_patch_recycle_host_details = True
+
+    def patch_old_proxy_nodes(self):
+        for info in self.ticket.details["infos"]:
+            proxy_insts = info.pop("proxy_insts")
+
+            if info.get("old_nodes"):
+                continue
+
+            # 获取proxy ip和ip与host id的映射
+            proxy_ip__host = {proxy.machine.ip: proxy.machine for proxy in proxy_insts}
+            proxy_ips = list(proxy_insts.values_list("machine__ip", flat=True))
+            # 获取实际下架的ip
+            target_proxy_count = info["target_proxy_count"]
+            down_ips = RedisProxyScaleFlow.calc_scale_down_ips(self.ticket.bk_biz_id, proxy_ips, target_proxy_count)
+            # 补充old proxy nodes信息
+            info["old_nodes"] = {"proxy": [{"bk_host_id": proxy_ip__host[ip].bk_host_id, "ip": ip} for ip in down_ips]}
+
+    def patch_ticket_detail(self):
+        self.patch_old_proxy_nodes()
+        super().patch_ticket_detail()
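One detail worth noting in patch_old_proxy_nodes: proxy_ip__host maps IPs to Machine objects, so the host id must be read off the object when old_nodes is assembled (storing the Machine itself would not serialize into ticket details). A pocket-size reproduction with a stand-in class:

    from dataclasses import dataclass

    @dataclass
    class FakeMachine:  # minimal stand-in for db_meta.models.Machine
        ip: str
        bk_host_id: int

    proxy_ip__host = {m.ip: m for m in (FakeMachine("10.0.0.6", 16), FakeMachine("10.0.0.7", 17))}
    down_ips = ["10.0.0.7"]
    old_nodes = {"proxy": [{"bk_host_id": proxy_ip__host[ip].bk_host_id, "ip": ip} for ip in down_ips]}
    assert old_nodes == {"proxy": [{"bk_host_id": 17, "ip": "10.0.0.7"}]}  # JSON-serializable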
b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_redis_scale_updown.py index 76aadb656a..69a2301072 100644 --- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_redis_scale_updown.py +++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_redis_scale_updown.py @@ -12,13 +12,16 @@ from rest_framework import serializers from backend.configuration.constants import AffinityEnum +from backend.db_meta.models import Cluster from backend.db_services.dbbase.constants import IpSource +from backend.db_services.dbresource.handlers import ResourceHandler from backend.flow.consts import RedisCapacityUpdateType from backend.flow.engine.controller.redis import RedisController from backend.ticket import builders from backend.ticket.builders.common.base import ( BaseOperateResourceParamBuilder, DisplayInfoSerializer, + HostRecycleSerializer, SkipToRepresentationMixin, ) from backend.ticket.builders.redis.base import BaseRedisTicketFlowBuilder, ClusterValidateMixin @@ -54,7 +57,10 @@ class BackendGroupSerializer(serializers.Serializer): ) resource_spec = ResourceSpecSerializer(help_text=_("资源申请")) - ip_source = serializers.ChoiceField(help_text=_("主机来源"), choices=IpSource.get_choices()) + ip_source = serializers.ChoiceField( + help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL + ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer()) @@ -75,9 +81,26 @@ def post_callback(self): super().post_callback() -@builders.BuilderFactory.register(TicketType.REDIS_SCALE_UPDOWN, is_apply=True) +@builders.BuilderFactory.register(TicketType.REDIS_SCALE_UPDOWN, is_apply=True, is_recycle=True) class RedisScaleUpDownFlowBuilder(BaseRedisTicketFlowBuilder): serializer = RedisScaleUpDownDetailSerializer inner_flow_builder = RedisScaleUpDownParamBuilder inner_flow_name = _("Redis 集群容量变更") resource_batch_apply_builder = RedisScaleUpDownResourceParamBuilder + + def patch_down_cluster_hosts(self): + """针对全部全部机器替换,获取所有的下架机器""" + cluster_ids = [ + info["cluster_id"] + for info in self.ticket.details["infos"] + if info["update_mode"] == RedisCapacityUpdateType.ALL_MACHINES_REPLACE + ] + recycle_hosts = Cluster.get_cluster_related_machines(cluster_ids) + recycle_hosts = [{"bk_host_id": host_id} for host_id in recycle_hosts] + self.ticket.details["recycle_hosts"] = ResourceHandler.standardized_resource_host( + recycle_hosts, self.ticket.bk_biz_id + ) + + def patch_ticket_detail(self): + self.patch_down_cluster_hosts() + super().patch_ticket_detail() diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py index 6fa37073d2..21e914dabf 100644 --- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py +++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_shard_update.py @@ -18,7 +18,7 @@ from backend.db_services.version.utils import query_versions_by_key from backend.flow.engine.controller.redis import RedisController from backend.ticket import builders -from backend.ticket.builders.common.base import SkipToRepresentationMixin +from backend.ticket.builders.common.base import HostRecycleSerializer, SkipToRepresentationMixin from backend.ticket.builders.redis.base import ( BaseRedisTicketFlowBuilder, ClusterValidateMixin, @@ -79,7 +79,10 @@ def validate(self, attr): return attr data_check_repair_setting = DataCheckRepairSettingSerializer() - ip_source = serializers.ChoiceField(help_text=_("主机来源"), 
choices=IpSource.get_choices()) + ip_source = serializers.ChoiceField( + help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL + ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer(), allow_empty=False) @@ -98,9 +101,10 @@ def format(self): info["resource_spec"]["proxy"]["group_count"] = 2 -@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_SHARD_NUM_UPDATE, is_apply=True) +@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_SHARD_NUM_UPDATE, is_apply=True, is_recycle=True) class RedisShardUpdateFlowBuilder(BaseRedisTicketFlowBuilder): serializer = RedisShardUpdateDetailSerializer inner_flow_builder = RedisShardUpdateParamBuilder inner_flow_name = _("Redis 集群分片变更") resource_batch_apply_builder = RedisShardUpdateResourceParamBuilder + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py index 10c2b86d32..e25fac746f 100644 --- a/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py +++ b/dbm-ui/backend/ticket/builders/redis/redis_toolbox_type_update.py @@ -19,7 +19,7 @@ from backend.db_services.version.utils import query_versions_by_key from backend.flow.engine.controller.redis import RedisController from backend.ticket import builders -from backend.ticket.builders.common.base import SkipToRepresentationMixin +from backend.ticket.builders.common.base import HostRecycleSerializer, SkipToRepresentationMixin from backend.ticket.builders.redis.base import ( BaseRedisTicketFlowBuilder, ClusterValidateMixin, @@ -82,7 +82,10 @@ def validate(self, attr): return attr data_check_repair_setting = DataCheckRepairSettingSerializer() - ip_source = serializers.ChoiceField(help_text=_("主机来源"), choices=IpSource.get_choices()) + ip_source = serializers.ChoiceField( + help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL + ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) infos = serializers.ListField(help_text=_("批量操作参数列表"), child=InfoSerializer(), allow_empty=False) @@ -101,9 +104,10 @@ def format(self): info["resource_spec"]["proxy"]["group_count"] = 2 -@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_TYPE_UPDATE, is_apply=True) +@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_TYPE_UPDATE, is_apply=True, is_recycle=True) class RedisTypeUpdateFlowBuilder(BaseRedisTicketFlowBuilder): serializer = RedisTypeUpdateDetailSerializer inner_flow_builder = RedisTypeUpdateParamBuilder inner_flow_name = _("Redis 集群类型变更") resource_batch_apply_builder = RedisTypeUpdateResourceParamBuilder + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/riak/riak_destroy.py b/dbm-ui/backend/ticket/builders/riak/riak_destroy.py index cbce5ed70c..0223c7c925 100644 --- a/dbm-ui/backend/ticket/builders/riak/riak_destroy.py +++ b/dbm-ui/backend/ticket/builders/riak/riak_destroy.py @@ -17,6 +17,7 @@ from backend.db_meta.models import Cluster from backend.flow.engine.controller.riak import RiakController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.common.bigdata import BigDataTakeDownDetailSerializer from backend.ticket.builders.riak.base import BaseRiakTicketFlowBuilder from backend.ticket.constants import TicketType @@ -25,7 +26,7 @@ class 
RiakDestroyDetailSerializer(BigDataTakeDownDetailSerializer): - pass + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) class RiakDestroyFlowParamBuilder(builders.FlowParamBuilder): @@ -36,8 +37,9 @@ def format_ticket_data(self): self.ticket_data["bk_cloud_id"] = cluster.bk_cloud_id -@builders.BuilderFactory.register(TicketType.RIAK_CLUSTER_DESTROY, phase=ClusterPhase.DESTROY) +@builders.BuilderFactory.register(TicketType.RIAK_CLUSTER_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True) class RiakDestroyFlowBuilder(BaseRiakTicketFlowBuilder): serializer = RiakDestroyDetailSerializer inner_flow_builder = RiakDestroyFlowParamBuilder inner_flow_name = _("Riak 集群销毁") + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/riak/riak_shrink.py b/dbm-ui/backend/ticket/builders/riak/riak_shrink.py index 7926c29a89..71161499c2 100644 --- a/dbm-ui/backend/ticket/builders/riak/riak_shrink.py +++ b/dbm-ui/backend/ticket/builders/riak/riak_shrink.py @@ -17,7 +17,7 @@ from backend.db_meta.models import Cluster from backend.flow.engine.controller.riak import RiakController from backend.ticket import builders -from backend.ticket.builders.common.base import HostInfoSerializer +from backend.ticket.builders.common.base import HostInfoSerializer, HostRecycleSerializer from backend.ticket.builders.common.bigdata import BigDataSingleClusterOpsDetailsSerializer from backend.ticket.builders.riak.base import BaseRiakTicketFlowBuilder from backend.ticket.constants import TicketType @@ -26,8 +26,12 @@ class RiakShrinkDetailSerializer(BigDataSingleClusterOpsDetailsSerializer): + class RiakNodeSerializer(serializers.Serializer): + riak = serializers.ListSerializer(help_text=_("缩容节点"), child=HostInfoSerializer()) + cluster_id = serializers.IntegerField(help_text=_("集群ID")) - nodes = serializers.ListSerializer(help_text=_("缩容节点"), child=HostInfoSerializer()) + nodes = RiakNodeSerializer(help_text=_("缩容信息")) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): return attrs @@ -41,8 +45,9 @@ def format_ticket_data(self): self.ticket_data["bk_cloud_id"] = cluster.bk_cloud_id -@builders.BuilderFactory.register(TicketType.RIAK_CLUSTER_SCALE_IN) +@builders.BuilderFactory.register(TicketType.RIAK_CLUSTER_SCALE_IN, is_recycle=True) class RiakShrinkFlowBuilder(BaseRiakTicketFlowBuilder): serializer = RiakShrinkDetailSerializer inner_flow_builder = RiakShrinkFlowParamBuilder inner_flow_name = _("Riak 集群缩容") + need_patch_recycle_host_details = True diff --git a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_add_slave.py b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_add_slave.py index 7eef8102bd..96bdecf486 100644 --- a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_add_slave.py +++ b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_add_slave.py @@ -17,6 +17,7 @@ from backend.flow.engine.controller.sqlserver import SqlserverController from backend.ticket import builders from backend.ticket.builders.common.base import HostInfoSerializer +from backend.ticket.builders.mysql.mysql_add_slave import MysqlAddSlaveResourceParamBuilder from backend.ticket.builders.sqlserver.base import ( BaseSQLServerHATicketFlowBuilder, SQLServerBaseOperateDetailSerializer, @@ -51,10 +52,17 @@ def format_ticket_data(self): class SQLServerAddSlaveResourceParamBuilder(SQLServerBaseOperateResourceParamBuilder): + def format(self): + # 补充城市和亲和性 + super().patch_info_affinity_location() + # 新增slave亲和性同mysql一致 + 
MysqlAddSlaveResourceParamBuilder.patch_slave_subzone(self.ticket_data) + def post_callback(self): next_flow = self.ticket.next_flow() for info in next_flow.details["ticket_data"]["infos"]: - info["new_slave_host"] = info["sqlserver"][0] + info["new_slave_host"] = info.pop("new_slave")[0] + info["resource_spec"]["sqlserver_ha"] = info["resource_spec"].pop("new_slave") next_flow.save(update_fields=["details"]) diff --git a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py index 3f94fbfbb0..0a89589da5 100644 --- a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py +++ b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_destroy.py @@ -14,19 +14,20 @@ from backend.db_meta.enums import ClusterPhase from backend.flow.engine.controller.sqlserver import SqlserverController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.sqlserver.base import BaseSQLServerTicketFlowBuilder, SQLServerTakeDownDetailsSerializer from backend.ticket.constants import FlowRetryType, TicketType class SQLServerDestroyDetailSerializer(SQLServerTakeDownDetailsSerializer): - pass + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) class SQLServerDestroyFlowParamBuilder(builders.FlowParamBuilder): controller = SqlserverController.cluster_destroy_scene -@builders.BuilderFactory.register(TicketType.SQLSERVER_DESTROY, phase=ClusterPhase.DESTROY) +@builders.BuilderFactory.register(TicketType.SQLSERVER_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True) class SQLServerDestroyFlowBuilder(BaseSQLServerTicketFlowBuilder): """Sqlserver下架流程的构建基类""" @@ -34,3 +35,4 @@ class SQLServerDestroyFlowBuilder(BaseSQLServerTicketFlowBuilder): inner_flow_builder = SQLServerDestroyFlowParamBuilder inner_flow_name = _("SQLServer 销毁执行") retry_type = FlowRetryType.MANUAL_RETRY + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py index 8c44d59d78..085707c070 100644 --- a/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py +++ b/dbm-ui/backend/ticket/builders/sqlserver/sqlserver_restore_slave.py @@ -18,7 +18,7 @@ from backend.flow.engine.controller.sqlserver import SqlserverController from backend.flow.utils.sqlserver.sqlserver_bk_config import get_module_infos from backend.ticket import builders -from backend.ticket.builders.common.base import HostInfoSerializer +from backend.ticket.builders.common.base import HostInfoSerializer, HostRecycleSerializer from backend.ticket.builders.sqlserver.base import ( BaseSQLServerHATicketFlowBuilder, SQLServerBaseOperateDetailSerializer, @@ -31,13 +31,19 @@ class SQLServerRestoreSlaveDetailSerializer(SQLServerBaseOperateDetailSerializer): class SlaveInfoSerializer(serializers.Serializer): + class OldSlaveSerializer(serializers.Serializer): + old_slave_host = serializers.ListSerializer(child=HostInfoSerializer()) + cluster_ids = serializers.ListField(help_text=_("集群列表"), child=serializers.IntegerField()) resource_spec = serializers.JSONField(help_text=_("资源池规格"), required=False) - old_slave_host = HostInfoSerializer(help_text=_("旧slave机器信息")) + old_nodes = OldSlaveSerializer(help_text=_("旧slave机器信息")) new_slave_host = HostInfoSerializer(help_text=_("新slave机器信息"), required=False) infos = serializers.ListField(help_text=_("重建从库列表"), child=SlaveInfoSerializer()) - ip_source = 
serializers.ChoiceField(help_text=_("主机来源"), choices=IpSource.get_choices()) + ip_source = serializers.ChoiceField( + help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL + ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): # 校验实例的角色为slave @@ -56,10 +62,9 @@ class SQLServerRestoreSlaveFlowParamBuilder(builders.FlowParamBuilder): controller = SqlserverController.slave_rebuild_in_new_slave_scene def format_ticket_data(self): - pass - # for info in self.ticket_data["infos"]: - # info["slave_host"] = info.pop("slave") - # info["port"] = info["slave_host"].pop("port") + for info in self.ticket_data["infos"]: + old_nodes = info.pop("old_nodes") + info["old_slave_host"] = old_nodes["old_slave_host"][0] class SQLServerRestoreSlaveResourceParamBuilder(SQLServerBaseOperateResourceParamBuilder): @@ -109,12 +114,13 @@ def post_callback(self): next_flow.save(update_fields=["details"]) -@builders.BuilderFactory.register(TicketType.SQLSERVER_RESTORE_SLAVE) +@builders.BuilderFactory.register(TicketType.SQLSERVER_RESTORE_SLAVE, is_recycle=True) class SQLServerRestoreSlaveFlowBuilder(BaseSQLServerHATicketFlowBuilder): serializer = SQLServerRestoreSlaveDetailSerializer resource_batch_apply_builder = SQLServerRestoreSlaveResourceParamBuilder inner_flow_builder = SQLServerRestoreSlaveFlowParamBuilder inner_flow_name = _("SQLServer Slave重建执行") + need_patch_recycle_host_details = True def patch_ticket_detail(self): # 补充数据库版本和字符集 diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_destroy.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_destroy.py index ea72426e3f..0e23b4e10c 100644 --- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_destroy.py +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_destroy.py @@ -14,6 +14,7 @@ from backend.db_meta.enums import ClusterPhase from backend.flow.engine.controller.spider import SpiderController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.tendbcluster.base import ( BaseTendbTicketFlowBuilder, TendbClustersTakeDownDetailsSerializer, @@ -22,16 +23,17 @@ class TendbDestroyDetailSerializer(TendbClustersTakeDownDetailsSerializer): - pass + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) class TendbDestroyFlowParamBuilder(builders.FlowParamBuilder): controller = SpiderController.spider_cluster_destroy_scene -@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_DESTROY, phase=ClusterPhase.DESTROY) +@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_DESTROY, phase=ClusterPhase.DESTROY, is_recycle=True) class TendbDestroyFlowBuilder(BaseTendbTicketFlowBuilder): serializer = TendbDestroyDetailSerializer inner_flow_builder = TendbDestroyFlowParamBuilder inner_flow_name = _("TenDB Cluster 下架执行") + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_migrate_cluster.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_migrate_cluster.py index 1e5a2733f4..34a224899a 100644 --- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_migrate_cluster.py +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_migrate_cluster.py @@ -15,24 +15,32 @@ from backend.db_services.dbbase.constants import IpSource from backend.flow.engine.controller.spider import SpiderController from backend.ticket import builders -from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer +from 
backend.ticket.builders.common.base import HostInfoSerializer, HostRecycleSerializer from backend.ticket.builders.common.constants import MySQLBackupSource +from backend.ticket.builders.mysql.mysql_migrate_cluster import ( + MysqlMigrateClusterParamBuilder, + MysqlMigrateClusterResourceParamBuilder, +) from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer from backend.ticket.constants import FlowRetryType, TicketType class TendbClusterMigrateClusterDetailSerializer(TendbBaseOperateDetailSerializer): class MigrateClusterInfoSerializer(serializers.Serializer): + class OldMasterSlaveSerializer(serializers.Serializer): + old_master = serializers.ListSerializer(child=HostInfoSerializer(help_text=_("旧主库主机"), required=False)) + old_slave = serializers.ListSerializer(child=HostInfoSerializer(help_text=_("旧从库主机"), required=False)) + new_master = HostInfoSerializer(help_text=_("新主库主机"), required=False) new_slave = HostInfoSerializer(help_text=_("新从库主机"), required=False) - old_master = HostInfoSerializer(help_text=_("旧主库主机"), required=False) - old_slave = HostInfoSerializer(help_text=_("旧从库主机"), required=False) + old_nodes = OldMasterSlaveSerializer(help_text=_("旧主从主机")) resource_spec = serializers.JSONField(help_text=_("资源规格"), required=False) cluster_id = serializers.IntegerField(help_text=_("集群ID列表")) ip_source = serializers.ChoiceField( help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) infos = serializers.ListSerializer(help_text=_("克隆主从信息"), child=MigrateClusterInfoSerializer()) backup_source = serializers.ChoiceField( help_text=_("备份源"), choices=MySQLBackupSource.get_choices(), default=MySQLBackupSource.REMOTE @@ -46,35 +54,22 @@ def validate(self, attrs): return attrs -class TendbClusterMigrateClusterParamBuilder(builders.FlowParamBuilder): +class TendbClusterMigrateClusterParamBuilder(MysqlMigrateClusterParamBuilder): controller = SpiderController.tendb_cluster_remote_migrate def format_ticket_data(self): - if self.ticket_data["ip_source"] == IpSource.RESOURCE_POOL: - return - - for info in self.ticket_data["infos"]: - info["new_master_ip"], info["new_slave_ip"] = info["new_master"]["ip"], info["new_slave"]["ip"] - info["bk_new_master"], info["bk_new_slave"] = info.pop("new_master"), info.pop("new_slave") - info["old_master_ip"], info["old_slave_ip"] = info.pop("old_master")["ip"], info.pop("old_slave")["ip"] - + super().format_ticket_data() -class TendbClusterMigrateClusterResourceParamBuilder(BaseOperateResourceParamBuilder): - def post_callback(self): - next_flow = self.ticket.next_flow() - ticket_data = next_flow.details["ticket_data"] - for info in ticket_data["infos"]: - info["bk_new_master"], info["bk_new_slave"] = info.pop("new_master")[0], info.pop("new_slave")[0] - info["new_master_ip"], info["new_slave_ip"] = info["bk_new_master"]["ip"], info["bk_new_slave"]["ip"] - info["old_master_ip"], info["old_slave_ip"] = info.pop("old_master")["ip"], info.pop("old_slave")["ip"] - next_flow.save(update_fields=["details"]) +class TendbClusterMigrateClusterResourceParamBuilder(MysqlMigrateClusterResourceParamBuilder): + pass -@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_MIGRATE_CLUSTER, is_apply=True) +@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_MIGRATE_CLUSTER, is_apply=True, is_recycle=True) class TendbClusterMigrateClusterFlowBuilder(BaseTendbTicketFlowBuilder): serializer = 
TendbClusterMigrateClusterDetailSerializer inner_flow_builder = TendbClusterMigrateClusterParamBuilder inner_flow_name = _("TenDB Cluster 主从迁移执行") resource_batch_apply_builder = TendbClusterMigrateClusterResourceParamBuilder retry_type = FlowRetryType.MANUAL_RETRY + need_patch_recycle_host_details = True diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py index 160517d02d..ad740cff66 100644 --- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_apply.py @@ -13,8 +13,10 @@ from rest_framework import serializers from backend.db_meta.models import Cluster +from backend.db_services.dbbase.constants import IpSource from backend.flow.engine.controller.spider import SpiderController from backend.ticket import builders +from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer from backend.ticket.constants import TicketType @@ -24,8 +26,12 @@ class MNTApplySerializer(serializers.Serializer): cluster_id = serializers.IntegerField(help_text=_("集群ID")) bk_cloud_id = serializers.IntegerField(help_text=_("云区域ID")) spider_ip_list = serializers.ListField(help_text=_("运维节点信息"), child=serializers.DictField()) + resource_spec = serializers.JSONField(help_text=_("资源规格参数"), required=False) infos = serializers.ListField(help_text=_("添加spider运维节点信息"), child=MNTApplySerializer()) + ip_source = serializers.ChoiceField( + help_text=_("机器导入类型"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT + ) def validate(self, attrs): super().validate(attrs) @@ -45,8 +51,13 @@ def format_ticket_data(self): info.update(immutable_domain=cluster_id__domain[info["cluster_id"]]) +class TendbMNTApplyResourceParamBuilder(BaseOperateResourceParamBuilder): + pass + + @builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_MNT_APPLY, is_apply=True) class TendbMNTApplyFlowBuilder(BaseTendbTicketFlowBuilder): serializer = TendbMNTApplyDetailSerializer inner_flow_builder = TendbMNTApplyParamBuilder + resource_batch_apply_builder = TendbMNTApplyResourceParamBuilder inner_flow_name = _("TendbCluster 添加运维节点") diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py index 254ea64293..c305bb75f7 100644 --- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_mnt_destroy.py @@ -14,17 +14,22 @@ from backend.flow.engine.controller.spider import SpiderController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer from backend.ticket.constants import TicketType class TendbMNTDestroyDetailSerializer(TendbBaseOperateDetailSerializer): class MNTDestroySerializer(serializers.Serializer): + class OldMNTSerializer(serializers.Serializer): + spider_ip_list = serializers.ListField(child=serializers.DictField()) + cluster_id = serializers.IntegerField(help_text=_("集群ID")) - spider_ip_list = serializers.ListField(help_text=_("运维节点信息"), child=serializers.DictField()) + old_nodes = OldMNTSerializer(help_text=_("运维节点信息")) infos = serializers.ListField(help_text=_("下架spider运维节点信息"), 
child=MNTDestroySerializer()) is_safe = serializers.BooleanField(help_text=_("是否安全模式执行"), required=False, default=True) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) def validate(self, attrs): super().validate(attrs) @@ -35,8 +40,9 @@ class TendbMNTDestroyParamBuilder(builders.FlowParamBuilder): controller = SpiderController.reduce_spider_mnt_scene -@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_MNT_DESTROY) +@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_MNT_DESTROY, is_recycle=True) class TendbMNTDestroyFlowBuilder(BaseTendbTicketFlowBuilder): serializer = TendbMNTDestroyDetailSerializer inner_flow_builder = TendbMNTDestroyParamBuilder inner_flow_name = _("TendbCluster 下架运维节点") + need_patch_recycle_host_details = True diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py index b745592605..5b133f5dcf 100644 --- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_node_reblance.py @@ -16,6 +16,7 @@ from backend.db_services.dbbase.constants import IpSource from backend.flow.engine.controller.spider import SpiderController from backend.ticket import builders +from backend.ticket.builders.common.base import HostRecycleSerializer from backend.ticket.builders.common.constants import MySQLBackupSource from backend.ticket.builders.common.field import DBTimezoneField from backend.ticket.builders.tendbcluster.base import ( @@ -44,6 +45,7 @@ class NodeRebalanceItemSerializer(serializers.Serializer): ip_source = serializers.ChoiceField( help_text=_("主机来源"), choices=IpSource.get_choices(), default=IpSource.RESOURCE_POOL.value ) + ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息")) need_checksum = serializers.BooleanField(help_text=_("执行前是否需要数据校验")) trigger_checksum_type = serializers.ChoiceField(help_text=_("数据校验触发类型"), choices=TriggerChecksumType.get_choices()) trigger_checksum_time = DBTimezoneField(help_text=_("数据校验 触发时间")) @@ -85,9 +87,10 @@ def post_callback(self): next_flow.save(update_fields=["details"]) -@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_NODE_REBALANCE, is_apply=True) +@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_NODE_REBALANCE, is_apply=True, is_recycle=True) class TendbMNTApplyFlowBuilder(BaseTendbTicketFlowBuilder): serializer = TendbNodeRebalanceDetailSerializer inner_flow_builder = TendbNodeRebalanceFlowParamBuilderBuilder resource_batch_apply_builder = TendbNodeRebalanceResourceParamBuilder inner_flow_name = _("TendbCluster 集群容量变更") + need_patch_recycle_cluster_details = True diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py index 0660bd8b5e..6210ad1545 100644 --- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py @@ -12,30 +12,34 @@ from django.utils.translation import gettext_lazy as _ from rest_framework import serializers -from backend.configuration.constants import AffinityEnum from backend.db_meta.enums import ClusterType -from backend.db_meta.models import StorageInstance from backend.db_services.dbbase.constants import IpSource from backend.flow.engine.controller.spider import SpiderController from backend.ticket import builders -from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer +from 
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py
index 0660bd8b5e..6210ad1545 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_restore_slave.py
@@ -12,30 +12,34 @@
 from django.utils.translation import gettext_lazy as _
 from rest_framework import serializers

-from backend.configuration.constants import AffinityEnum
 from backend.db_meta.enums import ClusterType
-from backend.db_meta.models import StorageInstance
 from backend.db_services.dbbase.constants import IpSource
 from backend.flow.engine.controller.spider import SpiderController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import BaseOperateResourceParamBuilder, HostInfoSerializer
+from backend.ticket.builders.common.base import HostInfoSerializer, HostRecycleSerializer
 from backend.ticket.builders.common.constants import MySQLBackupSource
-from backend.ticket.builders.mysql.mysql_restore_slave import MysqlRestoreSlaveDetailSerializer
+from backend.ticket.builders.mysql.mysql_restore_slave import (
+    MysqlRestoreSlaveDetailSerializer,
+    MysqlRestoreSlaveResourceParamBuilder,
+)
 from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder
 from backend.ticket.constants import TicketType
-from backend.utils.basic import get_target_items_from_details


 class TendbClusterRestoreSlaveDetailSerializer(MysqlRestoreSlaveDetailSerializer):
     class RestoreInfoSerializer(serializers.Serializer):
-        old_slave = HostInfoSerializer(help_text=_("旧从库 IP"))
+        class OldSlaveSerializer(serializers.Serializer):
+            old_slave = serializers.ListSerializer(child=HostInfoSerializer(help_text=_("旧从库 IP")))
+
+        new_slave = HostInfoSerializer(help_text=_("新从库 IP"), required=False)
+        old_nodes = OldSlaveSerializer(help_text=_("旧从库信息"))
         resource_spec = serializers.JSONField(help_text=_("新从库资源池参数"), required=False)
         cluster_id = serializers.IntegerField(help_text=_("集群ID"))

     ip_source = serializers.ChoiceField(
         help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT
     )
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))
     backup_source = serializers.ChoiceField(help_text=_("备份源"), choices=MySQLBackupSource.get_choices())
     infos = serializers.ListField(help_text=_("集群重建信息"), child=RestoreInfoSerializer())
@@ -65,44 +69,14 @@ def format_ticket_data(self):
         info["bk_old_slave"], info["bk_new_slave"] = info.pop("old_slave"), info.pop("new_slave")


-class TendbClusterRestoreSlaveResourceParamBuilder(BaseOperateResourceParamBuilder):
-    def patch_slave_subzone(self):
-        # 对于亲和性为跨园区的,slave和master需要在不同园区
-        slave_host_ids = get_target_items_from_details(self.ticket.details, match_keys=["bk_host_id"])
-        slaves = StorageInstance.objects.prefetch_related("as_receiver__ejector__machine", "machine").filter(
-            machine__bk_host_id__in=slave_host_ids, cluster_type=ClusterType.TenDBCluster
-        )
-        slave_host_map = {slave.machine.bk_host_id: slave for slave in slaves}
-        for info in self.ticket_data["infos"]:
-            resource_spec = info["resource_spec"]["new_slave"]
-            slave = slave_host_map[info["old_slave"]["bk_host_id"]]
-            master_subzone_id = slave.as_receiver.get().ejector.machine.bk_sub_zone_id
-            # 同城跨园区,要求slave和master在不同subzone
-            if resource_spec["affinity"] == AffinityEnum.CROS_SUBZONE:
-                resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=False)
-            # 同城同园区,要求slave和master在一个subzone
-            elif resource_spec["affinity"] in [AffinityEnum.SAME_SUBZONE, AffinityEnum.SAME_SUBZONE_CROSS_SWTICH]:
-                resource_spec["location_spec"].update(sub_zone_ids=[master_subzone_id], include_or_exclue=True)
-
-    def format(self):
-        # 补充亲和性和城市信息
-        super().patch_info_affinity_location(roles=["new_slave"])
-        # 补充slave园区申请
-        self.patch_slave_subzone()
-
-    def post_callback(self):
-        next_flow = self.ticket.next_flow()
-        ticket_data = next_flow.details["ticket_data"]
-        for info in ticket_data["infos"]:
-            info["bk_old_slave"], info["bk_new_slave"] = info.pop("old_slave"), info.pop("new_slave")[0]
-            info["old_slave_ip"], info["new_slave_ip"] = info["bk_old_slave"]["ip"], info["bk_new_slave"]["ip"]
-
-        next_flow.save(update_fields=["details"])
+class TendbClusterRestoreSlaveResourceParamBuilder(MysqlRestoreSlaveResourceParamBuilder):
+    pass


-@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_RESTORE_SLAVE, is_apply=True)
+@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_RESTORE_SLAVE, is_apply=True, is_recycle=True)
 class TendbClusterRestoreSlaveFlowBuilder(BaseTendbTicketFlowBuilder):
     serializer = TendbClusterRestoreSlaveDetailSerializer
     inner_flow_builder = TendbClusterRestoreSlaveParamBuilder
     inner_flow_name = _("TenDB Cluster Slave重建")
     resource_batch_apply_builder = TendbClusterRestoreSlaveResourceParamBuilder
+    need_patch_recycle_host_details = True
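Reviewer note: with this change each restore info nests the old slaves as a list under "old_nodes"; a minimal sketch of a validated entry (all values hypothetical):

# Hypothetical "infos" entry for TENDBCLUSTER_RESTORE_SLAVE after this change
info = {
    "cluster_id": 1,
    "old_nodes": {"old_slave": [{"ip": "127.0.0.1", "bk_host_id": 2001, "bk_cloud_id": 0}]},
    "new_slave": {"ip": "127.0.0.2", "bk_host_id": 2002, "bk_cloud_id": 0},  # optional with resource pool
    "resource_spec": {"new_slave": {"spec_id": 10, "count": 1}},
}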
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py
index 492f3647f6..7aeddcc7c9 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_reduce_nodes.py
@@ -13,26 +13,27 @@
 from rest_framework import serializers

 from backend.db_meta.enums import TenDBClusterSpiderRole
+from backend.db_meta.models import Cluster
 from backend.flow.engine.controller.spider import SpiderController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import HostInfoSerializer
+from backend.ticket.builders.common.base import HostInfoSerializer, HostRecycleSerializer, fetch_cluster_ids
 from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer
 from backend.ticket.constants import TicketType


 class TendbSpiderReduceNodesDetailSerializer(TendbBaseOperateDetailSerializer):
     class SpiderNodesItemSerializer(serializers.Serializer):
+        class OldSpiderSerializer(serializers.Serializer):
+            spider_reduced_hosts = serializers.ListSerializer(help_text=_("缩容spider信息"), child=HostInfoSerializer())
+
         cluster_id = serializers.IntegerField(help_text=_("集群ID"))
         spider_reduced_to_count = serializers.IntegerField(help_text=_("剩余spider数量"), required=False)
-        spider_reduced_hosts = serializers.ListSerializer(
-            help_text=_("缩容指定主机"), child=HostInfoSerializer(), required=False
-        )
-        reduce_spider_role = serializers.ChoiceField(
-            help_text=_("缩容的角色"), choices=TenDBClusterSpiderRole.get_choices()
-        )
+        old_nodes = OldSpiderSerializer(help_text=_("缩容指定主机"), required=False)
+        reduce_spider_role = serializers.ChoiceField(help_text=_("角色"), choices=TenDBClusterSpiderRole.get_choices())

     is_safe = serializers.BooleanField(help_text=_("是否做安全检测"))
     infos = serializers.ListSerializer(help_text=_("缩容信息"), child=SpiderNodesItemSerializer())
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))

     def validate(self, attrs):
         super().validate(attrs)
@@ -44,11 +45,46 @@ class TendbSpiderReduceNodesFlowParamBuilder(builders.FlowParamBuilder):
     controller = SpiderController.reduce_spider_nodes_scene

     def format_ticket_data(self):
-        pass
+        for info in self.ticket_data["infos"]:
+            info["spider_reduced_hosts"] = info.pop("old_nodes")["spider_reduced_hosts"]


-@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_REDUCE_NODES)
+@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_REDUCE_NODES, is_recycle=True)
 class TendbSpiderReduceNodesFlowBuilder(BaseTendbTicketFlowBuilder):
     serializer = TendbSpiderReduceNodesDetailSerializer
     inner_flow_builder = TendbSpiderReduceNodesFlowParamBuilder
     inner_flow_name = _("TenDB Cluster 接入层缩容")
+    need_patch_recycle_host_details = True
+
+    def calc_reduce_spider(self):
+        """计算实际缩容的spider主机"""
+        cluster_ids = fetch_cluster_ids(self.ticket.details["infos"])
+        clusters = Cluster.objects.prefetch_related("proxyinstance_set").filter(id__in=cluster_ids)
+        cluster_map = {cluster.id: cluster for cluster in clusters}
+        for info in self.ticket.details["infos"]:
+            # 如果指定主机缩容,则忽略
+            if info.get("old_nodes"):
+                continue
+
+            cluster = cluster_map[info["cluster_id"]]
+            reduce_spider_role = info["reduce_spider_role"]
+            # 获取目标角色的spider
+            spider_set = [
+                proxy
+                for proxy in cluster.proxyinstance_set.all()
+                if proxy.tendbclusterspiderext.spider_role == reduce_spider_role
+            ]
+            spider_count = len(spider_set)
+
+            # 计算合理的待下架的spider节点列表
+            # 选择上尽量避开ctl_primary的选择, 避免做一次切换逻辑
+            ctl_primary_ip = cluster.tendbcluster_ctl_primary_address().split(":")[0]
+            except_reduce_spiders = [spider for spider in spider_set if spider.machine.ip != ctl_primary_ip]
+            reduce_count = spider_count - info["spider_reduced_to_count"]
+            info["old_nodes"] = {
+                "spider_reduced_hosts": [
+                    {"ip": s.machine.ip, "bk_host_id": s.machine.bk_host_id}
+                    for s in except_reduce_spiders[:reduce_count]
+                ]
+            }
+
+    def patch_ticket_detail(self):
+        self.calc_reduce_spider()
+        super().patch_ticket_detail()
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_slave_destroy.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_slave_destroy.py
index c16b6aff5a..92e5b2adec 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_slave_destroy.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_spider_slave_destroy.py
@@ -12,8 +12,11 @@
 from django.utils.translation import gettext_lazy as _
 from rest_framework import serializers

+from backend.db_meta.enums import TenDBClusterSpiderRole
+from backend.db_meta.models import ProxyInstance
 from backend.flow.engine.controller.spider import SpiderController
 from backend.ticket import builders
+from backend.ticket.builders.common.base import HostRecycleSerializer
 from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer
 from backend.ticket.constants import TicketType

@@ -21,14 +24,32 @@
 class SpiderSlaveDestroyDetailSerializer(TendbBaseOperateDetailSerializer):
     is_safe = serializers.BooleanField(help_text=_("是否做安全检测"), required=False, default=True)
     cluster_ids = serializers.ListField(help_text=_("集群ID列表"), child=serializers.IntegerField())
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))


 class SpiderSlaveDestroyFlowParamBuilder(builders.FlowParamBuilder):
     controller = SpiderController.destroy_tendb_slave_cluster


-@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_SLAVE_DESTROY)
+@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_SPIDER_SLAVE_DESTROY, is_recycle=True)
 class SpiderSlaveApplyFlowBuilder(BaseTendbTicketFlowBuilder):
     serializer = SpiderSlaveDestroyDetailSerializer
     inner_flow_builder = SpiderSlaveDestroyFlowParamBuilder
     inner_flow_name = _("TenDB Cluster 只读接入层下架")
+    need_patch_recycle_host_details = True
+
+    def get_reduce_spider_slave(self):
+        cluster_ids = self.ticket.details["cluster_ids"]
+        # 获取所有下架的spider slave
+        reduce_spider_slaves = ProxyInstance.objects.select_related("machine").filter(
+            cluster__in=cluster_ids, tendbclusterspiderext__spider_role=TenDBClusterSpiderRole.SPIDER_SLAVE.value
+        )
+        # 获取下架的机器信息,并补充到details中
+        reduce_spider_slave_hosts = [
+            {"ip": spider.machine.ip, "bk_host_id": spider.machine.bk_host_id} for spider in reduce_spider_slaves
+        ]
+        self.ticket.details["old_nodes"] = {"reduce_spider_slave_hosts": reduce_spider_slave_hosts}
+
+    def patch_ticket_detail(self):
+        self.get_reduce_spider_slave()
+        super().patch_ticket_detail()
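Reviewer note: the selection rule in calc_reduce_spider, restated as a self-contained sketch with hypothetical data shapes — exclude the ctl primary from candidates, then take just enough hosts to shrink the cluster to spider_reduced_to_count:

def pick_spiders_to_reduce(spiders, ctl_primary_ip, reduced_to_count):
    """spiders: list of {"ip": ...} dicts; returns the hosts to take offline."""
    candidates = [s for s in spiders if s["ip"] != ctl_primary_ip]
    reduce_count = len(spiders) - reduced_to_count
    return candidates[:reduce_count]

# Shrinking 3 spiders to 2 removes exactly one host, never the ctl primary:
assert pick_spiders_to_reduce(
    [{"ip": "10.0.0.1"}, {"ip": "10.0.0.2"}, {"ip": "10.0.0.3"}], "10.0.0.1", 2
) == [{"ip": "10.0.0.2"}]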
diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_temporary_destroy.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_temporary_destroy.py
index bc6bcf8e3d..0350a1ca80 100644
--- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_temporary_destroy.py
+++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_temporary_destroy.py
@@ -13,7 +13,7 @@
 from backend.flow.engine.controller.spider import SpiderController
 from backend.ticket import builders
-from backend.ticket.builders.common.base import CommonValidate
+from backend.ticket.builders.common.base import CommonValidate, HostRecycleSerializer
 from backend.ticket.builders.tendbcluster.base import (
     BaseTendbTicketFlowBuilder,
     TendbClustersTakeDownDetailsSerializer,
@@ -23,6 +23,8 @@

 class TendbTemporaryDestroyDetailSerializer(TendbClustersTakeDownDetailsSerializer):
+    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"))
+
     def validate_cluster_ids(self, value):
         CommonValidate.validate_destroy_temporary_cluster_ids(value)
         return value
@@ -36,9 +38,10 @@ class TendbTemporaryDestroyFlowParamBuilder(builders.FlowParamBuilder):
     controller = SpiderController.spider_cluster_destroy_scene


-@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_TEMPORARY_DESTROY)
+@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_TEMPORARY_DESTROY, is_recycle=True)
 class TendbDestroyFlowBuilder(BaseTendbTicketFlowBuilder):
     serializer = TendbTemporaryDestroyDetailSerializer
+    need_patch_recycle_cluster_details = True

     def custom_ticket_flows(self):
         flows = [
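Reviewer note: an assumption about the recurring flags — is_recycle=True plus need_patch_recycle_host_details / need_patch_recycle_cluster_details presumably make the builder's patch_ticket_detail collect the hosts being released (per host from "old_nodes", or per cluster) into the details, matching the "recycle_hosts" key the derived recycle ticket reads later in this patch. A sketch of the assumed effect only:

# Assumed effect of the need_patch_recycle_* flags during patch_ticket_detail:
ticket.details["recycle_hosts"] = [{"ip": "127.0.0.1", "bk_host_id": 2001}]  # hypothetical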
diff --git a/dbm-ui/backend/ticket/constants.py b/dbm-ui/backend/ticket/constants.py
index 9308ee4212..4ad87911e3 100644
--- a/dbm-ui/backend/ticket/constants.py
+++ b/dbm-ui/backend/ticket/constants.py
@@ -178,7 +178,8 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     MYSQL_MASTER_FAIL_OVER = TicketEnumField("MYSQL_MASTER_FAIL_OVER", _("MySQL 主库故障切换"), _("集群维护"))
     MYSQL_HA_APPLY = TicketEnumField("MYSQL_HA_APPLY", _("MySQL 高可用部署"), register_iam=False)
     MYSQL_IMPORT_SQLFILE = TicketEnumField("MYSQL_IMPORT_SQLFILE", _("MySQL 变更SQL执行"), _("SQL 任务"))
-    MYSQL_FORCE_IMPORT_SQLFILE = TicketEnumField("MYSQL_FORCE_IMPORT_SQLFILE", _("MySQL 强制变更SQL执行"), _("SQL 任务"), register_iam=False)  # noqa
+    MYSQL_FORCE_IMPORT_SQLFILE = TicketEnumField("MYSQL_FORCE_IMPORT_SQLFILE", _("MySQL 强制变更SQL执行"),
+                                                 _("SQL 任务"), register_iam=False)  # noqa
     MYSQL_SEMANTIC_CHECK = TicketEnumField("MYSQL_SEMANTIC_CHECK", _("MySQL 模拟执行"), register_iam=False)
     MYSQL_PROXY_ADD = TicketEnumField("MYSQL_PROXY_ADD", _("MySQL 添加Proxy"), _("集群维护"))
     MYSQL_PROXY_SWITCH = TicketEnumField("MYSQL_PROXY_SWITCH", _("MySQL 替换Proxy"), _("集群维护"))
@@ -190,7 +191,8 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     MYSQL_HA_ENABLE = TicketEnumField("MYSQL_HA_ENABLE", _("MySQL 高可用启用"), register_iam=False)
     MYSQL_AUTHORIZE_RULES = TicketEnumField("MYSQL_AUTHORIZE_RULES", _("MySQL 集群授权"), _("权限管理"))
     MYSQL_EXCEL_AUTHORIZE_RULES = TicketEnumField("MYSQL_EXCEL_AUTHORIZE_RULES", _("MySQL EXCEL授权"), _("权限管理"))
-    MYSQL_CLIENT_CLONE_RULES = TicketEnumField("MYSQL_CLIENT_CLONE_RULES", _("MySQL 客户端权限克隆"), register_iam=False)
+    MYSQL_CLIENT_CLONE_RULES = TicketEnumField("MYSQL_CLIENT_CLONE_RULES", _("MySQL 客户端权限克隆"),
+                                               register_iam=False)
     MYSQL_INSTANCE_CLONE_RULES = TicketEnumField("MYSQL_INSTANCE_CLONE_RULES", _("MySQL DB实例权限克隆"), _("权限管理"))
     MYSQL_HA_RENAME_DATABASE = TicketEnumField("MYSQL_HA_RENAME_DATABASE", _("MySQL 高可用DB重命名"), _("集群维护"))
     MYSQL_HA_TRUNCATE_DATA = TicketEnumField("MYSQL_HA_TRUNCATE_DATA", _("MySQL 高可用清档"), _("数据处理"))
@@ -203,7 +205,8 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     MYSQL_ROLLBACK_CLUSTER = TicketEnumField("MYSQL_ROLLBACK_CLUSTER", _("MySQL 定点构造"), _("回档"))
     MYSQL_HA_FULL_BACKUP = TicketEnumField("MYSQL_HA_FULL_BACKUP", _("MySQL 高可用全库备份"), _("备份"))
     MYSQL_SINGLE_TRUNCATE_DATA = TicketEnumField("MYSQL_SINGLE_TRUNCATE_DATA", _("MySQL 单节点清档"), _("数据处理"))
-    MYSQL_SINGLE_RENAME_DATABASE = TicketEnumField("MYSQL_SINGLE_RENAME_DATABASE", _("MySQL 单节点DB重命名"), _("集群维护"))  # noqa
+    MYSQL_SINGLE_RENAME_DATABASE = TicketEnumField("MYSQL_SINGLE_RENAME_DATABASE", _("MySQL 单节点DB重命名"),
+                                                   _("集群维护"))  # noqa
     MYSQL_HA_STANDARDIZE = TicketEnumField("MYSQL_HA_STANDARDIZE", _("TendbHA 标准化"), register_iam=False)
     MYSQL_HA_METADATA_IMPORT = TicketEnumField("MYSQL_HA_METADATA_IMPORT", _("TendbHA 元数据导入"), register_iam=False)
     MYSQL_OPEN_AREA = TicketEnumField("MYSQL_OPEN_AREA", _("MySQL 开区"), _("克隆开区"), register_iam=False)
@@ -214,58 +217,97 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     MYSQL_SLAVE_MIGRATE_UPGRADE = TicketEnumField("MYSQL_SLAVE_MIGRATE_UPGRADE", _("MySQL Slave 迁移升级"), _("版本升级"))
     MYSQL_RO_SLAVE_UNINSTALL = TicketEnumField("MYSQL_RO_SLAVE_UNINSTALL", _("MySQL非stanby slave下架"), _("集群维护"))
     MYSQL_PROXY_UPGRADE = TicketEnumField("MYSQL_PROXY_UPGRADE", _("MySQL Proxy升级"), _("版本升级"))
-    MYSQL_HA_TRANSFER_TO_OTHER_BIZ = TicketEnumField("MYSQL_HA_TRANSFER_TO_OTHER_BIZ", _("TendbHA集群迁移至其他业务"), register_iam=False)# noqa
-    MYSQL_PUSH_PERIPHERAL_CONFIG = TicketEnumField("MYSQL_PUSH_PERIPHERAL_CONFIG", _("推送周边配置"), register_iam=False)
+    MYSQL_HA_TRANSFER_TO_OTHER_BIZ = TicketEnumField("MYSQL_HA_TRANSFER_TO_OTHER_BIZ", _("TendbHA集群迁移至其他业务"),
+                                                     register_iam=False)  # noqa
+    MYSQL_PUSH_PERIPHERAL_CONFIG = TicketEnumField("MYSQL_PUSH_PERIPHERAL_CONFIG", _("推送周边配置"),
+                                                   register_iam=False)

     # SPIDER(TenDB Cluster)
-    TENDBCLUSTER_OPEN_AREA = TicketEnumField("TENDBCLUSTER_OPEN_AREA", _("TenDB Cluster 开区"), _("克隆开区"), register_iam=False)  # noqa
+    TENDBCLUSTER_OPEN_AREA = TicketEnumField("TENDBCLUSTER_OPEN_AREA", _("TenDB Cluster 开区"), _("克隆开区"),
+                                             register_iam=False)  # noqa
     TENDBCLUSTER_CHECKSUM = TicketEnumField("TENDBCLUSTER_CHECKSUM", _("TenDB Cluster 数据校验修复"), _("数据处理"))
-    TENDBCLUSTER_DATA_REPAIR = TicketEnumField("TENDBCLUSTER_DATA_REPAIR", _("TenDB Cluster 数据修复"), register_iam=False)  # noqa
+    TENDBCLUSTER_DATA_REPAIR = TicketEnumField("TENDBCLUSTER_DATA_REPAIR", _("TenDB Cluster 数据修复"),
+                                               register_iam=False)  # noqa
     TENDBCLUSTER_PARTITION = TicketEnumField("TENDBCLUSTER_PARTITION", _("TenDB Cluster 分区管理"), _("分区管理"))
-    TENDBCLUSTER_PARTITION_CRON = TicketEnumField("TENDBCLUSTER_PARTITION_CRON", _("TenDB Cluster 分区定时任务"), register_iam=False)  # noqa
-    TENDBCLUSTER_DB_TABLE_BACKUP = TicketEnumField("TENDBCLUSTER_DB_TABLE_BACKUP", _("TenDB Cluster 库表备份"), _("备份"))
-    TENDBCLUSTER_RENAME_DATABASE = TicketEnumField("TENDBCLUSTER_RENAME_DATABASE", _("TenDB Cluster 数据库重命名"), _("SQL 任务"))  # noqa
-    TENDBCLUSTER_TRUNCATE_DATABASE = TicketEnumField("TENDBCLUSTER_TRUNCATE_DATABASE", _("TenDB Cluster 清档"), _("数据处理"))
-    TENDBCLUSTER_MASTER_FAIL_OVER = TicketEnumField("TENDBCLUSTER_MASTER_FAIL_OVER", _("TenDB Cluster 主库故障切换"), _("集群维护"))  # noqa
-    TENDBCLUSTER_MASTER_SLAVE_SWITCH = TicketEnumField("TENDBCLUSTER_MASTER_SLAVE_SWITCH", _("TenDB Cluster 主从互切"), _("集群维护"))  # noqa
-    TENDBCLUSTER_IMPORT_SQLFILE = TicketEnumField("TENDBCLUSTER_IMPORT_SQLFILE", _("TenDB Cluster 变更SQL执行"), _("SQL 任务"))  # noqa
-    TENDBCLUSTER_FORCE_IMPORT_SQLFILE = TicketEnumField("TENDBCLUSTER_FORCE_IMPORT_SQLFILE", _("TenDB Cluster 强制变更SQL执行"), _("SQL 任务"), register_iam=False)  # noqa
-    TENDBCLUSTER_SEMANTIC_CHECK = TicketEnumField("TENDBCLUSTER_SEMANTIC_CHECK", _("TenDB Cluster 模拟执行"), register_iam=False)  # noqa
-    TENDBCLUSTER_SPIDER_ADD_NODES = TicketEnumField("TENDBCLUSTER_SPIDER_ADD_NODES", _("TenDB Cluster 扩容接入层"), _("集群维护"))  # noqa
-    TENDBCLUSTER_SPIDER_REDUCE_NODES = TicketEnumField("TENDBCLUSTER_SPIDER_REDUCE_NODES", _("TenDB Cluster 缩容接入层"), _("集群维护"))  # noqa
-    TENDBCLUSTER_SPIDER_MNT_APPLY = TicketEnumField("TENDBCLUSTER_SPIDER_MNT_APPLY", _("TenDB Cluster 添加运维节点"), _("运维 Spider 管理"))  # noqa
-    TENDBCLUSTER_SPIDER_MNT_DESTROY = TicketEnumField("TENDBCLUSTER_SPIDER_MNT_DESTROY", _("TenDB Cluster 下架运维节点"), _("运维 Spider 管理"))  # noqa
-    TENDBCLUSTER_SPIDER_SLAVE_APPLY = TicketEnumField("TENDBCLUSTER_SPIDER_SLAVE_APPLY", _("TenDB Cluster 部署只读接入层"), _("访问入口"))  # noqa
-    TENDBCLUSTER_SPIDER_SLAVE_DESTROY = TicketEnumField("TENDBCLUSTER_SPIDER_SLAVE_DESTROY", _("TenDB Cluster 只读接入层下架"), _("访问入口"))  # noqa
-    TENDBCLUSTER_RESTORE_SLAVE = TicketEnumField("TENDBCLUSTER_RESTORE_SLAVE", _("TenDB Cluster Slave重建"), _("集群维护"))  # noqa
-    TENDBCLUSTER_RESTORE_LOCAL_SLAVE = TicketEnumField("TENDBCLUSTER_RESTORE_LOCAL_SLAVE", _("TenDB Cluster Slave原地重建"), _("集群维护"))  # noqa
-    TENDBCLUSTER_MIGRATE_CLUSTER = TicketEnumField("TENDBCLUSTER_MIGRATE_CLUSTER", _("TenDB Cluster 主从迁移"), _("集群维护"))  # noqa
+    TENDBCLUSTER_PARTITION_CRON = TicketEnumField("TENDBCLUSTER_PARTITION_CRON", _("TenDB Cluster 分区定时任务"),
+                                                  register_iam=False)  # noqa
+    TENDBCLUSTER_DB_TABLE_BACKUP = TicketEnumField("TENDBCLUSTER_DB_TABLE_BACKUP", _("TenDB Cluster 库表备份"),
+                                                   _("备份"))
+    TENDBCLUSTER_RENAME_DATABASE = TicketEnumField("TENDBCLUSTER_RENAME_DATABASE", _("TenDB Cluster 数据库重命名"),
+                                                   _("SQL 任务"))  # noqa
+    TENDBCLUSTER_TRUNCATE_DATABASE = TicketEnumField("TENDBCLUSTER_TRUNCATE_DATABASE", _("TenDB Cluster 清档"),
+                                                     _("数据处理"))
+    TENDBCLUSTER_MASTER_FAIL_OVER = TicketEnumField("TENDBCLUSTER_MASTER_FAIL_OVER", _("TenDB Cluster 主库故障切换"),
+                                                    _("集群维护"))  # noqa
+    TENDBCLUSTER_MASTER_SLAVE_SWITCH = TicketEnumField("TENDBCLUSTER_MASTER_SLAVE_SWITCH", _("TenDB Cluster 主从互切"),
+                                                       _("集群维护"))  # noqa
+    TENDBCLUSTER_IMPORT_SQLFILE = TicketEnumField("TENDBCLUSTER_IMPORT_SQLFILE", _("TenDB Cluster 变更SQL执行"),
+                                                  _("SQL 任务"))  # noqa
+    TENDBCLUSTER_FORCE_IMPORT_SQLFILE = TicketEnumField("TENDBCLUSTER_FORCE_IMPORT_SQLFILE",
+                                                        _("TenDB Cluster 强制变更SQL执行"), _("SQL 任务"),
+                                                        register_iam=False)  # noqa
+    TENDBCLUSTER_SEMANTIC_CHECK = TicketEnumField("TENDBCLUSTER_SEMANTIC_CHECK", _("TenDB Cluster 模拟执行"),
+                                                  register_iam=False)  # noqa
+    TENDBCLUSTER_SPIDER_ADD_NODES = TicketEnumField("TENDBCLUSTER_SPIDER_ADD_NODES", _("TenDB Cluster 扩容接入层"),
+                                                    _("集群维护"))  # noqa
+    TENDBCLUSTER_SPIDER_REDUCE_NODES = TicketEnumField("TENDBCLUSTER_SPIDER_REDUCE_NODES",
+                                                       _("TenDB Cluster 缩容接入层"), _("集群维护"))  # noqa
+    TENDBCLUSTER_SPIDER_MNT_APPLY = TicketEnumField("TENDBCLUSTER_SPIDER_MNT_APPLY", _("TenDB Cluster 添加运维节点"),
+                                                    _("运维 Spider 管理"))  # noqa
+    TENDBCLUSTER_SPIDER_MNT_DESTROY = TicketEnumField("TENDBCLUSTER_SPIDER_MNT_DESTROY",
+                                                      _("TenDB Cluster 下架运维节点"), _("运维 Spider 管理"))  # noqa
+    TENDBCLUSTER_SPIDER_SLAVE_APPLY = TicketEnumField("TENDBCLUSTER_SPIDER_SLAVE_APPLY",
+                                                      _("TenDB Cluster 部署只读接入层"), _("访问入口"))  # noqa
+    TENDBCLUSTER_SPIDER_SLAVE_DESTROY = TicketEnumField("TENDBCLUSTER_SPIDER_SLAVE_DESTROY",
+                                                        _("TenDB Cluster 只读接入层下架"), _("访问入口"))  # noqa
+    TENDBCLUSTER_RESTORE_SLAVE = TicketEnumField("TENDBCLUSTER_RESTORE_SLAVE", _("TenDB Cluster Slave重建"),
+                                                 _("集群维护"))  # noqa
+    TENDBCLUSTER_RESTORE_LOCAL_SLAVE = TicketEnumField("TENDBCLUSTER_RESTORE_LOCAL_SLAVE",
+                                                       _("TenDB Cluster Slave原地重建"), _("集群维护"))  # noqa
+    TENDBCLUSTER_MIGRATE_CLUSTER = TicketEnumField("TENDBCLUSTER_MIGRATE_CLUSTER", _("TenDB Cluster 主从迁移"),
+                                                   _("集群维护"))  # noqa
     TENDBCLUSTER_APPLY = TicketEnumField("TENDBCLUSTER_APPLY", _("TenDB Cluster 集群部署"))
     TENDBCLUSTER_ENABLE = TicketEnumField("TENDBCLUSTER_ENABLE", _("TenDB Cluster 集群启用"), register_iam=False)
     TENDBCLUSTER_DISABLE = TicketEnumField("TENDBCLUSTER_DISABLE", _("TenDB Cluster 集群禁用"), register_iam=False)
     TENDBCLUSTER_DESTROY = TicketEnumField("TENDBCLUSTER_DESTROY", _("TenDB Cluster 集群销毁"), _("集群管理"))
-    TENDBCLUSTER_TEMPORARY_DESTROY = TicketEnumField("TENDBCLUSTER_TEMPORARY_DESTROY", _("TenDB Cluster 临时集群销毁"), _("集群管理"))  # noqa
-    TENDBCLUSTER_NODE_REBALANCE = TicketEnumField("TENDBCLUSTER_NODE_REBALANCE", _("TenDB Cluster 集群容量变更"), _("集群维护"))  # noqa
+    TENDBCLUSTER_TEMPORARY_DESTROY = TicketEnumField("TENDBCLUSTER_TEMPORARY_DESTROY", _("TenDB Cluster 临时集群销毁"),
+                                                     _("集群管理"))  # noqa
+    TENDBCLUSTER_NODE_REBALANCE = TicketEnumField("TENDBCLUSTER_NODE_REBALANCE", _("TenDB Cluster 集群容量变更"),
+                                                  _("集群维护"))  # noqa
     TENDBCLUSTER_FULL_BACKUP = TicketEnumField("TENDBCLUSTER_FULL_BACKUP", _("TenDB Cluster 全库备份"), _("备份"))
-    TENDBCLUSTER_ROLLBACK_CLUSTER = TicketEnumField("TENDBCLUSTER_ROLLBACK_CLUSTER", _("TenDB Cluster 定点构造"), _("回档"))  # noqa
+    TENDBCLUSTER_ROLLBACK_CLUSTER = TicketEnumField("TENDBCLUSTER_ROLLBACK_CLUSTER", _("TenDB Cluster 定点构造"),
+                                                    _("回档"))  # noqa
     TENDBCLUSTER_FLASHBACK = TicketEnumField("TENDBCLUSTER_FLASHBACK", _("TenDB Cluster 闪回"), _("回档"))
-    TENDBCLUSTER_CLIENT_CLONE_RULES = TicketEnumField("TENDBCLUSTER_CLIENT_CLONE_RULES", _("TenDB Cluster 客户端权限克隆"), _("权限管理"))  # noqa
-    TENDBCLUSTER_INSTANCE_CLONE_RULES = TicketEnumField("TENDBCLUSTER_INSTANCE_CLONE_RULES", _("TenDB Cluster DB实例权限克隆"), _("权限管理"))  # noqa
-    TENDBCLUSTER_AUTHORIZE_RULES = TicketEnumField("TENDBCLUSTER_AUTHORIZE_RULES", _("TenDB Cluster 授权"), _("权限管理"))
-    TENDBCLUSTER_EXCEL_AUTHORIZE_RULES = TicketEnumField("TENDBCLUSTER_EXCEL_AUTHORIZE_RULES", _("TenDB Cluster EXCEL授权"), _("权限管理"))  # noqa
-    TENDBCLUSTER_STANDARDIZE = TicketEnumField("TENDBCLUSTER_STANDARDIZE", _("TenDB Cluster 集群标准化"), register_iam=False)
-    TENDBCLUSTER_METADATA_IMPORT = TicketEnumField("TENDBCLUSTER_METADATA_IMPORT", _("TenDB Cluster 元数据导入"), register_iam=False)  # noqa
-    TENDBCLUSTER_APPEND_DEPLOY_CTL = TicketEnumField("TENDBCLUSTER_APPEND_DEPLOY_CTL", _("TenDB Cluster 追加部署中控"), register_iam=False)  # noqa
-    TENDBSINGLE_METADATA_IMPORT = TicketEnumField("TENDBSINGLE_METADATA_IMPORT", _("TenDB Single 元数据导入"), register_iam=False)  # noqa
-    TENDBSINGLE_STANDARDIZE = TicketEnumField("TENDBSINGLE_STANDARDIZE", _("TenDB Single 集群标准化"), register_iam=False)  # noqa
+    TENDBCLUSTER_CLIENT_CLONE_RULES = TicketEnumField("TENDBCLUSTER_CLIENT_CLONE_RULES",
+                                                      _("TenDB Cluster 客户端权限克隆"), _("权限管理"))  # noqa
+    TENDBCLUSTER_INSTANCE_CLONE_RULES = TicketEnumField("TENDBCLUSTER_INSTANCE_CLONE_RULES",
+                                                        _("TenDB Cluster DB实例权限克隆"), _("权限管理"))  # noqa
+    TENDBCLUSTER_AUTHORIZE_RULES = TicketEnumField("TENDBCLUSTER_AUTHORIZE_RULES", _("TenDB Cluster 授权"),
+                                                   _("权限管理"))
+    TENDBCLUSTER_EXCEL_AUTHORIZE_RULES = TicketEnumField("TENDBCLUSTER_EXCEL_AUTHORIZE_RULES",
+                                                         _("TenDB Cluster EXCEL授权"), _("权限管理"))  # noqa
+    TENDBCLUSTER_STANDARDIZE = TicketEnumField("TENDBCLUSTER_STANDARDIZE", _("TenDB Cluster 集群标准化"),
+                                               register_iam=False)
+    TENDBCLUSTER_METADATA_IMPORT = TicketEnumField("TENDBCLUSTER_METADATA_IMPORT", _("TenDB Cluster 元数据导入"),
+                                                   register_iam=False)  # noqa
+    TENDBCLUSTER_APPEND_DEPLOY_CTL = TicketEnumField("TENDBCLUSTER_APPEND_DEPLOY_CTL", _("TenDB Cluster 追加部署中控"),
+                                                     register_iam=False)  # noqa
+    TENDBSINGLE_METADATA_IMPORT = TicketEnumField("TENDBSINGLE_METADATA_IMPORT", _("TenDB Single 元数据导入"),
+                                                  register_iam=False)  # noqa
+    TENDBSINGLE_STANDARDIZE = TicketEnumField("TENDBSINGLE_STANDARDIZE", _("TenDB Single 集群标准化"),
+                                              register_iam=False)  # noqa
     TENDBCLUSTER_DATA_MIGRATE = TicketEnumField("TENDBCLUSTER_DATA_MIGRATE", _("TenDB Cluster DB克隆"), _("数据处理"))
     TENDBCLUSTER_DUMP_DATA = TicketEnumField("TENDBCLUSTER_DUMP_DATA", _("TenDB Cluster 数据导出"), _("数据处理"))

     # Tbinlogdumper
     TBINLOGDUMPER_INSTALL = TicketEnumField("TBINLOGDUMPER_INSTALL", _("TBINLOGDUMPER 上架"), register_iam=False)
-    TBINLOGDUMPER_REDUCE_NODES = TicketEnumField("TBINLOGDUMPER_REDUCE_NODES", _("TBINLOGDUMPER 下架"), register_iam=False)  # noqa
-    TBINLOGDUMPER_SWITCH_NODES = TicketEnumField("TBINLOGDUMPER_SWITCH_NODES", _("TBINLOGDUMPER 切换"), register_iam=False)  # noqa
-    TBINLOGDUMPER_DISABLE_NODES = TicketEnumField("TBINLOGDUMPER_DISABLE_NODES", _("TBINLOGDUMPER 禁用"), register_iam=False)  # noqa
-    TBINLOGDUMPER_ENABLE_NODES = TicketEnumField("TBINLOGDUMPER_ENABLE_NODES", _("TBINLOGDUMPER 启用"), register_iam=False)  # noqa
+    TBINLOGDUMPER_REDUCE_NODES = TicketEnumField("TBINLOGDUMPER_REDUCE_NODES", _("TBINLOGDUMPER 下架"),
+                                                 register_iam=False)  # noqa
+    TBINLOGDUMPER_SWITCH_NODES = TicketEnumField("TBINLOGDUMPER_SWITCH_NODES", _("TBINLOGDUMPER 切换"),
+                                                 register_iam=False)  # noqa
+    TBINLOGDUMPER_DISABLE_NODES = TicketEnumField("TBINLOGDUMPER_DISABLE_NODES", _("TBINLOGDUMPER 禁用"),
+                                                  register_iam=False)  # noqa
+    TBINLOGDUMPER_ENABLE_NODES = TicketEnumField("TBINLOGDUMPER_ENABLE_NODES", _("TBINLOGDUMPER 启用"),
+                                                 register_iam=False)  # noqa

     # SQLServer
     SQLSERVER_SINGLE_APPLY = TicketEnumField("SQLSERVER_SINGLE_APPLY", _("SQLServer 单节点部署"), register_iam=False)
@@ -277,9 +319,12 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     SQLSERVER_DISABLE = TicketEnumField("SQLSERVER_DISABLE", _("SQLServer 集群禁用"), register_iam=False)
     SQLSERVER_ENABLE = TicketEnumField("SQLSERVER_ENABLE", _("SQLServer 集群启用"), register_iam=False)
     SQLSERVER_DBRENAME = TicketEnumField("SQLSERVER_DBRENAME", _("SQLServer DB重命名"), _("集群维护"))
-    SQLSERVER_MASTER_SLAVE_SWITCH = TicketEnumField("SQLSERVER_MASTER_SLAVE_SWITCH", _("SQLServer 主从互切"), _("集群维护"))  # noqa
-    SQLSERVER_MASTER_FAIL_OVER = TicketEnumField("SQLSERVER_MASTER_FAIL_OVER", _("SQLServer 主库故障切换"), _("集群维护"))
-    SQLSERVER_RESTORE_LOCAL_SLAVE = TicketEnumField("SQLSERVER_RESTORE_LOCAL_SLAVE", _("SQLServer 原地重建"), _("集群维护"))  # noqa
+    SQLSERVER_MASTER_SLAVE_SWITCH = TicketEnumField("SQLSERVER_MASTER_SLAVE_SWITCH", _("SQLServer 主从互切"),
+                                                    _("集群维护"))  # noqa
+    SQLSERVER_MASTER_FAIL_OVER = TicketEnumField("SQLSERVER_MASTER_FAIL_OVER", _("SQLServer 主库故障切换"),
+                                                 _("集群维护"))
+    SQLSERVER_RESTORE_LOCAL_SLAVE = TicketEnumField("SQLSERVER_RESTORE_LOCAL_SLAVE", _("SQLServer 原地重建"),
+                                                    _("集群维护"))  # noqa
     SQLSERVER_RESTORE_SLAVE = TicketEnumField("SQLSERVER_RESTORE_SLAVE", _("SQLServer 新机重建"), _("集群维护"))
     SQLSERVER_ADD_SLAVE = TicketEnumField("SQLSERVER_ADD_SLAVE", _("SQLServer 添加从库"), _("集群维护"))
     SQLSERVER_RESET = TicketEnumField("SQLSERVER_RESET", _("SQLServer 集群重置"), _("集群维护"))
@@ -287,9 +332,11 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     SQLSERVER_INCR_MIGRATE = TicketEnumField("SQLSERVER_INCR_MIGRATE", _("SQLServer 增量迁移"), _("数据处理"))
     SQLSERVER_ROLLBACK = TicketEnumField("SQLSERVER_ROLLBACK", _("SQLServer 定点构造"), _("数据处理"))
     SQLSERVER_AUTHORIZE_RULES = TicketEnumField("SQLSERVER_AUTHORIZE_RULES", _("SQLServer 集群授权"), _("权限管理"))
-    SQLSERVER_EXCEL_AUTHORIZE_RULES = TicketEnumField("SQLSERVER_EXCEL_AUTHORIZE_RULES", _("SQLServer EXCEL授权"), _("权限管理"))  # noqa
+    SQLSERVER_EXCEL_AUTHORIZE_RULES = TicketEnumField("SQLSERVER_EXCEL_AUTHORIZE_RULES", _("SQLServer EXCEL授权"),
+                                                      _("权限管理"))  # noqa
     SQLSERVER_BUILD_DB_SYNC = TicketEnumField("SQLSERVER_BUILD_DB_SYNC", _("SQLServer DB建立同步"), register_iam=False)
-    SQLSERVER_MODIFY_STATUS = TicketEnumField("SQLSERVER_MODIFY_STATUS", _("SQLServer 修改故障实例状态"), register_iam=False)
+    SQLSERVER_MODIFY_STATUS = TicketEnumField("SQLSERVER_MODIFY_STATUS", _("SQLServer 修改故障实例状态"),
+                                              register_iam=False)

     # REDIS
     REDIS_PLUGIN_CREATE_CLB = TicketEnumField("REDIS_PLUGIN_CREATE_CLB", _("Redis 创建CLB"), _("集群管理"))
@@ -315,18 +362,22 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     REDIS_SCALE_UPDOWN = TicketEnumField("REDIS_SCALE_UPDOWN", _("Redis 集群容量变更"), _("集群维护"))
     REDIS_CLUSTER_CUTOFF = TicketEnumField("REDIS_CLUSTER_CUTOFF", _("Redis 整机替换"), _("集群维护"))
     REDIS_CLUSTER_AUTOFIX = TicketEnumField("REDIS_CLUSTER_AUTOFIX", _("Redis 故障自愈"), _("集群维护"))
-    REDIS_CLUSTER_INSTANCE_SHUTDOWN = TicketEnumField("REDIS_CLUSTER_INSTANCE_SHUTDOWN", _("Redis 故障自愈-实例下架"), _("集群维护"))  # noqa
+    REDIS_CLUSTER_INSTANCE_SHUTDOWN = TicketEnumField("REDIS_CLUSTER_INSTANCE_SHUTDOWN", _("Redis 故障自愈-实例下架"),
+                                                      _("集群维护"))  # noqa
     REDIS_MASTER_SLAVE_SWITCH = TicketEnumField("REDIS_MASTER_SLAVE_SWITCH", _("Redis 主从切换"), _("集群维护"))
     REDIS_PROXY_SCALE_UP = TicketEnumField("REDIS_PROXY_SCALE_UP", _("Redis Proxy扩容"), _("集群维护"))
     REDIS_PROXY_SCALE_DOWN = TicketEnumField("REDIS_PROXY_SCALE_DOWN", _("Redis Proxy缩容"), _("集群维护"))
     REDIS_ADD_DTS_SERVER = TicketEnumField("REDIS_ADD_DTS_SERVER", _("Redis 新增DTS SERVER"), register_iam=False)
     REDIS_REMOVE_DTS_SERVER = TicketEnumField("REDIS_REMOVE_DTS_SERVER", _("Redis 删除DTS SERVER"), register_iam=False)
     REDIS_DATA_STRUCTURE = TicketEnumField("REDIS_DATA_STRUCTURE", _("Redis 集群数据构造"), _("数据构造"))
-    REDIS_DATA_STRUCTURE_TASK_DELETE = TicketEnumField("REDIS_DATA_STRUCTURE_TASK_DELETE", _("Redis 数据构造记录删除"), _("数据构造"))  # noqa
-    REDIS_CLUSTER_SHARD_NUM_UPDATE = TicketEnumField("REDIS_CLUSTER_SHARD_NUM_UPDATE", _("Redis 集群分片数变更"), _("集群维护"))
+    REDIS_DATA_STRUCTURE_TASK_DELETE = TicketEnumField("REDIS_DATA_STRUCTURE_TASK_DELETE", _("Redis 数据构造记录删除"),
+                                                       _("数据构造"))  # noqa
+    REDIS_CLUSTER_SHARD_NUM_UPDATE = TicketEnumField("REDIS_CLUSTER_SHARD_NUM_UPDATE", _("Redis 集群分片数变更"),
+                                                     _("集群维护"))
     REDIS_CLUSTER_TYPE_UPDATE = TicketEnumField("REDIS_CLUSTER_TYPE_UPDATE", _("Redis 集群类型变更"), _("集群维护"))
     REDIS_CLUSTER_DATA_COPY = TicketEnumField("REDIS_CLUSTER_DATA_COPY", _("Redis 集群数据复制"), _("数据传输"))
-    REDIS_CLUSTER_ROLLBACK_DATA_COPY = TicketEnumField("REDIS_CLUSTER_ROLLBACK_DATA_COPY", _("Redis 构造实例数据回写"), _("数据构造"))  # noqa
+    REDIS_CLUSTER_ROLLBACK_DATA_COPY = TicketEnumField("REDIS_CLUSTER_ROLLBACK_DATA_COPY", _("Redis 构造实例数据回写"),
+                                                       _("数据构造"))  # noqa
     REDIS_DATACOPY_CHECK_REPAIR = TicketEnumField("REDIS_DATACOPY_CHECK_REPAIR", _("Redis 数据校验与修复"))
     REDIS_CLUSTER_ADD_SLAVE = TicketEnumField("REDIS_CLUSTER_ADD_SLAVE", _("Redis 重建从库"), _("集群维护"))
     REDIS_DTS_ONLINE_SWITCH = TicketEnumField("REDIS_DTS_ONLINE_SWITCH", _("Redis DTS在线切换"), register_iam=False)
@@ -334,14 +385,20 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     REDIS_SLOTS_MIGRATE = TicketEnumField("REDIS_SLOTS_MIGRATE", _("Redis slots 迁移"), register_iam=False)
     REDIS_VERSION_UPDATE_ONLINE = TicketEnumField("REDIS_VERSION_UPDATE_ONLINE", _("Redis 集群版本升级"))  # noqa
     REDIS_CLUSTER_REINSTALL_DBMON = TicketEnumField("REDIS_CLUSTER_REINSTALL_DBMON", _("Redis 集群重装DBMON"))  # noqa
-    REDIS_PREDIXY_CONFIG_SERVERS_REWRITE = TicketEnumField("REDIS_PREDIXY_CONFIG_SERVERS_REWRITE", _("predixy配置重写"), register_iam=False)  # noqa
-    REDIS_CLUSTER_PROXYS_UPGRADE = TicketEnumField("REDIS_CLUSTER_PROXYS_UPGRADE", _("Redis 集群proxys版本升级"), register_iam=False)  # noqa
+    REDIS_PREDIXY_CONFIG_SERVERS_REWRITE = TicketEnumField("REDIS_PREDIXY_CONFIG_SERVERS_REWRITE", _("predixy配置重写"),
+                                                           register_iam=False)  # noqa
+    REDIS_CLUSTER_PROXYS_UPGRADE = TicketEnumField("REDIS_CLUSTER_PROXYS_UPGRADE", _("Redis 集群proxys版本升级"),
+                                                   register_iam=False)  # noqa
     REDIS_DIRTY_MACHINE_CLEAR = TicketEnumField("REDIS_DIRTY_MACHINE_CLEAR", _("Redis脏机清理"), register_iam=False)
-    REDIS_CLUSTER_STORAGES_CLI_CONNS_KILL = TicketEnumField("REDIS_CLUSTER_STORAGES_CLI_CONNS_KILL", _("Redis 集群存储层cli连接kill"), register_iam=False)  # noqa
-    REDIS_CLUSTER_RENAME_DOMAIN = TicketEnumField("REDIS_CLUSTER_RENAME_DOMAIN", _("Redis集群域名重命名"), _("集群维护"))
+    REDIS_CLUSTER_STORAGES_CLI_CONNS_KILL = TicketEnumField("REDIS_CLUSTER_STORAGES_CLI_CONNS_KILL",
+                                                            _("Redis 集群存储层cli连接kill"),
+                                                            register_iam=False)  # noqa
+    REDIS_CLUSTER_RENAME_DOMAIN = TicketEnumField("REDIS_CLUSTER_RENAME_DOMAIN", _("Redis集群域名重命名"),
+                                                  _("集群维护"))
     REDIS_CLUSTER_MAXMEMORY_SET = TicketEnumField("REDIS_CLUSTER_MAXMEMORY_SET", _("Redis 集群设置maxmemory"))  # noqa
     REDIS_CLUSTER_LOAD_MODULES = TicketEnumField("REDIS_CLUSTER_LOAD_MODULES", _("Redis 集群加载modules"))  # noqa
-    REDIS_TENDISPLUS_LIGHTNING_DATA= TicketEnumField("REDIS_TENDISPLUS_LIGHTNING_DATA", _("Tendisplus闪电导入数据"), _("集群维护"))  # noqa
+    REDIS_TENDISPLUS_LIGHTNING_DATA = TicketEnumField("REDIS_TENDISPLUS_LIGHTNING_DATA", _("Tendisplus闪电导入数据"),
+                                                      _("集群维护"))  # noqa

     # 大数据
     KAFKA_APPLY = TicketEnumField("KAFKA_APPLY", _("Kafka 集群部署"), register_iam=False)
@@ -408,8 +465,10 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     RIAK_CLUSTER_MIGRATE = TicketEnumField("RIAK_CLUSTER_MIGRATE", _("Riak 集群迁移"), _("集群管理"))

     # MONGODB
-    MONGODB_REPLICASET_APPLY = TicketEnumField("MONGODB_REPLICASET_APPLY", _("MongoDB 副本集集群部署"), register_iam=False)  # noqa
-    MONGODB_SHARD_APPLY = TicketEnumField("MONGODB_SHARD_APPLY", _("MongoDB 分片集群部署"), _("集群管理"), register_iam=False)  # noqa
+    MONGODB_REPLICASET_APPLY = TicketEnumField("MONGODB_REPLICASET_APPLY", _("MongoDB 副本集集群部署"),
+                                               register_iam=False)  # noqa
+    MONGODB_SHARD_APPLY = TicketEnumField("MONGODB_SHARD_APPLY", _("MongoDB 分片集群部署"), _("集群管理"),
+                                          register_iam=False)  # noqa
     MONGODB_EXEC_SCRIPT_APPLY = TicketEnumField("MONGODB_EXEC_SCRIPT_APPLY", _("MongoDB 变更脚本执行"), _("脚本任务"))
     MONGODB_REMOVE_NS = TicketEnumField("MONGODB_REMOVE_NS", _("MongoDB 清档"), _("数据处理"))
     MONGODB_FULL_BACKUP = TicketEnumField("MONGODB_FULL_BACKUP", _("MongoDB 全库备份"), _("备份"))
@@ -417,7 +476,8 @@ def get_cluster_type_by_ticket(cls, ticket_type):
     MONGODB_ADD_MONGOS = TicketEnumField("MONGODB_ADD_MONGOS", _("MongoDB 扩容接入层"), _("集群维护"))
     MONGODB_REDUCE_MONGOS = TicketEnumField("MONGODB_REDUCE_MONGOS", _("MongoDB 缩容接入层"), _("集群维护"))
     MONGODB_ADD_SHARD_NODES = TicketEnumField("MONGODB_ADD_SHARD_NODES", _("MongoDB 扩容shard节点数"), _("集群维护"))
-    MONGODB_REDUCE_SHARD_NODES = TicketEnumField("MONGODB_REDUCE_SHARD_NODES", _("MongoDB 缩容shard节点数"), _("集群维护"))
+    MONGODB_REDUCE_SHARD_NODES = TicketEnumField("MONGODB_REDUCE_SHARD_NODES", _("MongoDB 缩容shard节点数"),
+                                                 _("集群维护"))
     MONGODB_SCALE_UPDOWN = TicketEnumField("MONGODB_SCALE_UPDOWN", _("MongoDB 集群容量变更"), _("集群维护"))
     MONGODB_ENABLE = TicketEnumField("MONGODB_ENABLE", _("MongoDB 集群启用"), register_iam=False)
集群启用"), register_iam=False) MONGODB_INSTANCE_RELOAD = TicketEnumField("MONGODB_INSTANCE_RELOAD", _("MongoDB 实例重启"), _("集群管理")) @@ -425,7 +485,8 @@ def get_cluster_type_by_ticket(cls, ticket_type): MONGODB_DESTROY = TicketEnumField("MONGODB_DESTROY", _("MongoDB 集群删除"), _("集群管理")) MONGODB_CUTOFF = TicketEnumField("MONGODB_CUTOFF", _("MongoDB 整机替换"), _("集群维护")) MONGODB_AUTHORIZE_RULES = TicketEnumField("MONGODB_AUTHORIZE_RULES", _("MongoDB 授权"), _("权限管理")) - MONGODB_EXCEL_AUTHORIZE_RULES = TicketEnumField("MONGODB_EXCEL_AUTHORIZE_RULES", _("MongoDB Excel授权"), _("权限管理")) # noqa + MONGODB_EXCEL_AUTHORIZE_RULES = TicketEnumField("MONGODB_EXCEL_AUTHORIZE_RULES", _("MongoDB Excel授权"), + _("权限管理")) # noqa MONGODB_IMPORT = TicketEnumField("MONGODB_IMPORT", _("MongoDB 数据导入"), _("集群维护")) MONGODB_RESTORE = TicketEnumField("MONGODB_RESTORE", _("MongoDB 定点回档"), _("集群维护")) MONGODB_TEMPORARY_DESTROY = TicketEnumField("MONGODB_TEMPORARY_DESTROY", _("MongoDB 临时集群销毁"), _("集群维护")) @@ -460,6 +521,7 @@ def get_cluster_type_by_ticket(cls, ticket_type): # 资源池 RESOURCE_IMPORT = EnumField("RESOURCE_IMPORT", _("资源池导入")) ADMIN_PASSWORD_MODIFY = EnumField("ADMIN_PASSWORD_MODIFY", _("临时密码修改")) + RECYCLE_HOST = EnumField("RECYCLE_HOST", _("主机回收")) # fmt: on # VM @@ -498,8 +560,12 @@ class FlowType(str, StructuredEnum): RESOURCE_DELIVERY = EnumField("RESOURCE_DELIVERY", _("资源交付")) # 资源批量申请节点 RESOURCE_BATCH_APPLY = EnumField("RESOURCE_BATCH_APPLY", _("资源批量申请")) - # 资源批量交付节点 - RESOURCE_BATCH_DELIVERY = EnumField("RESOURCE_BATCH_DELIVERY", _("资源批量交付")) + # 主机回收 + HOST_RECYCLE = EnumField("HOST_RECYCLE", _("主机回收")) + # 主机重导入资源池 + HOST_IMPORT_RESOURCE = EnumField("HOST_IMPORT_RESOURCE", _("主机重导入资源池")) + # 主机回收描述节点,用于关联回收单据 + HOST_RECYCLE_DELIVERY = EnumField("HOST_RECYCLE_DELIVERY", _("主机回收描述")) class FlowContext(str, StructuredEnum): diff --git a/dbm-ui/backend/ticket/flow_manager/inner.py b/dbm-ui/backend/ticket/flow_manager/inner.py index c4d70ad206..39306d9774 100644 --- a/dbm-ui/backend/ticket/flow_manager/inner.py +++ b/dbm-ui/backend/ticket/flow_manager/inner.py @@ -122,10 +122,6 @@ def check_exclusive_operations(self): cluster_ids=cluster_ids, ticket_type=ticket_type, exclude_ticket_ids=[self.ticket.id] ) - def handle_exclusive_error(self): - """处理执行互斥后重试的逻辑""" - pass - def callback(self, callback_type: FlowCallbackType) -> None: """ inner节点独有的钩子函数,执行前置/后继流程节点动作 @@ -232,3 +228,32 @@ def _status(self) -> str: return constants.TicketStatus.SUCCEEDED return status + + +class SimpleTaskFlow(InnerFlow): + """ + 内置简单任务流程。 + 此任务通常跟集群无关,eg: 主机清理,资源导入等 + """ + + def __init__(self, flow_obj: Flow): + self.root_id = flow_obj.flow_obj_id + super().__init__(flow_obj=flow_obj) + + def run(self) -> None: + root_id = self.flow_obj.flow_obj_id or generate_root_id() + self.run_status_handler(root_id) + # 运行回收流程 + try: + self.callback(callback_type=FlowCallbackType.PRE_CALLBACK.value) + self._run() + except Exception as err: # pylint: disable=broad-except + self.run_error_status_handler(err) + + def _run(self) -> None: + return super()._run() + + def _retry(self) -> None: + # 重试则将机器挪出污点池 + self.flush_error_status_handler() + self.run() diff --git a/dbm-ui/backend/ticket/flow_manager/manager.py b/dbm-ui/backend/ticket/flow_manager/manager.py index c12118d2f5..1252e51295 100644 --- a/dbm-ui/backend/ticket/flow_manager/manager.py +++ b/dbm-ui/backend/ticket/flow_manager/manager.py @@ -10,23 +10,13 @@ """ import logging -from backend import env from backend.ticket import constants from backend.ticket.constants import FLOW_FINISHED_STATUS, 
diff --git a/dbm-ui/backend/ticket/flow_manager/manager.py b/dbm-ui/backend/ticket/flow_manager/manager.py
index c12118d2f5..1252e51295 100644
--- a/dbm-ui/backend/ticket/flow_manager/manager.py
+++ b/dbm-ui/backend/ticket/flow_manager/manager.py
@@ -10,23 +10,13 @@
 """
 import logging

-from backend import env
 from backend.ticket import constants
 from backend.ticket.constants import FLOW_FINISHED_STATUS, FlowType
 from backend.ticket.flow_manager.delivery import DeliveryFlow, DescribeTaskFlow
-from backend.ticket.flow_manager.inner import IgnoreResultInnerFlow, InnerFlow, QuickInnerFlow
+from backend.ticket.flow_manager.inner import IgnoreResultInnerFlow, InnerFlow, QuickInnerFlow, SimpleTaskFlow
 from backend.ticket.flow_manager.itsm import ItsmFlow
 from backend.ticket.flow_manager.pause import PauseFlow
-from backend.ticket.flow_manager.resource import (
-    FakeResourceApplyFlow,
-    FakeResourceBatchApplyFlow,
-    FakeResourceBatchDeliveryFlow,
-    FakeResourceDeliveryFlow,
-    ResourceApplyFlow,
-    ResourceBatchApplyFlow,
-    ResourceBatchDeliveryFlow,
-    ResourceDeliveryFlow,
-)
+from backend.ticket.flow_manager.resource import ResourceApplyFlow, ResourceBatchApplyFlow, ResourceDeliveryFlow
 from backend.ticket.flow_manager.timer import TimerFlow
 from backend.ticket.models import Ticket

@@ -42,20 +32,11 @@
     FlowType.RESOURCE_APPLY: ResourceApplyFlow,
     FlowType.RESOURCE_DELIVERY: ResourceDeliveryFlow,
     FlowType.RESOURCE_BATCH_APPLY: ResourceBatchApplyFlow,
-    FlowType.RESOURCE_BATCH_DELIVERY: ResourceBatchDeliveryFlow,
+    FlowType.HOST_RECYCLE: SimpleTaskFlow,
+    FlowType.HOST_IMPORT_RESOURCE: SimpleTaskFlow,
+    FlowType.HOST_RECYCLE_DELIVERY: DeliveryFlow,
 }

-# 开启无资源池环境调试,从空闲机筛选机器伪造资源返回
-if env.FAKE_RESOURCE_APPLY_ENABLE:
-    SUPPORTED_FLOW_MAP.update(
-        {
-            FlowType.RESOURCE_APPLY: FakeResourceApplyFlow,
-            FlowType.RESOURCE_DELIVERY: FakeResourceDeliveryFlow,
-            FlowType.RESOURCE_BATCH_APPLY: FakeResourceBatchApplyFlow,
-            FlowType.RESOURCE_BATCH_DELIVERY: FakeResourceBatchDeliveryFlow,
-        }
-    )
-
 logger = logging.getLogger("root")
diff --git a/dbm-ui/backend/ticket/flow_manager/resource.py b/dbm-ui/backend/ticket/flow_manager/resource.py
index ee3b2f6012..d201f13377 100644
--- a/dbm-ui/backend/ticket/flow_manager/resource.py
+++ b/dbm-ui/backend/ticket/flow_manager/resource.py
@@ -10,21 +10,20 @@
 """
 import copy
 import importlib
+import itertools
 import logging
 from collections import defaultdict
 from typing import Any, Dict, List, Optional, Union

-from django.core.cache import cache
 from django.utils.translation import gettext as _

-from backend import env
 from backend.components.dbresource.client import DBResourceApi
 from backend.configuration.constants import AffinityEnum
 from backend.configuration.models import DBAdministrator
+from backend.db_dirty.constants import MachineEventType
+from backend.db_dirty.models import MachineEvent
 from backend.db_meta.models import Spec
 from backend.db_services.dbresource.exceptions import ResourceApplyException, ResourceApplyInsufficientException
-from backend.db_services.ipchooser.constants import CommonEnum
-from backend.db_services.ipchooser.query.resource import ResourceQueryHelper
 from backend.ticket import constants
 from backend.ticket.constants import FlowCallbackType, FlowType, ResourceApplyErrCode, TodoType
 from backend.ticket.flow_manager.base import BaseTicketFlow
@@ -129,10 +128,12 @@ def _format_resource_hosts(self, hosts):
             "ip": host["ip"],
             "bk_cloud_id": host["bk_cloud_id"],
             "bk_host_id": host["bk_host_id"],
-            # 补充机器的内存,cpu和磁盘信息。(bk_disk的单位是GB, bk_mem的单位是MB)
+            # 补充机器的内存,cpu,磁盘和操作系统信息。(bk_disk的单位是GB, bk_mem的单位是MB)
             "bk_cpu": host["cpu_num"],
             "bk_disk": host["total_storage_cap"],
             "bk_mem": host["dram_cap"],
+            "os_name": host["os_name"],
+            "os_type": host["os_type"],
             # bk_disk为系统盘,storage_device为数据盘/data|/data1
             "storage_device": host["storage_device"],
             # 补充城市和园区
@@ -193,6 +194,17 @@ def apply_resource(self, ticket_data):
             else:
                 node_infos[group_name].extend(host_infos)

+        # 记录申请记录
+        applied_host_infos = list(itertools.chain(*node_infos.values()))
+        MachineEvent.host_event_trigger(
+            self.ticket.bk_biz_id,
+            applied_host_infos,
+            event=MachineEventType.ApplyResource,
+            operator=self.ticket.creator,
+            ticket=self.ticket,
+            standard=True,
+        )
+
         return resource_request_id, node_infos

     def create_replenish_todo(self):
@@ -228,6 +240,16 @@ def fetch_apply_params(self, ticket_data):
         # 根据规格来填充相应机器的申请参数
         resource_spec = ticket_data["resource_spec"]
         for role, role_spec in resource_spec.items():
+            # 如果是指定主机,则直接请求,不走解析规格的逻辑
+            if role_spec.get("hosts"):
+                specify_host_params = {
+                    "group_mark": role,
+                    "hosts": role_spec["hosts"],
+                    "bk_cloud_id": bk_cloud_id,
+                    "count": len(role_spec["hosts"]),
+                }
+                details.append(specify_host_params)
+                continue
             # 如果申请数量为0/规格ID不合法(存在spec id为0 --> 是前端表单的默认值),则跳过
             if not role_spec["count"] or not role_spec["spec_id"]:
                 continue
@@ -394,93 +416,3 @@ def confirm_resource(self, ticket_data):
     def _run(self) -> str:
         self.confirm_resource(self.ticket.details)
         return super()._run()
-
-
-class ResourceBatchDeliveryFlow(ResourceDeliveryFlow):
-    """
-    内置资源申请批量交付流程,主要是通知资源池机器使用成功
-    """
-
-    def _run(self) -> str:
-        # 暂时与单独交付节点没有区别
-        return super()._run()
-
-
-class FakeResourceApplyFlow(ResourceApplyFlow):
-    def apply_resource(self, ticket_data):
-        """模拟资源池申请"""
-
-        host_in_use = set(cache.get(HOST_IN_USE, []))
-
-        resp = ResourceQueryHelper.query_cc_hosts(
-            {"bk_biz_id": env.DBA_APP_BK_BIZ_ID, "bk_inst_id": 7, "bk_obj_id": "module"},
-            [],
-            0,
-            1000,
-            CommonEnum.DEFAULT_HOST_FIELDS.value,
-            return_status=True,
-            bk_cloud_id=0,
-        )
-        count, apply_data = resp["count"], list(filter(lambda x: x["status"] == 1, resp["info"]))
-
-        for item in apply_data:
-            item["ip"] = item["bk_host_innerip"]
-
-        # 排除缓存占用的主机
-        host_free = list(filter(lambda x: x["bk_host_id"] not in host_in_use, apply_data))
-
-        index = 0
-        expected_count = 0
-        node_infos: Dict[str, List] = defaultdict(list)
-        for detail in self.fetch_apply_params(ticket_data):
-            role, count = detail["group_mark"], detail["count"]
-            host_infos = host_free[index : index + count]
-            try:
-                if "backend_group" in role:
-                    backend_group_name = role.rsplit("_", 1)[0]
-                    node_infos[backend_group_name].append({"master": host_infos[0], "slave": host_infos[1]})
-                else:
-                    node_infos[role] = host_infos
-            except IndexError:
-                raise ResourceApplyException(_("模拟资源申请失败,主机数量不够"))
-
-            index += count
-            expected_count += len(host_infos)
-
-        if expected_count < index:
-            raise ResourceApplyException(_("模拟资源申请失败,主机数量不够:{} < {}").format(count, index))
-
-        logger.info(_("模拟资源申请成功(%s):%s"), expected_count, node_infos)
-
-        # 添加新占用的主机
-        host_in_use = host_in_use.union(list(map(lambda x: x["bk_host_id"], host_free[:index])))
-        cache.set(HOST_IN_USE, list(host_in_use))
-
-        return count, node_infos
-
-
-class FakeResourceBatchApplyFlow(FakeResourceApplyFlow, ResourceBatchApplyFlow):
-    pass
-
-
-class FakeResourceDeliveryFlow(ResourceDeliveryFlow):
-    """
-    内置资源申请交付流程,暂时无需操作
-    """
-
-    def confirm_resource(self, ticket_data):
-        pass
-
-    def _run(self) -> str:
-        self.confirm_resource(self.ticket.details)
-        return super()._run()
-
-
-class FakeResourceBatchDeliveryFlow(FakeResourceDeliveryFlow):
-    """
-    内置资源申请批量交付流程,主要是通知资源池机器使用成功
-    """
-
-    def _run(self) -> str:
-        # 暂时与单独交付节点没有区别
-        return super()._run()
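Reviewer note: a minimal sketch of the new specify-host fast path in fetch_apply_params — a resource_spec entry that already carries "hosts" bypasses spec resolution, while the others still resolve by spec_id/count (values hypothetical):

resource_spec = {
    "spider": {"hosts": [{"ip": "127.0.0.1", "bk_host_id": 2001}]},  # pinned hosts, no spec lookup
    "backend": {"spec_id": 10, "count": 2},  # normal spec-resolution path
}
# For "spider", fetch_apply_params appends:
# {"group_mark": "spider", "hosts": [...], "bk_cloud_id": 0, "count": 1}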
diff --git a/dbm-ui/backend/ticket/models/ticket.py b/dbm-ui/backend/ticket/models/ticket.py
index cea46db296..370055d676 100644
--- a/dbm-ui/backend/ticket/models/ticket.py
+++ b/dbm-ui/backend/ticket/models/ticket.py
@@ -22,6 +22,7 @@
 from backend.bk_web.models import AuditedModel
 from backend.configuration.constants import PLAT_BIZ_ID, DBType
 from backend.db_monitor.exceptions import AutofixException
+from backend.db_services.dbbase.constants import IpDest
 from backend.ticket.constants import (
     EXCLUSIVE_TICKET_EXCEL_PATH,
     FlowRetryType,
@@ -206,6 +207,46 @@ def create_ticket(

         return ticket

+    @classmethod
+    def create_recycle_ticket(cls, ticket_id: int, ip_dest: IpDest):
+        """
+        从一个终止单据派生产生另一个清理单据
+        :param ticket_id: 终止单据ID
+        :param ip_dest: 机器流向
+        """
+        from backend.ticket.builders import BuilderFactory
+
+        ticket = cls.objects.get(id=ticket_id)
+        # 忽略非回收单据
+        if ticket.ticket_type not in BuilderFactory.apply_ticket_type:
+            return None
+
+        # 创建回收单据流程
+        from backend.ticket.builders.common.base import fetch_apply_hosts
+
+        details = {
+            "recycle_hosts": fetch_apply_hosts(ticket.details),
+            "ip_recycle": {"ip_dest": ip_dest, "for_biz": ticket.bk_biz_id},
+            "group": ticket.group,
+        }
+        recycle_ticket = cls.create_ticket(
+            ticket_type=TicketType.RECYCLE_HOST,
+            creator=ticket.creator,
+            bk_biz_id=ticket.bk_biz_id,
+            remark=_("单据{}终止后自动发起清理机器单据").format(ticket.id),
+            details=details,
+        )
+
+        # 对原单据动态插入一个描述flow,关联这个回收单
+        Flow.objects.create(
+            ticket=ticket,
+            flow_type=FlowType.HOST_RECYCLE_DELIVERY.value,
+            details={"recycle_ticket": recycle_ticket.id},
+            flow_alias=_("原主机清理释放"),
+        )
+
+        return recycle_ticket
+
     @classmethod
     def create_ticket_from_bk_monitor(cls, callback_data):
         """
diff --git a/dbm-ui/backend/ticket/models/todo.py b/dbm-ui/backend/ticket/models/todo.py
index 77478bd41d..4809f14caa 100644
--- a/dbm-ui/backend/ticket/models/todo.py
+++ b/dbm-ui/backend/ticket/models/todo.py
@@ -18,7 +18,6 @@
 from backend.bk_web.constants import LEN_MIDDLE, LEN_SHORT
 from backend.bk_web.models import AuditedModel
 from backend.ticket.constants import FlowMsgStatus, FlowMsgType, TicketFlowStatus, TodoStatus, TodoType
-from backend.ticket.tasks.ticket_tasks import send_msg_for_flow

 logger = logging.getLogger("root")

@@ -28,6 +27,8 @@ def exist_unfinished(self):
         return self.filter(status__in=[TodoStatus.TODO, TodoStatus.RUNNING]).exists()

     def create(self, **kwargs):
+        from backend.ticket.tasks.ticket_tasks import send_msg_for_flow
+
         todo = super().create(**kwargs)
         send_msg_for_flow.apply_async(
             kwargs={
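Reviewer note: a usage sketch of the new derivation path — the ticket id is hypothetical and the IpDest member name is assumed, since IpDest is only imported (never enumerated) in this patch:

from backend.db_services.dbbase.constants import IpDest
from backend.ticket.models import Ticket

recycle = Ticket.create_recycle_ticket(ticket_id=12345, ip_dest=IpDest.RESOURCE)  # member name assumed
# Returns None for non-apply ticket types; otherwise creates a RECYCLE_HOST ticket
# and appends a HOST_RECYCLE_DELIVERY flow to the source ticket referencing it.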