diff --git a/unilabos/device_comms/opcua_client/client.py b/unilabos/device_comms/opcua_client/client.py index 011ce07e..b45cded2 100644 --- a/unilabos/device_comms/opcua_client/client.py +++ b/unilabos/device_comms/opcua_client/client.py @@ -5,7 +5,6 @@ from pydantic import BaseModel from opcua import Client, ua -from opcua.ua import NodeClass import pandas as pd import os @@ -13,7 +12,7 @@ from unilabos.device_comms.opcua_client.node.uniopcua import Variable, Method, NodeType, DataType from unilabos.device_comms.universal_driver import UniversalDriver from unilabos.utils.log import logger - +from unilabos.devices.workstation.post_process.decks import post_process_deck class OpcUaNode(BaseModel): name: str @@ -117,6 +116,8 @@ class BaseClient(UniversalDriver): _variables_to_find: Dict[str, Dict[str, Any]] = {} _name_mapping: Dict[str, str] = {} # 英文名到中文名的映射 _reverse_mapping: Dict[str, str] = {} # 中文名到英文名的映射 + # 直接缓存已找到的 ua.Node 对象,避免因字符串 NodeId 格式导致订阅失败 + _found_node_objects: Dict[str, Any] = {} def __init__(self): super().__init__() @@ -125,6 +126,9 @@ def __init__(self): # 初始化名称映射字典 self._name_mapping = {} self._reverse_mapping = {} + # 初始化线程锁(在子类中会被重新创建,这里提供默认实现) + import threading + self._client_lock = threading.RLock() def _set_client(self, client: Optional[Client]) -> None: if client is None: @@ -152,15 +156,24 @@ def _find_nodes(self) -> None: if not self.client: raise ValueError('client is not connected') - logger.info('开始查找节点...') + logger.info(f'开始查找 {len(self._variables_to_find)} 个节点...') try: # 获取根节点 root = self.client.get_root_node() objects = root.get_child(["0:Objects"]) + # 记录查找前的状态 + before_count = len(self._node_registry) + # 查找节点 self._find_nodes_recursive(objects) + # 记录查找后的状态 + after_count = len(self._node_registry) + newly_found = after_count - before_count + + logger.info(f"本次查找新增 {newly_found} 个节点,当前共 {after_count} 个") + # 检查是否所有节点都已找到 not_found = [] for var_name, var_info in self._variables_to_find.items(): @@ -168,9 +181,13 @@ def _find_nodes(self) -> None: not_found.append(var_name) if not_found: - logger.warning(f"以下节点未找到: {', '.join(not_found)}") + logger.warning(f"⚠ 以下 {len(not_found)} 个节点未找到: {', '.join(not_found[:10])}{'...' 
if len(not_found) > 10 else ''}") + logger.warning(f"提示:请检查这些节点名称是否与服务器的 BrowseName 完全匹配(包括大小写、空格等)") + # 提供一个示例来帮助调试 + if not_found: + logger.info(f"尝试在服务器中查找第一个未找到的节点 '{not_found[0]}' 的相似节点...") else: - logger.info("所有节点均已找到") + logger.info(f"✓ 所有 {len(self._variables_to_find)} 个节点均已找到并注册") except Exception as e: logger.error(f"查找节点失败: {e}") @@ -188,17 +205,20 @@ def _find_nodes_recursive(self, node) -> None: var_info = self._variables_to_find[node_name] node_type = var_info.get("node_type") data_type = var_info.get("data_type") + node_id_str = str(node.nodeid) # 根据节点类型创建相应的对象 if node_type == NodeType.VARIABLE: - self._node_registry[node_name] = Variable(self.client, node_name, str(node.nodeid), data_type) - logger.info(f"找到变量节点: {node_name}") + self._node_registry[node_name] = Variable(self.client, node_name, node_id_str, data_type) + logger.info(f"✓ 找到变量节点: '{node_name}', NodeId: {node_id_str}, DataType: {data_type}") + # 缓存真实的 ua.Node 对象用于订阅 + self._found_node_objects[node_name] = node elif node_type == NodeType.METHOD: # 对于方法节点,需要获取父节点ID parent_node = node.get_parent() parent_node_id = str(parent_node.nodeid) - self._node_registry[node_name] = Method(self.client, node_name, str(node.nodeid), parent_node_id, data_type) - logger.info(f"找到方法节点: {node_name}") + self._node_registry[node_name] = Method(self.client, node_name, node_id_str, parent_node_id, data_type) + logger.info(f"✓ 找到方法节点: '{node_name}', NodeId: {node_id_str}, ParentId: {parent_node_id}") # 递归处理子节点 for child in node.get_children(): @@ -296,13 +316,17 @@ def use_node(self, name: str) -> OpcUaNodeBase: if name in self._name_mapping: chinese_name = self._name_mapping[name] if chinese_name in self._node_registry: - return self._node_registry[chinese_name] + node = self._node_registry[chinese_name] + logger.debug(f"使用节点: '{name}' -> '{chinese_name}', NodeId: {node.node_id}") + return node elif chinese_name in self._variables_to_find: logger.warning(f"节点 {chinese_name} (英文名: {name}) 尚未找到,尝试重新查找") if self.client: self._find_nodes() if chinese_name in self._node_registry: - return self._node_registry[chinese_name] + node = self._node_registry[chinese_name] + logger.info(f"重新查找成功: '{chinese_name}', NodeId: {node.node_id}") + return node raise ValueError(f'节点 {chinese_name} (英文名: {name}) 未注册或未找到') # 直接使用原始名称查找 @@ -312,9 +336,14 @@ def use_node(self, name: str) -> OpcUaNodeBase: if self.client: self._find_nodes() if name in self._node_registry: - return self._node_registry[name] + node = self._node_registry[name] + logger.info(f"重新查找成功: '{name}', NodeId: {node.node_id}") + return node + logger.error(f"❌ 节点 '{name}' 未注册或未找到。已注册节点: {list(self._node_registry.keys())[:5]}...") raise ValueError(f'节点 {name} 未注册或未找到') - return self._node_registry[name] + node = self._node_registry[name] + logger.debug(f"使用节点: '{name}', NodeId: {node.node_id}") + return node def get_node_registry(self) -> Dict[str, OpcUaNodeBase]: return self._node_registry @@ -335,12 +364,13 @@ def register_node_list(self, node_list: List[OpcUaNode]) -> "BaseClient": return self logger.info(f'开始注册 {len(node_list)} 个节点...') + new_nodes_count = 0 for node in node_list: if node is None: continue if node.name in self._node_registry: - logger.info(f'节点 {node.name} 已存在') + logger.debug(f'节点 "{node.name}" 已存在于注册表') exist = self._node_registry[node.name] if exist.type != node.node_type: raise ValueError(f'节点 {node.name} 类型 {node.node_type} 与已存在的类型 {exist.type} 不一致') @@ -351,9 +381,10 @@ def register_node_list(self, node_list: List[OpcUaNode]) -> "BaseClient": "node_type": 
node.node_type, "data_type": node.data_type } - logger.info(f'添加节点 {node.name} 到待查找列表') + new_nodes_count += 1 + logger.debug(f'添加节点 "{node.name}" ({node.node_type}) 到待查找列表') - logger.info('节点注册完成') + logger.info(f'节点注册完成:新增 {new_nodes_count} 个待查找节点,总计 {len(self._variables_to_find)} 个') # 如果客户端已连接,立即开始查找 if self.client: @@ -470,7 +501,7 @@ def execute_node_function(use_node: Callable[[str], OpcUaNodeBase]) -> Union[boo val = result_dict.get("value") err = result_dict.get("error") - print(f"读取 {node_name} 返回值 = {val} (类型: {type(val).__name__}), 错误 = {err}") + print(f"读取 {node_name} 返回值 = {val} (类型: {type(val).__name__}, 错误 = {err}") return val, err except Exception as e: print(f"解析读取结果失败: {e}, 原始结果: {result_str}") @@ -516,17 +547,27 @@ def create_init_function(self, func_name: str = None, write_nodes: Union[Dict[st """ if write_nodes is None: raise ValueError("必须提供write_nodes参数") - + def execute_init_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: + """根据 _workflow_params 为各节点写入真实数值。 + + 约定: + - write_nodes 为 list 时: 节点名 == 参数名,从 _workflow_params[node_name] 取值; + - write_nodes 为 dict 时: + * value 为字符串且在 _workflow_params 中: 当作参数名去取值; + * 否则 value 视为常量直接写入。 + """ + + params = getattr(self, "_workflow_params", {}) or {} + if isinstance(write_nodes, list): - # 处理节点列表 + # 节点列表形式: 节点名与参数名一致 for node_name in write_nodes: - # 尝试从参数中获取同名参数的值 - current_value = True # 默认值 - if hasattr(self, '_workflow_params') and node_name in self._workflow_params: - current_value = self._workflow_params[node_name] - print(f"初始化函数: 从参数获取值 {node_name} = {current_value}") - + if node_name not in params: + print(f"初始化函数: 参数中未找到 {node_name}, 跳过写入") + continue + + current_value = params[node_name] print(f"初始化函数: 写入节点 {node_name} = {current_value}") input_json = json.dumps({"node_name": node_name, "value": current_value}) result_str = self.write_node(input_json) @@ -538,14 +579,15 @@ def execute_init_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: except Exception as e: print(f"初始化函数: 解析写入结果失败: {e}, 原始结果: {result_str}") elif isinstance(write_nodes, dict): - # 处理节点字典,使用指定的值 + # 映射形式: 节点名 -> 参数名或常量 for node_name, node_value in write_nodes.items(): - # 检查值是否是字符串类型的参数名 - current_value = node_value - if isinstance(node_value, str) and hasattr(self, '_workflow_params') and node_value in self._workflow_params: - current_value = self._workflow_params[node_value] + if isinstance(node_value, str) and node_value in params: + current_value = params[node_value] print(f"初始化函数: 从参数获取值 {node_value} = {current_value}") - + else: + current_value = node_value + print(f"初始化函数: 使用常量值 写入 {node_name} = {current_value}") + print(f"初始化函数: 写入节点 {node_name} = {current_value}") input_json = json.dumps({"node_name": node_name, "value": current_value}) result_str = self.write_node(input_json) @@ -672,20 +714,20 @@ def create_start_function(self, func_name: str, stop_condition_expression: str = condition_nodes: 条件节点列表 [节点名1, 节点名2] """ def execute_start_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: - # 直接处理写入节点 + """开始函数: 写入触发节点, 然后轮询条件节点直到满足停止条件。""" + + params = getattr(self, "_workflow_params", {}) or {} + + # 先处理写入节点(触发位等) if write_nodes: if isinstance(write_nodes, list): - # 处理节点列表,默认值都是True - for i, node_name in enumerate(write_nodes): - # 尝试获取与节点对应的参数值 - param_name = f"write_{i}" - - # 获取参数值(如果有) - current_value = True # 默认值 - if hasattr(self, '_workflow_params') and param_name in self._workflow_params: - current_value = self._workflow_params[param_name] - - # 直接写入节点 + # 列表形式: 节点名与参数名一致, 若无参数则直接写 True + for 
node_name in write_nodes: + if node_name in params: + current_value = params[node_name] + else: + current_value = True + print(f"直接写入节点 {node_name} = {current_value}") input_json = json.dumps({"node_name": node_name, "value": current_value}) result_str = self.write_node(input_json) @@ -697,14 +739,13 @@ def execute_start_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: except Exception as e: print(f"解析直接写入结果失败: {e}, 原始结果: {result_str}") elif isinstance(write_nodes, dict): - # 处理节点字典,值是指定的 + # 字典形式: 节点名 -> 常量值(如 True/False) for node_name, node_value in write_nodes.items(): - # 尝试获取参数值(如果节点名与参数名匹配) - current_value = node_value # 使用指定的默认值 - if hasattr(self, '_workflow_params') and node_name in self._workflow_params: - current_value = self._workflow_params[node_name] - - # 直接写入节点 + if node_name in params: + current_value = params[node_name] + else: + current_value = node_value + print(f"直接写入节点 {node_name} = {current_value}") input_json = json.dumps({"node_name": node_name, "value": current_value}) result_str = self.write_node(input_json) @@ -732,6 +773,7 @@ def execute_start_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: # 直接读取节点 result_str = self.read_node(node_name) try: + time.sleep(1) result_str = result_str.replace("'", '"') result_dict = json.loads(result_str) read_res = result_dict.get("value") @@ -1035,31 +1077,33 @@ def read_node(self, node_name: str) -> Dict[str, Any]: 读取节点值的便捷方法 返回包含result字段的字典 """ - try: - node = self.use_node(node_name) - value, error = node.read() - - # 创建结果字典 - result = { - "value": value, - "error": error, - "node_name": node_name, - "timestamp": time.time() - } - - # 返回JSON字符串 - return json.dumps(result) - except Exception as e: - logger.error(f"读取节点 {node_name} 失败: {e}") - # 创建错误结果字典 - result = { - "value": None, - "error": True, - "node_name": node_name, - "error_message": str(e), - "timestamp": time.time() - } - return json.dumps(result) + # 使用锁保护客户端访问 + with self._client_lock: + try: + node = self.use_node(node_name) + value, error = node.read() + + # 创建结果字典 + result = { + "value": value, + "error": error, + "node_name": node_name, + "timestamp": time.time() + } + + # 返回JSON字符串 + return json.dumps(result) + except Exception as e: + logger.error(f"读取节点 {node_name} 失败: {e}") + # 创建错误结果字典 + result = { + "value": None, + "error": True, + "node_name": node_name, + "error_message": str(e), + "timestamp": time.time() + } + return json.dumps(result) def write_node(self, json_input: str) -> str: """ @@ -1068,47 +1112,49 @@ def write_node(self, json_input: str) -> str: eg:'{\"node_name\":\"反应罐号码\",\"value\":\"2\"}' 返回JSON格式的字符串,包含操作结果 """ - try: - # 解析JSON格式的输入 - if not isinstance(json_input, str): - json_input = str(json_input) - + # 使用锁保护客户端访问 + with self._client_lock: try: - input_data = json.loads(json_input) - if not isinstance(input_data, dict): - return json.dumps({"error": True, "error_message": "输入必须是包含node_name和value的JSON对象", "success": False}) + # 解析JSON格式的输入 + if not isinstance(json_input, str): + json_input = str(json_input) + + try: + input_data = json.loads(json_input) + if not isinstance(input_data, dict): + return json.dumps({"error": True, "error_message": "输入必须是包含node_name和value的JSON对象", "success": False}) + + # 从JSON中提取节点名称和值 + node_name = input_data.get("node_name") + value = input_data.get("value") - # 从JSON中提取节点名称和值 - node_name = input_data.get("node_name") - value = input_data.get("value") + if node_name is None: + return json.dumps({"error": True, "error_message": "JSON中缺少node_name字段", "success": False}) + except 
json.JSONDecodeError as e: + return json.dumps({"error": True, "error_message": f"JSON解析错误: {str(e)}", "success": False}) - if node_name is None: - return json.dumps({"error": True, "error_message": "JSON中缺少node_name字段", "success": False}) - except json.JSONDecodeError as e: - return json.dumps({"error": True, "error_message": f"JSON解析错误: {str(e)}", "success": False}) - - node = self.use_node(node_name) - error = node.write(value) - - # 创建结果字典 - result = { - "value": value, - "error": error, - "node_name": node_name, - "timestamp": time.time(), - "success": not error - } - - return json.dumps(result) - except Exception as e: - logger.error(f"写入节点失败: {e}") - result = { - "error": True, - "error_message": str(e), - "timestamp": time.time(), - "success": False - } - return json.dumps(result) + node = self.use_node(node_name) + error = node.write(value) + + # 创建结果字典 + result = { + "value": value, + "error": error, + "node_name": node_name, + "timestamp": time.time(), + "success": not error + } + + return json.dumps(result) + except Exception as e: + logger.error(f"写入节点失败: {e}") + result = { + "error": True, + "error_message": str(e), + "timestamp": time.time(), + "success": False + } + return json.dumps(result) def call_method(self, node_name: str, *args) -> Tuple[Any, bool]: """ @@ -1128,13 +1174,50 @@ def call_method(self, node_name: str, *args) -> Tuple[Any, bool]: class OpcUaClient(BaseClient): - def __init__(self, url: str, config_path: str = None, username: str = None, password: str = None, refresh_interval: float = 1.0): + def __init__( + self, + url: str, + deck: Optional[Union[post_process_deck, Dict[str, Any]]] = None, + config_path: str = None, + username: str = None, + password: str = None, + use_subscription: bool = True, + cache_timeout: float = 5.0, + subscription_interval: int = 500, + *args, + **kwargs, + ): # 降低OPCUA库的日志级别 import logging logging.getLogger("opcua").setLevel(logging.WARNING) super().__init__() + + # ===== 关键修改:参照 BioyondWorkstation 处理 deck ===== + + super().__init__() + + # 处理 deck 参数 + if deck is None: + self.deck = post_process_deck(setup=True) + elif isinstance(deck, dict): + self.deck = post_process_deck(setup=True) + elif hasattr(deck, 'children'): + self.deck = deck + else: + raise ValueError(f"deck 参数类型不支持: {type(deck)}") + + if self.deck is None: + raise ValueError("Deck 配置不能为空") + + # 统计仓库信息 + warehouse_count = 0 + if hasattr(self.deck, 'children'): + warehouse_count = len(self.deck.children) + logger.info(f"Deck 初始化完成,加载 {warehouse_count} 个资源") + + # OPC UA 客户端初始化 client = Client(url) if username and password: @@ -1142,145 +1225,392 @@ def __init__(self, url: str, config_path: str = None, username: str = None, pass client.set_password(password) self._set_client(client) - self._connect() + + # 订阅相关属性 + self._use_subscription = use_subscription + self._subscription = None + self._subscription_handles = {} + self._subscription_interval = subscription_interval + + # 缓存相关属性 + self._node_values = {} # 修改为支持时间戳的缓存结构 + self._cache_timeout = cache_timeout - # 节点值缓存和刷新相关属性 - self._node_values = {} # 缓存节点值 - self._refresh_interval = refresh_interval # 刷新间隔(秒) - self._refresh_running = False - self._refresh_thread = None + # 连接状态监控 + self._connection_check_interval = 30.0 # 连接检查间隔(秒) + self._connection_monitor_running = False + self._connection_monitor_thread = None + + # 添加线程锁,保护OPC UA客户端的并发访问 + import threading + self._client_lock = threading.RLock() + + # 连接到服务器 + self._connect() # 如果提供了配置文件路径,则加载配置并注册工作流 if config_path: self.load_config(config_path) - # 
启动节点值刷新线程 - self.start_node_refresh() + # 启动连接监控 + self._start_connection_monitor() - def _register_nodes_as_attributes(self): - """将所有节点注册为实例属性,可以通过self.node_name访问""" - for node_name, node in self._node_registry.items(): - # 检查是否有对应的英文名称 - eng_name = self._reverse_mapping.get(node_name) - if eng_name: - # 如果有对应的英文名称,使用英文名称作为属性名 - attr_name = eng_name - else: - # 如果没有对应的英文名称,使用原始名称,但替换空格和特殊字符 - attr_name = node_name.replace(' ', '_').replace('-', '_') - - # 创建获取节点值的属性方法,使用中文名称获取节点值 - def create_property_getter(node_key): - def getter(self): - # 优先从缓存获取值 - if node_key in self._node_values: - return self._node_values[node_key] - # 缓存中没有则直接读取 - value, _ = self.use_node(node_key).read() - return value - return getter - - # 使用property装饰器将方法注册为类属性 - setattr(OpcUaClient, attr_name, property(create_property_getter(node_name))) - logger.info(f"已注册节点 '{node_name}' 为属性 '{attr_name}'") + + def _connect(self) -> None: + """连接到OPC UA服务器""" + logger.info('尝试连接到 OPC UA 服务器...') + if self.client: + try: + self.client.connect() + logger.info('✓ 客户端已连接!') + + # 连接后开始查找节点 + if self._variables_to_find: + self._find_nodes() + + # 如果启用订阅模式,设置订阅 + if self._use_subscription: + self._setup_subscriptions() + else: + logger.info("订阅模式已禁用,将使用按需读取模式") + + except Exception as e: + logger.error(f'客户端连接失败: {e}') + raise + else: + raise ValueError('客户端未初始化') - def refresh_node_values(self): - """刷新所有节点的值到缓存""" - if not self.client: - logger.warning("客户端未初始化,无法刷新节点值") - return - - try: - # 简单检查连接状态,如果不连接会抛出异常 - self.client.get_namespace_array() - except Exception as e: - logger.warning(f"客户端连接异常,无法刷新节点值: {e}") + class SubscriptionHandler: + """freeopcua订阅处理器:必须实现 datachange_notification 方法""" + def __init__(self, outer): + self.outer = outer + + def datachange_notification(self, node, val, data): + # 委托给外层类的处理函数 + try: + self.outer._on_subscription_datachange(node, val, data) + except Exception as e: + logger.error(f"订阅数据回调处理失败: {e}") + + # 可选:事件通知占位,避免库调用时报缺失 + def event_notification(self, event): + pass + + def _setup_subscriptions(self): + """设置 OPC UA 订阅""" + if not self.client or not self._use_subscription: return - for node_name, node in self._node_registry.items(): + with self._client_lock: try: - if hasattr(node, 'read'): - value, error = node.read() - if not error: - self._node_values[node_name] = value - #logger.debug(f"已刷新节点 '{node_name}' 的值: {value}") - except Exception as e: - logger.error(f"刷新节点 '{node_name}' 失败: {e}") + logger.info(f"开始设置订阅 (发布间隔: {self._subscription_interval}ms)...") - def get_node_value(self, name): - """获取节点值,支持中文名和英文名""" - # 如果提供的是英文名,转换为中文名 + # 创建订阅 + handler = OpcUaClient.SubscriptionHandler(self) + self._subscription = self.client.create_subscription( + self._subscription_interval, + handler + ) + + # 为所有变量节点创建监控项 + subscribed_count = 0 + skipped_count = 0 + + for node_name, node in self._node_registry.items(): + # 只为变量节点创建订阅 + if node.type == NodeType.VARIABLE and node.node_id: + try: + # 优先使用在查找阶段缓存的真实 ua.Node 对象 + ua_node = self._found_node_objects.get(node_name) + if ua_node is None: + ua_node = self.client.get_node(node.node_id) + handle = self._subscription.subscribe_data_change(ua_node) + self._subscription_handles[node_name] = handle + subscribed_count += 1 + logger.debug(f"✓ 已订阅节点: {node_name}") + except Exception as e: + skipped_count += 1 + logger.warning(f"✗ 订阅节点 {node_name} 失败: {e}") + else: + skipped_count += 1 + + logger.info(f"订阅设置完成: 成功 {subscribed_count} 个, 跳过 {skipped_count} 个") + + except Exception as e: + logger.error(f"设置订阅失败: {e}") + traceback.print_exc() + # 
订阅失败时回退到按需读取模式 + self._use_subscription = False + logger.warning("订阅模式设置失败,已自动切换到按需读取模式") + + def _on_subscription_datachange(self, node, val, data): + """订阅数据变化处理器(供内部 SubscriptionHandler 调用)""" + try: + node_id = str(node.nodeid) + current_time = time.time() + # 查找对应的节点名称 + for node_name, node_obj in self._node_registry.items(): + if node_obj.node_id == node_id: + self._node_values[node_name] = { + 'value': val, + 'timestamp': current_time, + 'source': 'subscription' + } + logger.debug(f"订阅更新: {node_name} = {val}") + break + except Exception as e: + logger.error(f"处理订阅数据失败: {e}") + + def get_node_value(self, name, use_cache=True, force_read=False): + """ + 获取节点值(智能缓存版本) + + 参数: + name: 节点名称(支持中文名或英文名) + use_cache: 是否使用缓存 + force_read: 是否强制从服务器读取(忽略缓存) + """ + # 处理名称映射 if name in self._name_mapping: chinese_name = self._name_mapping[name] - # 优先从缓存获取值 - if chinese_name in self._node_values: - return self._node_values[chinese_name] - # 缓存中没有则直接读取 - value, _ = self.use_node(chinese_name).read() - return value - # 如果提供的是中文名,直接使用 elif name in self._node_registry: - # 优先从缓存获取值 - if name in self._node_values: - return self._node_values[name] - # 缓存中没有则直接读取 - value, _ = self.use_node(name).read() - return value + chinese_name = name else: raise ValueError(f"未找到名称为 '{name}' 的节点") + + # 如果强制读取,直接从服务器读取 + if force_read: + with self._client_lock: + value, _ = self.use_node(chinese_name).read() + # 更新缓存 + self._node_values[chinese_name] = { + 'value': value, + 'timestamp': time.time(), + 'source': 'forced_read' + } + return value + + # 检查缓存 + if use_cache and chinese_name in self._node_values: + cache_entry = self._node_values[chinese_name] + cache_age = time.time() - cache_entry['timestamp'] + + # 如果是订阅模式,缓存永久有效(由订阅更新) + # 如果是按需读取模式,检查缓存超时 + if cache_entry.get('source') == 'subscription' or cache_age < self._cache_timeout: + logger.debug(f"从缓存读取: {chinese_name} = {cache_entry['value']} (age: {cache_age:.2f}s, source: {cache_entry.get('source', 'unknown')})") + return cache_entry['value'] + + # 缓存过期或不存在,从服务器读取 + with self._client_lock: + try: + value, error = self.use_node(chinese_name).read() + if not error: + # 更新缓存 + self._node_values[chinese_name] = { + 'value': value, + 'timestamp': time.time(), + 'source': 'on_demand_read' + } + return value + else: + logger.warning(f"读取节点 {chinese_name} 失败") + return None + except Exception as e: + logger.error(f"读取节点 {chinese_name} 出错: {e}") + return None def set_node_value(self, name, value): - """设置节点值,支持中文名和英文名""" - # 如果提供的是英文名,转换为中文名 + """ + 设置节点值 + 写入成功后会立即更新本地缓存 + """ + # 处理名称映射 if name in self._name_mapping: chinese_name = self._name_mapping[name] - node = self.use_node(chinese_name) - # 如果提供的是中文名,直接使用 elif name in self._node_registry: - node = self.use_node(name) + chinese_name = name else: raise ValueError(f"未找到名称为 '{name}' 的节点") - # 写入值 - error = node.write(value) - if not error: - # 更新缓存 - if hasattr(node, 'name'): - self._node_values[node.name] = value - return True + with self._client_lock: + try: + node = self.use_node(chinese_name) + error = node.write(value) + + if not error: + # 写入成功,立即更新缓存 + self._node_values[chinese_name] = { + 'value': value, + 'timestamp': time.time(), + 'source': 'write' + } + logger.debug(f"写入成功: {chinese_name} = {value}") + return True + else: + logger.warning(f"写入节点 {chinese_name} 失败") + return False + except Exception as e: + logger.error(f"写入节点 {chinese_name} 出错: {e}") + return False + + def _check_connection(self) -> bool: + """检查连接状态""" + try: + with self._client_lock: + if self.client: + # 尝试获取命名空间数组来验证连接 + 
self.client.get_namespace_array() + return True + except Exception as e: + logger.warning(f"连接检查失败: {e}") + return False return False - def _refresh_worker(self): - """节点值刷新线程的工作函数""" - self._refresh_running = True - logger.info(f"节点值刷新线程已启动,刷新间隔: {self._refresh_interval}秒") + def _connection_monitor_worker(self): + """连接监控线程工作函数""" + self._connection_monitor_running = True + logger.info(f"连接监控线程已启动 (检查间隔: {self._connection_check_interval}秒)") + + reconnect_attempts = 0 + max_reconnect_attempts = 5 - while self._refresh_running: + while self._connection_monitor_running: try: - self.refresh_node_values() + # 检查连接状态 + if not self._check_connection(): + logger.warning("检测到连接断开,尝试重新连接...") + reconnect_attempts += 1 + + if reconnect_attempts <= max_reconnect_attempts: + try: + # 尝试重新连接 + with self._client_lock: + if self.client: + try: + self.client.disconnect() + except: + pass + + self.client.connect() + logger.info("✓ 重新连接成功") + + # 重新设置订阅 + if self._use_subscription: + self._setup_subscriptions() + + reconnect_attempts = 0 + except Exception as e: + logger.error(f"重新连接失败 (尝试 {reconnect_attempts}/{max_reconnect_attempts}): {e}") + time.sleep(5) # 重连失败后等待5秒 + else: + logger.error(f"达到最大重连次数 ({max_reconnect_attempts}),停止重连") + self._connection_monitor_running = False + else: + # 连接正常,重置重连计数 + reconnect_attempts = 0 + except Exception as e: - logger.error(f"节点值刷新过程出错: {e}") + logger.error(f"连接监控出错: {e}") - # 等待下一次刷新 - time.sleep(self._refresh_interval) + # 等待下次检查 + time.sleep(self._connection_check_interval) - def start_node_refresh(self): - """启动节点值刷新线程""" - if self._refresh_thread is not None and self._refresh_thread.is_alive(): - logger.warning("节点值刷新线程已在运行") + def _start_connection_monitor(self): + """启动连接监控线程""" + if self._connection_monitor_thread is not None and self._connection_monitor_thread.is_alive(): + logger.warning("连接监控线程已在运行") return import threading - self._refresh_thread = threading.Thread(target=self._refresh_worker, daemon=True) - self._refresh_thread.start() + self._connection_monitor_thread = threading.Thread( + target=self._connection_monitor_worker, + daemon=True, + name="OpcUaConnectionMonitor" + ) + self._connection_monitor_thread.start() + + def _stop_connection_monitor(self): + """停止连接监控线程""" + self._connection_monitor_running = False + if self._connection_monitor_thread and self._connection_monitor_thread.is_alive(): + self._connection_monitor_thread.join(timeout=2.0) + logger.info("连接监控线程已停止") + + def read_node(self, node_name: str) -> str: + """ + 读取节点值的便捷方法(使用缓存) + 返回JSON格式字符串 + """ + try: + # 使用get_node_value方法,自动处理缓存 + value = self.get_node_value(node_name, use_cache=True) + + # 获取缓存信息 + chinese_name = self._name_mapping.get(node_name, node_name) + cache_info = self._node_values.get(chinese_name, {}) + + result = { + "value": value, + "error": False, + "node_name": node_name, + "timestamp": time.time(), + "cache_age": time.time() - cache_info.get('timestamp', time.time()), + "source": cache_info.get('source', 'unknown') + } + + return json.dumps(result) + except Exception as e: + logger.error(f"读取节点 {node_name} 失败: {e}") + result = { + "value": None, + "error": True, + "node_name": node_name, + "error_message": str(e), + "timestamp": time.time() + } + return json.dumps(result) + + def get_cache_stats(self) -> Dict[str, Any]: + """获取缓存统计信息""" + current_time = time.time() + stats = { + 'total_cached_nodes': len(self._node_values), + 'subscription_nodes': 0, + 'on_demand_nodes': 0, + 'expired_nodes': 0, + 'cache_timeout': self._cache_timeout, + 
'using_subscription': self._use_subscription + } - def stop_node_refresh(self): - """停止节点值刷新线程""" - self._refresh_running = False - if self._refresh_thread and self._refresh_thread.is_alive(): - self._refresh_thread.join(timeout=2.0) - logger.info("节点值刷新线程已停止") + for node_name, cache_entry in self._node_values.items(): + source = cache_entry.get('source', 'unknown') + cache_age = current_time - cache_entry['timestamp'] + + if source == 'subscription': + stats['subscription_nodes'] += 1 + elif source in ['on_demand_read', 'forced_read', 'write']: + stats['on_demand_nodes'] += 1 + + if cache_age > self._cache_timeout: + stats['expired_nodes'] += 1 + return stats + + def print_cache_stats(self): + """打印缓存统计信息""" + stats = self.get_cache_stats() + print("\n" + "="*80) + print("缓存统计信息") + print("="*80) + print(f"总缓存节点数: {stats['total_cached_nodes']}") + print(f"订阅模式: {'启用' if stats['using_subscription'] else '禁用'}") + print(f" - 订阅更新节点: {stats['subscription_nodes']}") + print(f" - 按需读取节点: {stats['on_demand_nodes']}") + print(f" - 已过期节点: {stats['expired_nodes']}") + print(f"缓存超时时间: {stats['cache_timeout']}秒") + print("="*80 + "\n") + def load_config(self, config_path: str) -> None: """从JSON配置文件加载并注册工作流""" try: @@ -1289,29 +1619,39 @@ def load_config(self, config_path: str) -> None: # 处理节点注册 if "register_node_list_from_csv_path" in config_data: - # 获取配置文件所在目录 config_dir = os.path.dirname(os.path.abspath(config_path)) - # 处理CSV路径,如果是相对路径,则相对于配置文件所在目录 if "path" in config_data["register_node_list_from_csv_path"]: csv_path = config_data["register_node_list_from_csv_path"]["path"] if not os.path.isabs(csv_path): - # 转换为绝对路径 csv_path = os.path.join(config_dir, csv_path) config_data["register_node_list_from_csv_path"]["path"] = csv_path - # 直接使用字典 self.register_node_list_from_csv_path(**config_data["register_node_list_from_csv_path"]) + + if self.client and self._variables_to_find: + logger.info("CSV加载完成,开始查找服务器节点...") + self._find_nodes() # 处理工作流创建 if "create_flow" in config_data: - # 直接传递字典列表 self.create_workflow_from_json(config_data["create_flow"]) - # 将工作流注册为实例方法 self.register_workflows_as_methods() # 将所有节点注册为属性 self._register_nodes_as_attributes() + + # 打印统计信息 + found_count = len(self._node_registry) + total_count = len(self._variables_to_find) + if found_count < total_count: + logger.warning(f"节点查找完成:找到 {found_count}/{total_count} 个节点") + else: + logger.info(f"✓ 节点查找完成:所有 {found_count} 个节点均已找到") + + # 如果使用订阅模式,重新设置订阅(确保新节点被订阅) + if self._use_subscription and found_count > 0: + self._setup_subscriptions() logger.info(f"成功从 {config_path} 加载配置") except Exception as e: @@ -1319,12 +1659,71 @@ def load_config(self, config_path: str) -> None: traceback.print_exc() def disconnect(self): - # 停止刷新线程 - self.stop_node_refresh() + """断开连接并清理资源""" + logger.info("正在断开连接...") + + # 停止连接监控 + self._stop_connection_monitor() + + # 删除订阅 + if self._subscription: + try: + with self._client_lock: + self._subscription.delete() + logger.info("订阅已删除") + except Exception as e: + logger.warning(f"删除订阅失败: {e}") + # 断开客户端连接 if self.client: - self.client.disconnect() - logger.info("OPC UA client disconnected") + try: + with self._client_lock: + self.client.disconnect() + logger.info("✓ OPC UA 客户端已断开连接") + except Exception as e: + logger.error(f"断开连接失败: {e}") + + def _register_nodes_as_attributes(self): + """将所有节点注册为实例属性""" + for node_name, node in self._node_registry.items(): + if not node.node_id or node.node_id == "": + logger.warning(f"⚠ 节点 '{node_name}' 的 node_id 为空,跳过注册为属性") + continue + + eng_name = 
self._reverse_mapping.get(node_name) + attr_name = eng_name if eng_name else node_name.replace(' ', '_').replace('-', '_') + + def create_property_getter(node_key): + def getter(self): + return self.get_node_value(node_key, use_cache=True) + return getter + + setattr(OpcUaClient, attr_name, property(create_property_getter(node_name))) + logger.debug(f"已注册节点 '{node_name}' 为属性 '{attr_name}'") + + def post_init(self, ros_node): + """ROS2 节点就绪后的初始化""" + if not (hasattr(self, 'deck') and self.deck): + return + + if not (hasattr(ros_node, 'resource_tracker') and ros_node.resource_tracker): + logger.warning("resource_tracker 不存在,无法注册 deck") + return + + # 1. 本地注册(必需) + ros_node.resource_tracker.add_resource(self.deck) + + # 2. 上传云端 + try: + from unilabos.ros.nodes.base_device_node import ROS2DeviceNode + ROS2DeviceNode.run_async_func( + ros_node.update_resource, + True, + resources=[self.deck] + ) + logger.info("Deck 已上传到云端") + except Exception as e: + logger.error(f"上传失败: {e}") if __name__ == '__main__': @@ -1338,8 +1737,8 @@ def disconnect(self): # 创建OPC UA客户端并加载配置 try: client = OpcUaClient( - url="opc.tcp://localhost:4840/freeopcua/server/", # 替换为实际的OPC UA服务器地址 - config_path=config_path # 传入配置文件路径 + url="opc.tcp://192.168.1.88:4840/freeopcua/server/", # 替换为实际的OPC UA服务器地址 + config_path="D:\\Uni-Lab-OS\\unilabos\\device_comms\\opcua_client\\opcua_huairou.json" # 传入配置文件路径 ) # 列出所有已注册的工作流 @@ -1349,13 +1748,15 @@ def disconnect(self): # 测试trigger_grab_action工作流 - 使用英文参数名 print("\n测试trigger_grab_action工作流 - 使用英文参数名:") - client.trigger_grab_action(reaction_tank_number=2, raw_tank_number=3) + client.trigger_grab_action(reaction_tank_number=2, raw_tank_number=2) + # client.set_node_value("reaction_tank_number", 2) + # 读取节点值 - 使用英文节点名 grab_complete = client.get_node_value("grab_complete") reaction_tank = client.get_node_value("reaction_tank_number") raw_tank = client.get_node_value("raw_tank_number") - + print(f"\n执行后状态检查 (使用英文节点名):") print(f" - 抓取完成状态: {grab_complete}") print(f" - 当前反应罐号码: {reaction_tank}") @@ -1376,5 +1777,5 @@ def disconnect(self): except Exception as e: print(f"错误: {e}") traceback.print_exc() - - + + diff --git a/unilabos/device_comms/opcua_client/node/uniopcua.py b/unilabos/device_comms/opcua_client/node/uniopcua.py index ce16cfc4..d99a5fd7 100644 --- a/unilabos/device_comms/opcua_client/node/uniopcua.py +++ b/unilabos/device_comms/opcua_client/node/uniopcua.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from typing import Tuple, Union, Optional, Any, List -from opcua import Client, Node +from opcua import Client, Node, ua from opcua.ua import NodeId, NodeClass, VariantType @@ -47,23 +47,68 @@ def __init__(self, client: Client, name: str, node_id: str, typ: NodeType, data_ def _get_node(self) -> Node: if self._node is None: try: - # 检查是否是NumericNodeId(ns=X;i=Y)格式 - if "NumericNodeId" in self._node_id: - # 从字符串中提取命名空间和标识符 - import re - match = re.search(r'ns=(\d+);i=(\d+)', self._node_id) - if match: - ns = int(match.group(1)) - identifier = int(match.group(2)) - node_id = NodeId(identifier, ns) - self._node = self._client.get_node(node_id) + # 尝试多种 NodeId 字符串格式解析,兼容不同服务器/库的输出 + # 可能的格式示例: 'ns=2;i=1234', 'ns=2;s=SomeString', + # 'StringNodeId(ns=4;s=OPC|变量名)', 'NumericNodeId(ns=2;i=1234)' 等 + import re + + nid = self._node_id + # 如果已经是 NodeId/Node 对象(库用户可能传入),直接使用 + try: + from opcua.ua import NodeId as UaNodeId + if isinstance(nid, UaNodeId): + self._node = self._client.get_node(nid) + return self._node + except Exception: + # 若导入或类型判断失败,则继续下一步 + pass + + # 直接以字符串形式处理 + if 
isinstance(nid, str): + nid = nid.strip() + + # 处理包含类名的格式,如 'StringNodeId(ns=4;s=...)' 或 'NumericNodeId(ns=2;i=...)' + # 提取括号内的内容 + match_wrapped = re.match(r'(String|Numeric|Byte|Guid|TwoByteNode|FourByteNode)NodeId\((.*)\)', nid) + if match_wrapped: + # 提取括号内的实际 node_id 字符串 + nid = match_wrapped.group(2).strip() + + # 常见短格式 'ns=2;i=1234' 或 'ns=2;s=SomeString' + if re.match(r'^ns=\d+;[is]=', nid): + self._node = self._client.get_node(nid) else: - raise ValueError(f"无法解析节点ID: {self._node_id}") + # 尝试提取 ns 和 i 或 s + # 对于字符串标识符,可能包含特殊字符,使用非贪婪匹配 + m_num = re.search(r'ns=(\d+);i=(\d+)', nid) + m_str = re.search(r'ns=(\d+);s=(.+?)(?:\)|$)', nid) + if m_num: + ns = int(m_num.group(1)) + identifier = int(m_num.group(2)) + node_id = NodeId(identifier, ns) + self._node = self._client.get_node(node_id) + elif m_str: + ns = int(m_str.group(1)) + identifier = m_str.group(2).strip() + # 对于字符串标识符,直接使用字符串格式 + node_id_str = f"ns={ns};s={identifier}" + self._node = self._client.get_node(node_id_str) + else: + # 回退:尝试直接传入字符串(有些实现接受其它格式) + try: + self._node = self._client.get_node(self._node_id) + except Exception as e: + # 输出更详细的错误信息供调试 + print(f"获取节点失败(尝试直接字符串): {self._node_id}, 错误: {e}") + raise else: - # 直接使用节点ID字符串 + # 非字符串,尝试直接使用 self._node = self._client.get_node(self._node_id) except Exception as e: print(f"获取节点失败: {self._node_id}, 错误: {e}") + # 添加额外提示,帮助定位 BadNodeIdUnknown 问题 + print("提示: 请确认该 node_id 是否来自当前连接的服务器地址空间," \ + "以及 CSV/配置中名称与服务器 BrowseName 是否匹配。") raise return self._node @@ -104,7 +149,56 @@ def read(self) -> Tuple[Any, bool]: def write(self, value: Any) -> bool: try: - self._get_node().set_value(value) + # 如果声明了数据类型,则尝试转换并使用对应的 Variant 写入 + coerced = value + try: + if self._data_type is not None: + # 基于声明的数据类型做简单类型转换 + dt = self._data_type + if dt in (DataType.SBYTE, DataType.BYTE, DataType.INT16, DataType.UINT16, + DataType.INT32, DataType.UINT32, DataType.INT64, DataType.UINT64): + # 数值类型 -> int + if isinstance(value, str): + coerced = int(value) + else: + coerced = int(value) + elif dt in (DataType.FLOAT, DataType.DOUBLE): + if isinstance(value, str): + coerced = float(value) + else: + coerced = float(value) + elif dt == DataType.BOOLEAN: + if isinstance(value, str): + v = value.strip().lower() + if v in ("true", "1", "yes", "on"): + coerced = True + elif v in ("false", "0", "no", "off"): + coerced = False + else: + coerced = bool(value) + else: + coerced = bool(value) + elif dt == DataType.STRING or dt == DataType.BYTESTRING or dt == DataType.DATETIME: + coerced = str(value) + + # 使用 ua.Variant 明确指定 VariantType + try: + variant = ua.Variant(coerced, dt.value) + self._get_node().set_value(variant) + except Exception: + # 回退:有些 set_value 实现接受 (value, variant_type) + try: + self._get_node().set_value(coerced, dt.value) + except Exception: + # 最后回退到直接写入(保持兼容性) + self._get_node().set_value(coerced) + else: + # 未声明数据类型,直接写入 + self._get_node().set_value(value) + except Exception: + # 若在转换或按数据类型写入失败,尝试直接写入原始值并让上层捕获错误 + self._get_node().set_value(value) + return False except Exception as e: print(f"写入变量 {self._name} 失败: {e}") @@ -120,20 +214,50 @@ def __init__(self, client: Client, name: str, node_id: str, parent_node_id: str, def _get_parent_node(self) -> Node: if self._parent_node is None: try: - # 检查是否是NumericNodeId(ns=X;i=Y)格式 - if "NumericNodeId" in self._parent_node_id: - # 从字符串中提取命名空间和标识符 - import re - match = re.search(r'ns=(\d+);i=(\d+)', self._parent_node_id) - if match: - ns = int(match.group(1)) - identifier = int(match.group(2)) - node_id = NodeId(identifier, ns) - 
self._parent_node = self._client.get_node(node_id) + # 处理父节点ID,使用与_get_node相同的解析逻辑 + import re + + nid = self._parent_node_id + + # 如果已经是 NodeId 对象,直接使用 + try: + from opcua.ua import NodeId as UaNodeId + if isinstance(nid, UaNodeId): + self._parent_node = self._client.get_node(nid) + return self._parent_node + except Exception: + pass + + # 字符串处理 + if isinstance(nid, str): + nid = nid.strip() + + # 处理包含类名的格式 + match_wrapped = re.match(r'(String|Numeric|Byte|Guid|TwoByteNode|FourByteNode)NodeId\((.*)\)', nid) + if match_wrapped: + nid = match_wrapped.group(2).strip() + + # 常见短格式 + if re.match(r'^ns=\d+;[is]=', nid): + self._parent_node = self._client.get_node(nid) else: - raise ValueError(f"无法解析父节点ID: {self._parent_node_id}") + # 提取 ns 和 i 或 s + m_num = re.search(r'ns=(\d+);i=(\d+)', nid) + m_str = re.search(r'ns=(\d+);s=(.+?)(?:\)|$)', nid) + if m_num: + ns = int(m_num.group(1)) + identifier = int(m_num.group(2)) + node_id = NodeId(identifier, ns) + self._parent_node = self._client.get_node(node_id) + elif m_str: + ns = int(m_str.group(1)) + identifier = m_str.group(2).strip() + node_id_str = f"ns={ns};s={identifier}" + self._parent_node = self._client.get_node(node_id_str) + else: + # 回退 + self._parent_node = self._client.get_node(self._parent_node_id) else: - # 直接使用节点ID字符串 self._parent_node = self._client.get_node(self._parent_node_id) except Exception as e: print(f"获取父节点失败: {self._parent_node_id}, 错误: {e}") diff --git a/unilabos/devices/workstation/post_process/__init__.py b/unilabos/devices/workstation/post_process/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/unilabos/devices/workstation/post_process/bottle_carriers.py b/unilabos/devices/workstation/post_process/bottle_carriers.py new file mode 100644 index 00000000..51943c95 --- /dev/null +++ b/unilabos/devices/workstation/post_process/bottle_carriers.py @@ -0,0 +1,93 @@ +from pylabrobot.resources import create_homogeneous_resources, Coordinate, ResourceHolder, create_ordered_items_2d + +from unilabos.resources.itemized_carrier import BottleCarrier +from unilabos.devices.workstation.post_process.bottles import POST_PROCESS_PolymerStation_Reagent_Bottle + +# 命名约定:试剂瓶-Bottle,烧杯-Beaker,烧瓶-Flask,小瓶-Vial + + +# ============================================================================ +# 聚合站(PolymerStation)载体定义(统一入口) +# ============================================================================ + +def POST_PROCESS_Raw_1BottleCarrier(name: str) -> BottleCarrier: + """聚合站-单试剂瓶载架 + + 参数: + - name: 载架名称前缀 + """ + + # 载架尺寸 (mm) + carrier_size_x = 127.8 + carrier_size_y = 85.5 + carrier_size_z = 20.0 + + # 烧杯/试剂瓶占位尺寸(使用圆形占位) + beaker_diameter = 60.0 + + # 计算中央位置 + center_x = (carrier_size_x - beaker_diameter) / 2 + center_y = (carrier_size_y - beaker_diameter) / 2 + center_z = 5.0 + + carrier = BottleCarrier( + name=name, + size_x=carrier_size_x, + size_y=carrier_size_y, + size_z=carrier_size_z, + sites=create_homogeneous_resources( + klass=ResourceHolder, + locations=[Coordinate(center_x, center_y, center_z)], + resource_size_x=beaker_diameter, + resource_size_y=beaker_diameter, + name_prefix=name, + ), + model="POST_PROCESS_Raw_1BottleCarrier", + ) + carrier.num_items_x = 1 + carrier.num_items_y = 1 + carrier.num_items_z = 1 + # 统一后缀采用 "flask_1" 命名(可按需调整) + carrier[0] = POST_PROCESS_PolymerStation_Reagent_Bottle(f"{name}_flask_1") + return carrier + +def POST_PROCESS_Reaction_1BottleCarrier(name: str) -> BottleCarrier: + """聚合站-单试剂瓶载架 + + 参数: + - name: 载架名称前缀 + """ + + # 载架尺寸 (mm) + carrier_size_x = 127.8 + 
carrier_size_y = 85.5 + carrier_size_z = 20.0 + + # 烧杯/试剂瓶占位尺寸(使用圆形占位) + beaker_diameter = 60.0 + + # 计算中央位置 + center_x = (carrier_size_x - beaker_diameter) / 2 + center_y = (carrier_size_y - beaker_diameter) / 2 + center_z = 5.0 + + carrier = BottleCarrier( + name=name, + size_x=carrier_size_x, + size_y=carrier_size_y, + size_z=carrier_size_z, + sites=create_homogeneous_resources( + klass=ResourceHolder, + locations=[Coordinate(center_x, center_y, center_z)], + resource_size_x=beaker_diameter, + resource_size_y=beaker_diameter, + name_prefix=name, + ), + model="POST_PROCESS_Reaction_1BottleCarrier", + ) + carrier.num_items_x = 1 + carrier.num_items_y = 1 + carrier.num_items_z = 1 + # 统一后缀采用 "flask_1" 命名(可按需调整) + carrier[0] = POST_PROCESS_PolymerStation_Reagent_Bottle(f"{name}_flask_1") + return carrier diff --git a/unilabos/devices/workstation/post_process/bottles.py b/unilabos/devices/workstation/post_process/bottles.py new file mode 100644 index 00000000..6eccdec0 --- /dev/null +++ b/unilabos/devices/workstation/post_process/bottles.py @@ -0,0 +1,20 @@ +from unilabos.resources.itemized_carrier import Bottle + + +def POST_PROCESS_PolymerStation_Reagent_Bottle( + name: str, + diameter: float = 70.0, + height: float = 120.0, + max_volume: float = 500000.0, # 500mL + barcode: str = None, +) -> Bottle: + """创建试剂瓶""" + return Bottle( + name=name, + diameter=diameter, + height=height, + max_volume=max_volume, + barcode=barcode, + model="POST_PROCESS_PolymerStation_Reagent_Bottle", + ) + diff --git a/unilabos/devices/workstation/post_process/decks.py b/unilabos/devices/workstation/post_process/decks.py new file mode 100644 index 00000000..4f7292cb --- /dev/null +++ b/unilabos/devices/workstation/post_process/decks.py @@ -0,0 +1,46 @@ +from os import name +from pylabrobot.resources import Deck, Coordinate, Rotation + +from unilabos.devices.workstation.post_process.warehouses import ( + post_process_warehouse_4x3x1, + post_process_warehouse_4x3x1_2, +) + + + +class post_process_deck(Deck): + def __init__( + self, + name: str = "post_process_deck", + size_x: float = 2000.0, + size_y: float = 1000.0, + size_z: float = 2670.0, + category: str = "deck", + setup: bool = True, + ) -> None: + super().__init__(name=name, size_x=1700.0, size_y=1350.0, size_z=2670.0) + if setup: + self.setup() + + def setup(self) -> None: + # 添加仓库 + self.warehouses = { + "原料罐堆栈": post_process_warehouse_4x3x1("原料罐堆栈"), + "反应罐堆栈": post_process_warehouse_4x3x1_2("反应罐堆栈"), + + } + # warehouse 的位置 + self.warehouse_locations = { + "原料罐堆栈": Coordinate(350.0, 55.0, 0.0), + "反应罐堆栈": Coordinate(1000.0, 55.0, 0.0), + + } + + for warehouse_name, warehouse in self.warehouses.items(): + self.assign_child_resource(warehouse, location=self.warehouse_locations[warehouse_name]) + + + + + + diff --git a/unilabos/devices/workstation/post_process/opcua_huairou.json b/unilabos/devices/workstation/post_process/opcua_huairou.json new file mode 100644 index 00000000..ac6a6378 --- /dev/null +++ b/unilabos/devices/workstation/post_process/opcua_huairou.json @@ -0,0 +1,157 @@ +{ + "register_node_list_from_csv_path": { + "path": "opcua_nodes_huairou.csv" + }, + "create_flow": [ + { + "name": "trigger_grab_action", + "description": "触发反应罐及原料罐抓取动作", + "parameters": ["reaction_tank_number", "raw_tank_number"], + "action": [ + { + "init_function": { + "func_name": "init_grab_params", + "write_nodes": ["reaction_tank_number", "raw_tank_number"] + }, + "start_function": { + "func_name": "start_grab", + "write_nodes": {"grab_trigger": true}, + 
"condition_nodes": ["grab_complete"], + "stop_condition_expression": "grab_complete == True", + "timeout_seconds": 999999.0 + }, + "stop_function": { + "func_name": "stop_grab", + "write_nodes": {"grab_trigger": false} + } + } + ] + }, + { + "name": "trigger_post_processing", + "description": "触发后处理动作", + "parameters": ["atomization_fast_speed", "wash_slow_speed","injection_pump_suction_speed", + "injection_pump_push_speed","raw_liquid_suction_count","first_wash_water_amount", + "second_wash_water_amount","first_powder_mixing_time","second_powder_mixing_time", + "first_powder_wash_count","second_powder_wash_count","initial_water_amount", + "pre_filtration_mixing_time","atomization_pressure_kpa"], + "action": [ + { + "init_function": { + "func_name": "init_post_processing_params", + "write_nodes": ["atomization_fast_speed", "wash_slow_speed","injection_pump_suction_speed", + "injection_pump_push_speed","raw_liquid_suction_count","first_wash_water_amount", + "second_wash_water_amount","first_powder_mixing_time","second_powder_mixing_time", + "first_powder_wash_count","second_powder_wash_count","initial_water_amount", + "pre_filtration_mixing_time","atomization_pressure_kpa"] + }, + "start_function": { + "func_name": "start_post_processing", + "write_nodes": {"post_process_trigger": true}, + "condition_nodes": ["post_process_complete"], + "stop_condition_expression": "post_process_complete == True", + "timeout_seconds": 999999.0 + }, + "stop_function": { + "func_name": "stop_post_processing", + "write_nodes": {"post_process_trigger": false} + } + } + ] + }, + { + "name": "trigger_cleaning_action", + "description": "触发清洗及管路吹气动作", + "parameters": ["nmp_outer_wall_cleaning_injection", "nmp_outer_wall_cleaning_count","nmp_outer_wall_cleaning_wait_time", + "nmp_outer_wall_cleaning_waste_time","nmp_inner_wall_cleaning_injection","nmp_inner_wall_cleaning_count", + "nmp_pump_cleaning_suction_count", + "nmp_inner_wall_cleaning_waste_time", + "nmp_stirrer_cleaning_injection", + "nmp_stirrer_cleaning_count", + "nmp_stirrer_cleaning_wait_time", + "nmp_stirrer_cleaning_waste_time", + "water_outer_wall_cleaning_injection", + "water_outer_wall_cleaning_count", + "water_outer_wall_cleaning_wait_time", + "water_outer_wall_cleaning_waste_time", + "water_inner_wall_cleaning_injection", + "water_inner_wall_cleaning_count", + "water_pump_cleaning_suction_count", + "water_inner_wall_cleaning_waste_time", + "water_stirrer_cleaning_injection", + "water_stirrer_cleaning_count", + "water_stirrer_cleaning_wait_time", + "water_stirrer_cleaning_waste_time", + "acetone_outer_wall_cleaning_injection", + "acetone_outer_wall_cleaning_count", + "acetone_outer_wall_cleaning_wait_time", + "acetone_outer_wall_cleaning_waste_time", + "acetone_inner_wall_cleaning_injection", + "acetone_inner_wall_cleaning_count", + "acetone_pump_cleaning_suction_count", + "acetone_inner_wall_cleaning_waste_time", + "acetone_stirrer_cleaning_injection", + "acetone_stirrer_cleaning_count", + "acetone_stirrer_cleaning_wait_time", + "acetone_stirrer_cleaning_waste_time", + "pipe_blowing_time", + "injection_pump_forward_empty_suction_count", + "injection_pump_reverse_empty_suction_count", + "filtration_liquid_selection"], + "action": [ + { + "init_function": { + "func_name": "init_cleaning_params", + "write_nodes": ["nmp_outer_wall_cleaning_injection", "nmp_outer_wall_cleaning_count","nmp_outer_wall_cleaning_wait_time", + "nmp_outer_wall_cleaning_waste_time","nmp_inner_wall_cleaning_injection","nmp_inner_wall_cleaning_count", + 
"nmp_pump_cleaning_suction_count", + "nmp_inner_wall_cleaning_waste_time", + "nmp_stirrer_cleaning_injection", + "nmp_stirrer_cleaning_count", + "nmp_stirrer_cleaning_wait_time", + "nmp_stirrer_cleaning_waste_time", + "water_outer_wall_cleaning_injection", + "water_outer_wall_cleaning_count", + "water_outer_wall_cleaning_wait_time", + "water_outer_wall_cleaning_waste_time", + "water_inner_wall_cleaning_injection", + "water_inner_wall_cleaning_count", + "water_pump_cleaning_suction_count", + "water_inner_wall_cleaning_waste_time", + "water_stirrer_cleaning_injection", + "water_stirrer_cleaning_count", + "water_stirrer_cleaning_wait_time", + "water_stirrer_cleaning_waste_time", + "acetone_outer_wall_cleaning_injection", + "acetone_outer_wall_cleaning_count", + "acetone_outer_wall_cleaning_wait_time", + "acetone_outer_wall_cleaning_waste_time", + "acetone_inner_wall_cleaning_injection", + "acetone_inner_wall_cleaning_count", + "acetone_pump_cleaning_suction_count", + "acetone_inner_wall_cleaning_waste_time", + "acetone_stirrer_cleaning_injection", + "acetone_stirrer_cleaning_count", + "acetone_stirrer_cleaning_wait_time", + "acetone_stirrer_cleaning_waste_time", + "pipe_blowing_time", + "injection_pump_forward_empty_suction_count", + "injection_pump_reverse_empty_suction_count", + "filtration_liquid_selection"] + }, + "start_function": { + "func_name": "start_cleaning", + "write_nodes": {"cleaning_and_pipe_blowing_trigger": true}, + "condition_nodes": ["cleaning_complete"], + "stop_condition_expression": "cleaning_complete == True", + "timeout_seconds": 999999.0 + }, + "stop_function": { + "func_name": "stop_cleaning", + "write_nodes": {"cleaning_and_pipe_blowing_trigger": false} + } + } + ] + } + ] +} diff --git a/unilabos/devices/workstation/post_process/opcua_nodes_huairou.csv b/unilabos/devices/workstation/post_process/opcua_nodes_huairou.csv new file mode 100644 index 00000000..454de968 --- /dev/null +++ b/unilabos/devices/workstation/post_process/opcua_nodes_huairou.csv @@ -0,0 +1,70 @@ +Name,EnglishName,NodeType,DataType,NodeLanguage,NodeId +原料罐号码,raw_tank_number,VARIABLE,INT16,Chinese,ns=4;s=OPC|原料罐号码 +反应罐号码,reaction_tank_number,VARIABLE,INT16,Chinese,ns=4;s=OPC|反应罐号码 +反应罐及原料罐抓取触发,grab_trigger,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|反应罐及原料罐抓取触发 +后处理动作触发,post_process_trigger,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|后处理动作触发 +搅拌桨雾化快速,atomization_fast_speed,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|搅拌桨雾化快速 +搅拌桨洗涤慢速,wash_slow_speed,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|搅拌桨洗涤慢速 +注射泵抽液速度,injection_pump_suction_speed,VARIABLE,INT16,Chinese,ns=4;s=OPC|注射泵抽液速度 +注射泵推液速度,injection_pump_push_speed,VARIABLE,INT16,Chinese,ns=4;s=OPC|注射泵推液速度 +抽原液次数,raw_liquid_suction_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|抽原液次数 +第1次洗涤加水量,first_wash_water_amount,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|第1次洗涤加水量 +第2次洗涤加水量,second_wash_water_amount,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|第2次洗涤加水量 +第1次粉末搅拌时间,first_powder_mixing_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|第1次粉末搅拌时间 +第2次粉末搅拌时间,second_powder_mixing_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|第2次粉末搅拌时间 +第1次粉末洗涤次数,first_powder_wash_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|第1次粉末洗涤次数 +第2次粉末洗涤次数,second_powder_wash_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|第2次粉末洗涤次数 +最开始加水量,initial_water_amount,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|最开始加水量 +抽滤前搅拌时间,pre_filtration_mixing_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|抽滤前搅拌时间 +雾化压力Kpa,atomization_pressure_kpa,VARIABLE,INT16,Chinese,ns=4;s=OPC|雾化压力Kpa +清洗及管路吹气触发,cleaning_and_pipe_blowing_trigger,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|清洗及管路吹气触发 
+废液桶满报警,waste_tank_full_alarm,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|废液桶满报警 +清水桶空报警,water_tank_empty_alarm,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|清水桶空报警 +NMP桶空报警,nmp_tank_empty_alarm,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|NMP桶空报警 +丙酮桶空报警,acetone_tank_empty_alarm,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|丙酮桶空报警 +门开报警,door_open_alarm,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|门开报警 +反应罐及原料罐抓取完成PLCtoPC,grab_complete,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|反应罐及原料罐抓取完成PLCtoPC +后处理动作完成PLCtoPC,post_process_complete,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|后处理动作完成PLCtoPC +清洗及管路吹气完成PLCtoPC,cleaning_complete,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|清洗及管路吹气完成PLCtoPC +远程模式PLCtoPC,remote_mode,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|远程模式PLCtoPC +设备准备就绪PLCtoPC,device_ready,VARIABLE,BOOLEAN,Chinese,ns=4;s=OPC|设备准备就绪PLCtoPC +NMP外壁清洗加注,nmp_outer_wall_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|NMP外壁清洗加注 +NMP外壁清洗次数,nmp_outer_wall_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|NMP外壁清洗次数 +NMP外壁清洗等待时间,nmp_outer_wall_cleaning_wait_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|NMP外壁清洗等待时间 +NMP外壁清洗抽废时间,nmp_outer_wall_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|NMP外壁清洗抽废时间 +NMP内壁清洗加注,nmp_inner_wall_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|NMP内壁清洗加注 +NMP内壁清洗次数,nmp_inner_wall_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|NMP内壁清洗次数 +NMP泵清洗抽次数,nmp_pump_cleaning_suction_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|NMP泵清洗抽次数 +NMP内壁清洗抽废时间,nmp_inner_wall_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|NMP内壁清洗抽废时间 +NMP搅拌桨清洗加注,nmp_stirrer_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|NMP搅拌桨清洗加注 +NMP搅拌桨清洗次数,nmp_stirrer_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|NMP搅拌桨清洗次数 +NMP搅拌桨清洗等待时间,nmp_stirrer_cleaning_wait_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|NMP搅拌桨清洗等待时间 +NMP搅拌桨清洗抽废时间,nmp_stirrer_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|NMP搅拌桨清洗抽废时间 +清水外壁清洗加注,water_outer_wall_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|清水外壁清洗加注 +清水外壁清洗次数,water_outer_wall_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|清水外壁清洗次数 +清水外壁清洗等待时间,water_outer_wall_cleaning_wait_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|清水外壁清洗等待时间 +清水外壁清洗抽废时间,water_outer_wall_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|清水外壁清洗抽废时间 +清水内壁清洗加注,water_inner_wall_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|清水内壁清洗加注 +清水内壁清洗次数,water_inner_wall_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|清水内壁清洗次数 +清水泵清洗抽次数,water_pump_cleaning_suction_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|清水泵清洗抽次数 +清水内壁清洗抽废时间,water_inner_wall_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|清水内壁清洗抽废时间 +清水搅拌桨清洗加注,water_stirrer_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|清水搅拌桨清洗加注 +清水搅拌桨清洗次数,water_stirrer_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|清水搅拌桨清洗次数 +清水搅拌桨清洗等待时间,water_stirrer_cleaning_wait_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|清水搅拌桨清洗等待时间 +清水搅拌桨清洗抽废时间,water_stirrer_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|清水搅拌桨清洗抽废时间 +丙酮外壁清洗加注,acetone_outer_wall_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|丙酮外壁清洗加注 +丙酮外壁清洗次数,acetone_outer_wall_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|丙酮外壁清洗次数 +丙酮外壁清洗等待时间,acetone_outer_wall_cleaning_wait_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|丙酮外壁清洗等待时间 +丙酮外壁清洗抽废时间,acetone_outer_wall_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|丙酮外壁清洗抽废时间 +丙酮内壁清洗加注,acetone_inner_wall_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|丙酮内壁清洗加注 +丙酮内壁清洗次数,acetone_inner_wall_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|丙酮内壁清洗次数 
+丙酮泵清洗抽次数,acetone_pump_cleaning_suction_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|丙酮泵清洗抽次数 +丙酮内壁清洗抽废时间,acetone_inner_wall_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|丙酮内壁清洗抽废时间 +丙酮搅拌桨清洗加注,acetone_stirrer_cleaning_injection,VARIABLE,FLOAT,Chinese,ns=4;s=OPC|丙酮搅拌桨清洗加注 +丙酮搅拌桨清洗次数,acetone_stirrer_cleaning_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|丙酮搅拌桨清洗次数 +丙酮搅拌桨清洗等待时间,acetone_stirrer_cleaning_wait_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|丙酮搅拌桨清洗等待时间 +丙酮搅拌桨清洗抽废时间,acetone_stirrer_cleaning_waste_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|丙酮搅拌桨清洗抽废时间 +管道吹气时间,pipe_blowing_time,VARIABLE,INT32,Chinese,ns=4;s=OPC|管道吹气时间 +注射泵正向空抽次数,injection_pump_forward_empty_suction_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|注射泵正向空抽次数 +注射泵反向空抽次数,injection_pump_reverse_empty_suction_count,VARIABLE,INT16,Chinese,ns=4;s=OPC|注射泵反向空抽次数 +抽滤液选择0水1丙酮,filtration_liquid_selection,VARIABLE,INT16,Chinese,ns=4;s=OPC|抽滤液选择0水1丙酮 \ No newline at end of file diff --git a/unilabos/devices/workstation/post_process/post_process.py b/unilabos/devices/workstation/post_process/post_process.py new file mode 100644 index 00000000..b45cded2 --- /dev/null +++ b/unilabos/devices/workstation/post_process/post_process.py @@ -0,0 +1,1781 @@ +import json +import time +import traceback +from typing import Any, Union, List, Dict, Callable, Optional, Tuple +from pydantic import BaseModel + +from opcua import Client, ua +import pandas as pd +import os + +from unilabos.device_comms.opcua_client.node.uniopcua import Base as OpcUaNodeBase +from unilabos.device_comms.opcua_client.node.uniopcua import Variable, Method, NodeType, DataType +from unilabos.device_comms.universal_driver import UniversalDriver +from unilabos.utils.log import logger +from unilabos.devices.workstation.post_process.decks import post_process_deck + +class OpcUaNode(BaseModel): + name: str + node_type: NodeType + node_id: str = "" + data_type: Optional[DataType] = None + parent_node_id: Optional[str] = None + + +class OpcUaWorkflow(BaseModel): + name: str + actions: List[ + Union[ + "OpcUaWorkflow", + Callable[ + [Callable[[str], OpcUaNodeBase]], + None + ]] + ] + + +class Action(BaseModel): + name: str + rw: bool # read是0 write是1 + + +class WorkflowAction(BaseModel): + init: Optional[Callable[[Callable[[str], OpcUaNodeBase]], bool]] = None + start: Optional[Callable[[Callable[[str], OpcUaNodeBase]], bool]] = None + stop: Optional[Callable[[Callable[[str], OpcUaNodeBase]], bool]] = None + cleanup: Optional[Callable[[Callable[[str], OpcUaNodeBase]], None]] = None + + +class OpcUaWorkflowModel(BaseModel): + name: str + actions: List[Union["OpcUaWorkflowModel", WorkflowAction]] + parameters: Optional[List[str]] = None + description: Optional[str] = None + + +""" 前后端Json解析用 """ +class NodeFunctionJson(BaseModel): + func_name: str + node_name: str + mode: str # read, write, call + value: Any = None + + +class InitFunctionJson(NodeFunctionJson): + pass + + +class StartFunctionJson(NodeFunctionJson): + write_functions: List[str] + condition_functions: List[str] + stop_condition_expression: str + + +class StopFunctionJson(NodeFunctionJson): + pass + + +class CleanupFunctionJson(NodeFunctionJson): + pass + + +class ActionJson(BaseModel): + node_function_to_create: List[NodeFunctionJson] + create_init_function: Optional[InitFunctionJson] = None + create_start_function: Optional[StartFunctionJson] = None + create_stop_function: Optional[StopFunctionJson] = None + create_cleanup_function: Optional[CleanupFunctionJson] = None + + +class SimplifiedActionJson(BaseModel): + 
"""简化的动作JSON格式,直接定义节点列表和函数""" + nodes: Optional[Dict[str, Dict[str, Any]]] = None # 节点定义,格式为 {func_name: {node_name, mode, value}} + init_function: Optional[Dict[str, Any]] = None + start_function: Optional[Dict[str, Any]] = None + stop_function: Optional[Dict[str, Any]] = None + cleanup_function: Optional[Dict[str, Any]] = None + + +class WorkflowCreateJson(BaseModel): + name: str + action: List[Union[ActionJson, SimplifiedActionJson, 'WorkflowCreateJson', str]] + parameters: Optional[List[str]] = None + description: Optional[str] = None + + +class ExecuteProcedureJson(BaseModel): + register_node_list_from_csv_path: Optional[Dict[str, Any]] = None + create_flow: List[WorkflowCreateJson] + execute_flow: List[str] + + +class BaseClient(UniversalDriver): + client: Optional[Client] = None + _node_registry: Dict[str, OpcUaNodeBase] = {} + DEFAULT_ADDRESS_PATH = "" + _variables_to_find: Dict[str, Dict[str, Any]] = {} + _name_mapping: Dict[str, str] = {} # 英文名到中文名的映射 + _reverse_mapping: Dict[str, str] = {} # 中文名到英文名的映射 + # 直接缓存已找到的 ua.Node 对象,避免因字符串 NodeId 格式导致订阅失败 + _found_node_objects: Dict[str, Any] = {} + + def __init__(self): + super().__init__() + # 自动查找节点功能默认开启 + self._auto_find_nodes = True + # 初始化名称映射字典 + self._name_mapping = {} + self._reverse_mapping = {} + # 初始化线程锁(在子类中会被重新创建,这里提供默认实现) + import threading + self._client_lock = threading.RLock() + + def _set_client(self, client: Optional[Client]) -> None: + if client is None: + raise ValueError('client is not valid') + self.client = client + + def _connect(self) -> None: + logger.info('try to connect client...') + if self.client: + try: + self.client.connect() + logger.info('client connected!') + + # 连接后开始查找节点 + if self._variables_to_find: + self._find_nodes() + except Exception as e: + logger.error(f'client connect failed: {e}') + raise + else: + raise ValueError('client is not initialized') + + def _find_nodes(self) -> None: + """查找服务器中的节点""" + if not self.client: + raise ValueError('client is not connected') + + logger.info(f'开始查找 {len(self._variables_to_find)} 个节点...') + try: + # 获取根节点 + root = self.client.get_root_node() + objects = root.get_child(["0:Objects"]) + + # 记录查找前的状态 + before_count = len(self._node_registry) + + # 查找节点 + self._find_nodes_recursive(objects) + + # 记录查找后的状态 + after_count = len(self._node_registry) + newly_found = after_count - before_count + + logger.info(f"本次查找新增 {newly_found} 个节点,当前共 {after_count} 个") + + # 检查是否所有节点都已找到 + not_found = [] + for var_name, var_info in self._variables_to_find.items(): + if var_name not in self._node_registry: + not_found.append(var_name) + + if not_found: + logger.warning(f"⚠ 以下 {len(not_found)} 个节点未找到: {', '.join(not_found[:10])}{'...' 
if len(not_found) > 10 else ''}") + logger.warning(f"提示:请检查这些节点名称是否与服务器的 BrowseName 完全匹配(包括大小写、空格等)") + # 提供一个示例来帮助调试 + if not_found: + logger.info(f"尝试在服务器中查找第一个未找到的节点 '{not_found[0]}' 的相似节点...") + else: + logger.info(f"✓ 所有 {len(self._variables_to_find)} 个节点均已找到并注册") + + except Exception as e: + logger.error(f"查找节点失败: {e}") + traceback.print_exc() + + def _find_nodes_recursive(self, node) -> None: + """递归查找节点""" + try: + # 获取当前节点的浏览名称 + browse_name = node.get_browse_name() + node_name = browse_name.Name + + # 检查是否是我们要找的变量 + if node_name in self._variables_to_find and node_name not in self._node_registry: + var_info = self._variables_to_find[node_name] + node_type = var_info.get("node_type") + data_type = var_info.get("data_type") + node_id_str = str(node.nodeid) + + # 根据节点类型创建相应的对象 + if node_type == NodeType.VARIABLE: + self._node_registry[node_name] = Variable(self.client, node_name, node_id_str, data_type) + logger.info(f"✓ 找到变量节点: '{node_name}', NodeId: {node_id_str}, DataType: {data_type}") + # 缓存真实的 ua.Node 对象用于订阅 + self._found_node_objects[node_name] = node + elif node_type == NodeType.METHOD: + # 对于方法节点,需要获取父节点ID + parent_node = node.get_parent() + parent_node_id = str(parent_node.nodeid) + self._node_registry[node_name] = Method(self.client, node_name, node_id_str, parent_node_id, data_type) + logger.info(f"✓ 找到方法节点: '{node_name}', NodeId: {node_id_str}, ParentId: {parent_node_id}") + + # 递归处理子节点 + for child in node.get_children(): + self._find_nodes_recursive(child) + + except Exception as e: + # 忽略处理单个节点时的错误,继续处理其他节点 + pass + + @classmethod + def load_csv(cls, file_path: str) -> List[OpcUaNode]: + """ + 从CSV文件加载节点定义 + CSV文件需包含Name,NodeType,DataType列 + 可选包含EnglishName和NodeLanguage列 + """ + df = pd.read_csv(file_path) + df = df.drop_duplicates(subset='Name', keep='first') # 重复的数据应该报错 + nodes = [] + + # 检查是否包含英文名称列和节点语言列 + has_english_name = 'EnglishName' in df.columns + has_node_language = 'NodeLanguage' in df.columns + + # 如果存在英文名称列,创建名称映射字典 + name_mapping = {} + reverse_mapping = {} + + for _, row in df.iterrows(): + name = row.get('Name') + node_type_str = row.get('NodeType') + data_type_str = row.get('DataType') + + # 获取英文名称和节点语言(如果有) + english_name = row.get('EnglishName') if has_english_name else None + node_language = row.get('NodeLanguage') if has_node_language else 'English' # 默认为英文 + + # 如果有英文名称,添加到映射字典 + if english_name and not pd.isna(english_name) and node_language == 'Chinese': + name_mapping[english_name] = name + reverse_mapping[name] = english_name + + if not name or not node_type_str: + logger.warning(f"跳过无效行: 名称或节点类型缺失") + continue + + # 只支持VARIABLE和METHOD两种类型 + if node_type_str not in ['VARIABLE', 'METHOD']: + logger.warning(f"不支持的节点类型: {node_type_str},仅支持VARIABLE和METHOD") + continue + + try: + node_type = NodeType[node_type_str] + except KeyError: + logger.warning(f"无效的节点类型: {node_type_str}") + continue + + # 对于VARIABLE节点,必须指定数据类型 + if node_type == NodeType.VARIABLE: + if not data_type_str or pd.isna(data_type_str): + logger.warning(f"变量节点 {name} 必须指定数据类型") + continue + + try: + data_type = DataType[data_type_str] + except KeyError: + logger.warning(f"无效的数据类型: {data_type_str}") + continue + else: + # 对于METHOD节点,数据类型可选 + data_type = None + if data_type_str and not pd.isna(data_type_str): + try: + data_type = DataType[data_type_str] + except KeyError: + logger.warning(f"无效的数据类型: {data_type_str},将使用默认值") + + # 创建节点对象,节点ID留空,将通过自动查找功能获取 + nodes.append(OpcUaNode( + name=name, + node_type=node_type, + data_type=data_type + )) + + # 返回节点列表和名称映射字典 + return nodes, 
name_mapping, reverse_mapping + + def use_node(self, name: str) -> OpcUaNodeBase: + """ + 获取已注册的节点 + 如果节点尚未找到,会尝试再次查找 + 支持使用英文名称访问中文节点 + """ + # 检查是否使用英文名称访问中文节点 + if name in self._name_mapping: + chinese_name = self._name_mapping[name] + if chinese_name in self._node_registry: + node = self._node_registry[chinese_name] + logger.debug(f"使用节点: '{name}' -> '{chinese_name}', NodeId: {node.node_id}") + return node + elif chinese_name in self._variables_to_find: + logger.warning(f"节点 {chinese_name} (英文名: {name}) 尚未找到,尝试重新查找") + if self.client: + self._find_nodes() + if chinese_name in self._node_registry: + node = self._node_registry[chinese_name] + logger.info(f"重新查找成功: '{chinese_name}', NodeId: {node.node_id}") + return node + raise ValueError(f'节点 {chinese_name} (英文名: {name}) 未注册或未找到') + + # 直接使用原始名称查找 + if name not in self._node_registry: + if name in self._variables_to_find: + logger.warning(f"节点 {name} 尚未找到,尝试重新查找") + if self.client: + self._find_nodes() + if name in self._node_registry: + node = self._node_registry[name] + logger.info(f"重新查找成功: '{name}', NodeId: {node.node_id}") + return node + logger.error(f"❌ 节点 '{name}' 未注册或未找到。已注册节点: {list(self._node_registry.keys())[:5]}...") + raise ValueError(f'节点 {name} 未注册或未找到') + node = self._node_registry[name] + logger.debug(f"使用节点: '{name}', NodeId: {node.node_id}") + return node + + def get_node_registry(self) -> Dict[str, OpcUaNodeBase]: + return self._node_registry + + def register_node_list_from_csv_path(self, path: str = None) -> "BaseClient": + """从CSV文件注册节点""" + if path is None: + path = self.DEFAULT_ADDRESS_PATH + nodes, name_mapping, reverse_mapping = self.load_csv(path) + self._name_mapping.update(name_mapping) + self._reverse_mapping.update(reverse_mapping) + return self.register_node_list(nodes) + + def register_node_list(self, node_list: List[OpcUaNode]) -> "BaseClient": + """注册节点列表""" + if not node_list or len(node_list) == 0: + logger.warning('节点列表为空') + return self + + logger.info(f'开始注册 {len(node_list)} 个节点...') + new_nodes_count = 0 + for node in node_list: + if node is None: + continue + + if node.name in self._node_registry: + logger.debug(f'节点 "{node.name}" 已存在于注册表') + exist = self._node_registry[node.name] + if exist.type != node.node_type: + raise ValueError(f'节点 {node.name} 类型 {node.node_type} 与已存在的类型 {exist.type} 不一致') + continue + + # 将节点添加到待查找列表 + self._variables_to_find[node.name] = { + "node_type": node.node_type, + "data_type": node.data_type + } + new_nodes_count += 1 + logger.debug(f'添加节点 "{node.name}" ({node.node_type}) 到待查找列表') + + logger.info(f'节点注册完成:新增 {new_nodes_count} 个待查找节点,总计 {len(self._variables_to_find)} 个') + + # 如果客户端已连接,立即开始查找 + if self.client: + self._find_nodes() + + return self + + def run_opcua_workflow(self, workflow: OpcUaWorkflow) -> None: + if not self.client: + raise ValueError('client is not connected') + + logger.info(f'start to run workflow {workflow.name}...') + + for action in workflow.actions: + if isinstance(action, OpcUaWorkflow): + self.run_opcua_workflow(action) + elif callable(action): + action(self.use_node) + else: + raise ValueError(f'invalid action {action}') + + def call_lifecycle_fn( + self, + workflow: OpcUaWorkflowModel, + fn: Optional[Callable[[Callable], bool]], + ) -> bool: + if not fn: + raise ValueError('fn is not valid in call_lifecycle_fn') + try: + result = fn(self.use_node) + # 处理函数返回值可能是元组的情况 + if isinstance(result, tuple) and len(result) == 2: + # 第二个元素是错误标志,True表示出错,False表示成功 + value, error_flag = result + return not error_flag # 转换成True表示成功,False表示失败 + 
            return result
+        except Exception as e:
+            traceback.print_exc()
+            logger.error(f'execute {workflow.name} lifecycle failed, err: {e}')
+            return False
+
+    def run_opcua_workflow_model(self, workflow: OpcUaWorkflowModel) -> bool:
+        if not self.client:
+            raise ValueError('client is not connected')
+
+        logger.info(f'start to run workflow {workflow.name}...')
+
+        for action in workflow.actions:
+            if isinstance(action, OpcUaWorkflowModel):
+                if self.run_opcua_workflow_model(action):
+                    logger.info(f"{action.name} workflow done.")
+                    continue
+                else:
+                    logger.error(f"{action.name} workflow failed")
+                    return False
+            elif isinstance(action, WorkflowAction):
+                init = action.init
+                start = action.start
+                stop = action.stop
+                cleanup = action.cleanup
+                if not init and not start and not stop:
+                    raise ValueError(f'invalid action {action}')
+
+                is_err = False
+                try:
+                    if init and not self.call_lifecycle_fn(workflow, init):
+                        raise ValueError(f"{workflow.name} init action failed")
+                    if not self.call_lifecycle_fn(workflow, start):
+                        raise ValueError(f"{workflow.name} start action failed")
+                    if not self.call_lifecycle_fn(workflow, stop):
+                        raise ValueError(f"{workflow.name} stop action failed")
+                    logger.info(f"{workflow.name} action done.")
+                except Exception as e:
+                    is_err = True
+                    traceback.print_exc()
+                    logger.error(f"{workflow.name} action failed, err: {e}")
+                finally:
+                    logger.info(f"{workflow.name} try to run cleanup")
+                    if cleanup:
+                        self.call_lifecycle_fn(workflow, cleanup)
+                    else:
+                        logger.info(f"{workflow.name} cleanup is not defined")
+                    if is_err:
+                        return False
+                return True
+            else:
+                raise ValueError(f'invalid action type {type(action)}')
+
+        return True
+
+    function_name: Dict[str, Callable[[Callable[[str], OpcUaNodeBase]], bool]] = {}
+
+    def create_node_function(self, func_name: str = None, node_name: str = None, mode: str = None, value: Any = None, **kwargs) -> Callable[[Callable[[str], OpcUaNodeBase]], bool]:
+        def execute_node_function(use_node: Callable[[str], OpcUaNodeBase]) -> Union[bool, Tuple[Any, bool]]:
+            target_node = use_node(node_name)
+
+            # 检查是否有对应的参数值可用
+            current_value = value
+            if hasattr(self, '_workflow_params') and func_name in self._workflow_params:
+                current_value = self._workflow_params[func_name]
+                print(f"使用参数值 {func_name} = {current_value}")
+            else:
+                print(f"执行 {node_name}, {type(target_node).__name__}, {target_node.node_id}, {mode}, {current_value}")
+
+            if mode == 'read':
+                result_str = self.read_node(node_name)
+
+                try:
+                    # 将字符串转换为字典
+                    result_str = result_str.replace("'", '"')  # 替换单引号为双引号以便JSON解析
+                    result_dict = json.loads(result_str)
+
+                    # 从字典获取值和错误标志
+                    val = result_dict.get("value")
+                    err = result_dict.get("error")
+
+                    print(f"读取 {node_name} 返回值 = {val} (类型: {type(val).__name__}), 错误 = {err}")
+                    return val, err
+                except Exception as e:
+                    print(f"解析读取结果失败: {e}, 原始结果: {result_str}")
+                    return None, True
+            elif mode == 'write':
+                # 构造完整的JSON输入,包含node_name和value
+                input_json = json.dumps({"node_name": node_name, "value": current_value})
+                result_str = self.write_node(input_json)
+
+                try:
+                    # 解析返回的字符串为字典
+                    result_str = result_str.replace("'", '"')  # 替换单引号为双引号以便JSON解析
+                    result = json.loads(result_str)
+                    success = result.get("success", False)
+                    print(f"写入 {node_name} = {current_value}, 结果 = {success}")
+                    return success
+                except Exception as e:
+                    print(f"解析写入结果失败: {e}, 原始结果: {result_str}")
+                    return False
+            elif mode == 'call' and hasattr(target_node, 'call'):
+                args = current_value if isinstance(current_value, list) else [current_value]
+                result = target_node.call(*args)
+
print(f"调用方法 {node_name} 参数 = {args}, 返回值 = {result}") + return result + return False + + if func_name is None: + func_name = f"{node_name}_{mode}_{str(value)}" + + print(f"创建 node function: {mode}, {func_name}") + self.function_name[func_name] = execute_node_function + + return execute_node_function + + def create_init_function(self, func_name: str = None, write_nodes: Union[Dict[str, Any], List[str]] = None): + """ + 创建初始化函数 + + 参数: + func_name: 函数名称 + write_nodes: 写节点配置,可以是节点名列表[节点1,节点2]或节点值映射{节点1:值1,节点2:值2} + 值可以是具体值,也可以是参数名称字符串(将从_workflow_params中查找) + """ + if write_nodes is None: + raise ValueError("必须提供write_nodes参数") + + def execute_init_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: + """根据 _workflow_params 为各节点写入真实数值。 + + 约定: + - write_nodes 为 list 时: 节点名 == 参数名,从 _workflow_params[node_name] 取值; + - write_nodes 为 dict 时: + * value 为字符串且在 _workflow_params 中: 当作参数名去取值; + * 否则 value 视为常量直接写入。 + """ + + params = getattr(self, "_workflow_params", {}) or {} + + if isinstance(write_nodes, list): + # 节点列表形式: 节点名与参数名一致 + for node_name in write_nodes: + if node_name not in params: + print(f"初始化函数: 参数中未找到 {node_name}, 跳过写入") + continue + + current_value = params[node_name] + print(f"初始化函数: 写入节点 {node_name} = {current_value}") + input_json = json.dumps({"node_name": node_name, "value": current_value}) + result_str = self.write_node(input_json) + try: + result_str = result_str.replace("'", '"') + result = json.loads(result_str) + success = result.get("success", False) + print(f"初始化函数: 写入结果 = {success}") + except Exception as e: + print(f"初始化函数: 解析写入结果失败: {e}, 原始结果: {result_str}") + elif isinstance(write_nodes, dict): + # 映射形式: 节点名 -> 参数名或常量 + for node_name, node_value in write_nodes.items(): + if isinstance(node_value, str) and node_value in params: + current_value = params[node_value] + print(f"初始化函数: 从参数获取值 {node_value} = {current_value}") + else: + current_value = node_value + print(f"初始化函数: 使用常量值 写入 {node_name} = {current_value}") + + print(f"初始化函数: 写入节点 {node_name} = {current_value}") + input_json = json.dumps({"node_name": node_name, "value": current_value}) + result_str = self.write_node(input_json) + try: + result_str = result_str.replace("'", '"') + result = json.loads(result_str) + success = result.get("success", False) + print(f"初始化函数: 写入结果 = {success}") + except Exception as e: + print(f"初始化函数: 解析写入结果失败: {e}, 原始结果: {result_str}") + return True + + if func_name is None: + func_name = f"init_function_{str(time.time())}" + + print(f"创建初始化函数: {func_name}") + self.function_name[func_name] = execute_init_function + return execute_init_function + + def create_stop_function(self, func_name: str = None, write_nodes: Union[Dict[str, Any], List[str]] = None): + """ + 创建停止函数 + + 参数: + func_name: 函数名称 + write_nodes: 写节点配置,可以是节点名列表[节点1,节点2]或节点值映射{节点1:值1,节点2:值2} + """ + if write_nodes is None: + raise ValueError("必须提供write_nodes参数") + + def execute_stop_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: + if isinstance(write_nodes, list): + # 处理节点列表,默认值都是False + for node_name in write_nodes: + # 直接写入False + print(f"停止函数: 写入节点 {node_name} = False") + input_json = json.dumps({"node_name": node_name, "value": False}) + result_str = self.write_node(input_json) + try: + result_str = result_str.replace("'", '"') + result = json.loads(result_str) + success = result.get("success", False) + print(f"停止函数: 写入结果 = {success}") + except Exception as e: + print(f"停止函数: 解析写入结果失败: {e}, 原始结果: {result_str}") + elif isinstance(write_nodes, dict): + # 处理节点字典,使用指定的值 + for node_name, node_value 
in write_nodes.items(): + print(f"停止函数: 写入节点 {node_name} = {node_value}") + input_json = json.dumps({"node_name": node_name, "value": node_value}) + result_str = self.write_node(input_json) + try: + result_str = result_str.replace("'", '"') + result = json.loads(result_str) + success = result.get("success", False) + print(f"停止函数: 写入结果 = {success}") + except Exception as e: + print(f"停止函数: 解析写入结果失败: {e}, 原始结果: {result_str}") + return True + + if func_name is None: + func_name = f"stop_function_{str(time.time())}" + + print(f"创建停止函数: {func_name}") + self.function_name[func_name] = execute_stop_function + return execute_stop_function + + def create_cleanup_function(self, func_name: str = None, write_nodes: Union[Dict[str, Any], List[str]] = None): + """ + 创建清理函数 + + 参数: + func_name: 函数名称 + write_nodes: 写节点配置,可以是节点名列表[节点1,节点2]或节点值映射{节点1:值1,节点2:值2} + """ + if write_nodes is None: + raise ValueError("必须提供write_nodes参数") + + def execute_cleanup_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: + if isinstance(write_nodes, list): + # 处理节点列表,默认值都是False + for node_name in write_nodes: + # 直接写入False + print(f"清理函数: 写入节点 {node_name} = False") + input_json = json.dumps({"node_name": node_name, "value": False}) + result_str = self.write_node(input_json) + try: + result_str = result_str.replace("'", '"') + result = json.loads(result_str) + success = result.get("success", False) + print(f"清理函数: 写入结果 = {success}") + except Exception as e: + print(f"清理函数: 解析写入结果失败: {e}, 原始结果: {result_str}") + elif isinstance(write_nodes, dict): + # 处理节点字典,使用指定的值 + for node_name, node_value in write_nodes.items(): + print(f"清理函数: 写入节点 {node_name} = {node_value}") + input_json = json.dumps({"node_name": node_name, "value": node_value}) + result_str = self.write_node(input_json) + try: + result_str = result_str.replace("'", '"') + result = json.loads(result_str) + success = result.get("success", False) + print(f"清理函数: 写入结果 = {success}") + except Exception as e: + print(f"清理函数: 解析写入结果失败: {e}, 原始结果: {result_str}") + return True + + if func_name is None: + func_name = f"cleanup_function_{str(time.time())}" + + print(f"创建清理函数: {func_name}") + self.function_name[func_name] = execute_cleanup_function + return execute_cleanup_function + + def create_start_function(self, func_name: str, stop_condition_expression: str = "True", write_nodes: Union[Dict[str, Any], List[str]] = None, condition_nodes: Union[Dict[str, str], List[str]] = None): + """ + 创建开始函数 + + 参数: + func_name: 函数名称 + stop_condition_expression: 停止条件表达式,可直接引用节点名称 + write_nodes: 写节点配置,可以是节点名列表[节点1,节点2]或节点值映射{节点1:值1,节点2:值2} + condition_nodes: 条件节点列表 [节点名1, 节点名2] + """ + def execute_start_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: + """开始函数: 写入触发节点, 然后轮询条件节点直到满足停止条件。""" + + params = getattr(self, "_workflow_params", {}) or {} + + # 先处理写入节点(触发位等) + if write_nodes: + if isinstance(write_nodes, list): + # 列表形式: 节点名与参数名一致, 若无参数则直接写 True + for node_name in write_nodes: + if node_name in params: + current_value = params[node_name] + else: + current_value = True + + print(f"直接写入节点 {node_name} = {current_value}") + input_json = json.dumps({"node_name": node_name, "value": current_value}) + result_str = self.write_node(input_json) + try: + result_str = result_str.replace("'", '"') + result = json.loads(result_str) + success = result.get("success", False) + print(f"直接写入 {node_name} = {current_value}, 结果: {success}") + except Exception as e: + print(f"解析直接写入结果失败: {e}, 原始结果: {result_str}") + elif isinstance(write_nodes, dict): + # 字典形式: 节点名 -> 常量值(如 True/False) 
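+                    # Illustrative example of this dict form (node names here are
+                    # hypothetical, not taken from the actual CSV):
+                    #   write_nodes={"start_trigger": True} writes the trigger bit first;
+                    #   the while-loop below then polls condition_nodes until
+                    #   stop_condition_expression (e.g. "grab_complete == True")
+                    #   evaluates to True, at which point execute_start_function returns.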
+                    for node_name, node_value in write_nodes.items():
+                        if node_name in params:
+                            current_value = params[node_name]
+                        else:
+                            current_value = node_value
+
+                        print(f"直接写入节点 {node_name} = {current_value}")
+                        input_json = json.dumps({"node_name": node_name, "value": current_value})
+                        result_str = self.write_node(input_json)
+                        try:
+                            result_str = result_str.replace("'", '"')
+                            result = json.loads(result_str)
+                            success = result.get("success", False)
+                            print(f"直接写入 {node_name} = {current_value}, 结果: {success}")
+                        except Exception as e:
+                            print(f"解析直接写入结果失败: {e}, 原始结果: {result_str}")
+
+            # 如果没有条件节点,立即返回
+            if not condition_nodes:
+                return True
+
+            # 处理条件检查和等待
+            while True:
+                next_loop = False
+                condition_source = {}
+
+                # 直接读取条件节点
+                if isinstance(condition_nodes, list):
+                    # 处理节点列表
+                    for i, node_name in enumerate(condition_nodes):
+                        # 直接读取节点
+                        result_str = self.read_node(node_name)
+                        try:
+                            time.sleep(1)
+                            result_str = result_str.replace("'", '"')
+                            result_dict = json.loads(result_str)
+                            read_res = result_dict.get("value")
+                            read_err = result_dict.get("error", False)
+                            print(f"直接读取 {node_name} 返回值 = {read_res}, 错误 = {read_err}")
+
+                            if read_err:
+                                next_loop = True
+                                break
+
+                            # 将节点值存入条件源字典,使用节点名称作为键
+                            condition_source[node_name] = read_res
+                            # 为了向后兼容,也保留read_i格式
+                            condition_source[f"read_{i}"] = read_res
+                        except Exception as e:
+                            print(f"解析直接读取结果失败: {e}, 原始结果: {result_str}")
+                            read_res, read_err = None, True
+                            next_loop = True
+                            break
+                elif isinstance(condition_nodes, dict):
+                    # 处理节点字典
+                    for condition_func, node_name in condition_nodes.items():
+                        # 直接读取节点
+                        result_str = self.read_node(node_name)
+                        try:
+                            result_str = result_str.replace("'", '"')
+                            result_dict = json.loads(result_str)
+                            read_res = result_dict.get("value")
+                            read_err = result_dict.get("error", False)
+                            print(f"直接读取 {node_name} 返回值 = {read_res}, 错误 = {read_err}")
+
+                            if read_err:
+                                next_loop = True
+                                break
+
+                            # 将节点值存入条件源字典
+                            condition_source[node_name] = read_res
+                            # 也保存使用函数名作为键
+                            condition_source[condition_func] = read_res
+                        except Exception as e:
+                            print(f"解析直接读取结果失败: {e}, 原始结果: {result_str}")
+                            next_loop = True
+                            break
+
+                if not next_loop:
+                    if stop_condition_expression:
+                        # 添加调试信息
+                        print(f"条件源数据: {condition_source}")
+                        condition_source["__RESULT"] = None
+
+                        # 确保安全地执行条件表达式
+                        try:
+                            # 先尝试使用 eval 计算表达式(globals 传空字典,locals 为条件节点值)
+                            result = eval(stop_condition_expression, {}, condition_source)
+                            condition_source["__RESULT"] = result
+                        except Exception as e:
+                            print(f"使用eval执行表达式失败: {e}")
+                            try:
+                                # 回退到exec方式
+                                exec(f"__RESULT = {stop_condition_expression}", {}, condition_source)
+                            except Exception as e2:
+                                print(f"使用exec执行表达式也失败: {e2}")
+                                condition_source["__RESULT"] = False
+
+                        res = condition_source["__RESULT"]
+                        print(f"取得计算结果: {res}, 条件表达式: {stop_condition_expression}")
+
+                        if res:
+                            print("满足停止条件,结束工作流")
+                            break
+                    else:
+                        # 如果没有停止条件,直接退出
+                        break
+                else:
+                    time.sleep(0.3)
+
+            return True
+
+        self.function_name[func_name] = execute_start_function
+        return execute_start_function
+
+    def create_action_from_json(self, data: Union[Dict, Any]) -> WorkflowAction:
+        """
+        从JSON配置创建工作流动作
+
+        参数:
+            data: 动作JSON数据
+
+        返回:
+            WorkflowAction对象
+        """
+        # 初始化所需变量
+        start_function = None
+        write_nodes = {}
+        condition_nodes = []
+        stop_function = None
+        init_function = None
+        cleanup_function = None
+
+        # 提取start_function相关信息
+        if hasattr(data, "start_function") and data.start_function:
+            start_function = data.start_function
+            if "write_nodes" in start_function:
+                write_nodes = start_function["write_nodes"]
+            if "condition_nodes"
in start_function: + condition_nodes = start_function["condition_nodes"] + elif isinstance(data, dict) and data.get("start_function"): + start_function = data.get("start_function") + if "write_nodes" in start_function: + write_nodes = start_function["write_nodes"] + if "condition_nodes" in start_function: + condition_nodes = start_function["condition_nodes"] + + # 提取stop_function信息 + if hasattr(data, "stop_function") and data.stop_function: + stop_function = data.stop_function + elif isinstance(data, dict) and data.get("stop_function"): + stop_function = data.get("stop_function") + + # 提取init_function信息 + if hasattr(data, "init_function") and data.init_function: + init_function = data.init_function + elif isinstance(data, dict) and data.get("init_function"): + init_function = data.get("init_function") + + # 提取cleanup_function信息 + if hasattr(data, "cleanup_function") and data.cleanup_function: + cleanup_function = data.cleanup_function + elif isinstance(data, dict) and data.get("cleanup_function"): + cleanup_function = data.get("cleanup_function") + + # 创建工作流动作组件 + init = None + start = None + stop = None + cleanup = None + + # 处理init function + if init_function: + init_params = {"func_name": init_function.get("func_name")} + if "write_nodes" in init_function: + init_params["write_nodes"] = init_function["write_nodes"] + else: + # 如果没有write_nodes,创建一个空字典 + init_params["write_nodes"] = {} + + init = self.create_init_function(**init_params) + + # 处理start function + if start_function: + start_params = { + "func_name": start_function.get("func_name"), + "stop_condition_expression": start_function.get("stop_condition_expression", "True"), + "write_nodes": write_nodes, + "condition_nodes": condition_nodes + } + start = self.create_start_function(**start_params) + + # 处理stop function + if stop_function: + stop_params = { + "func_name": stop_function.get("func_name"), + "write_nodes": stop_function.get("write_nodes", {}) + } + stop = self.create_stop_function(**stop_params) + + # 处理cleanup function + if cleanup_function: + cleanup_params = { + "func_name": cleanup_function.get("func_name"), + "write_nodes": cleanup_function.get("write_nodes", {}) + } + cleanup = self.create_cleanup_function(**cleanup_params) + + return WorkflowAction(init=init, start=start, stop=stop, cleanup=cleanup) + + workflow_name: Dict[str, OpcUaWorkflowModel] = {} + + def create_workflow_from_json(self, data: List[Dict]) -> None: + """ + 从JSON配置创建工作流程序 + + 参数: + data: 工作流配置列表 + """ + for ind, flow_dict in enumerate(data): + print(f"正在创建 workflow {ind}, {flow_dict['name']}") + actions = [] + + for i in flow_dict["action"]: + if isinstance(i, str): + print(f"沿用已有 workflow 作为 action: {i}") + action = self.workflow_name[i] + else: + print("创建 action") + # 直接将字典转换为SimplifiedActionJson对象或直接使用字典 + action = self.create_action_from_json(i) + + actions.append(action) + + # 获取参数 + parameters = flow_dict.get("parameters", []) + + flow_instance = OpcUaWorkflowModel( + name=flow_dict["name"], + actions=actions, + parameters=parameters, + description=flow_dict.get("description", "") + ) + print(f"创建完成 workflow: {flow_dict['name']}") + self.workflow_name[flow_dict["name"]] = flow_instance + + def execute_workflow_from_json(self, data: List[str]) -> None: + for i in data: + print(f"正在执行 workflow: {i}") + self.run_opcua_workflow_model(self.workflow_name[i]) + + def execute_procedure_from_json(self, data: Union[ExecuteProcedureJson, Dict]) -> None: + """从JSON配置执行工作流程序""" + if isinstance(data, dict): + # 处理字典类型 + register_params = 
data.get("register_node_list_from_csv_path") + create_flow = data.get("create_flow", []) + execute_flow = data.get("execute_flow", []) + else: + # 处理Pydantic模型类型 + register_params = data.register_node_list_from_csv_path + create_flow = data.create_flow + execute_flow = data.execute_flow if hasattr(data, "execute_flow") else [] + + # 注册节点 + if register_params: + print(f"注册节点 csv: {register_params}") + self.register_node_list_from_csv_path(**register_params) + + # 创建工作流 + print("创建工作流") + self.create_workflow_from_json(create_flow) + + # 注册工作流为实例方法 + self.register_workflows_as_methods() + + # 如果存在execute_flow字段,则执行指定的工作流(向后兼容) + if execute_flow: + print("执行工作流") + self.execute_workflow_from_json(execute_flow) + + def register_workflows_as_methods(self) -> None: + """将工作流注册为实例方法""" + for workflow_name, workflow in self.workflow_name.items(): + # 获取工作流的参数信息(如果存在) + workflow_params = getattr(workflow, 'parameters', []) or [] + workflow_desc = getattr(workflow, 'description', None) or f"执行工作流: {workflow_name}" + + # 创建执行工作流的方法 + def create_workflow_method(wf_name=workflow_name, wf=workflow, params=workflow_params): + def workflow_method(*args, **kwargs): + logger.info(f"执行工作流: {wf_name}, 参数: {args}, {kwargs}") + + # 处理传入的参数 + if params and (args or kwargs): + # 将位置参数转换为关键字参数 + params_dict = {} + for i, param_name in enumerate(params): + if i < len(args): + params_dict[param_name] = args[i] + + # 合并关键字参数 + params_dict.update(kwargs) + + # 保存参数,供节点函数使用 + self._workflow_params = params_dict + else: + self._workflow_params = {} + + # 执行工作流 + result = self.run_opcua_workflow_model(wf) + + # 清理参数 + self._workflow_params = {} + + return result + + # 设置方法的文档字符串 + workflow_method.__doc__ = workflow_desc + if params: + param_doc = ", ".join(params) + workflow_method.__doc__ += f"\n参数: {param_doc}" + + return workflow_method + + # 注册为实例方法 + method = create_workflow_method() + setattr(self, workflow_name, method) + logger.info(f"已将工作流 '{workflow_name}' 注册为实例方法") + + def read_node(self, node_name: str) -> Dict[str, Any]: + """ + 读取节点值的便捷方法 + 返回包含result字段的字典 + """ + # 使用锁保护客户端访问 + with self._client_lock: + try: + node = self.use_node(node_name) + value, error = node.read() + + # 创建结果字典 + result = { + "value": value, + "error": error, + "node_name": node_name, + "timestamp": time.time() + } + + # 返回JSON字符串 + return json.dumps(result) + except Exception as e: + logger.error(f"读取节点 {node_name} 失败: {e}") + # 创建错误结果字典 + result = { + "value": None, + "error": True, + "node_name": node_name, + "error_message": str(e), + "timestamp": time.time() + } + return json.dumps(result) + + def write_node(self, json_input: str) -> str: + """ + 写入节点值的便捷方法 + 接受单个JSON格式的字符串作为输入,包含节点名称和值 + eg:'{\"node_name\":\"反应罐号码\",\"value\":\"2\"}' + 返回JSON格式的字符串,包含操作结果 + """ + # 使用锁保护客户端访问 + with self._client_lock: + try: + # 解析JSON格式的输入 + if not isinstance(json_input, str): + json_input = str(json_input) + + try: + input_data = json.loads(json_input) + if not isinstance(input_data, dict): + return json.dumps({"error": True, "error_message": "输入必须是包含node_name和value的JSON对象", "success": False}) + + # 从JSON中提取节点名称和值 + node_name = input_data.get("node_name") + value = input_data.get("value") + + if node_name is None: + return json.dumps({"error": True, "error_message": "JSON中缺少node_name字段", "success": False}) + except json.JSONDecodeError as e: + return json.dumps({"error": True, "error_message": f"JSON解析错误: {str(e)}", "success": False}) + + node = self.use_node(node_name) + error = node.write(value) + + # 创建结果字典 + result = { + "value": value, + 
"error": error, + "node_name": node_name, + "timestamp": time.time(), + "success": not error + } + + return json.dumps(result) + except Exception as e: + logger.error(f"写入节点失败: {e}") + result = { + "error": True, + "error_message": str(e), + "timestamp": time.time(), + "success": False + } + return json.dumps(result) + + def call_method(self, node_name: str, *args) -> Tuple[Any, bool]: + """ + 调用方法节点的便捷方法 + 返回 (返回值, 是否出错) + """ + try: + node = self.use_node(node_name) + if hasattr(node, 'call'): + return node.call(*args) + else: + logger.error(f"节点 {node_name} 不是方法节点") + return None, True + except Exception as e: + logger.error(f"调用方法 {node_name} 失败: {e}") + return None, True + + +class OpcUaClient(BaseClient): + def __init__( + self, + url: str, + deck: Optional[Union[post_process_deck, Dict[str, Any]]] = None, + config_path: str = None, + username: str = None, + password: str = None, + use_subscription: bool = True, + cache_timeout: float = 5.0, + subscription_interval: int = 500, + *args, + **kwargs, + ): + # 降低OPCUA库的日志级别 + import logging + logging.getLogger("opcua").setLevel(logging.WARNING) + + super().__init__() + + # ===== 关键修改:参照 BioyondWorkstation 处理 deck ===== + + super().__init__() + + # 处理 deck 参数 + if deck is None: + self.deck = post_process_deck(setup=True) + elif isinstance(deck, dict): + self.deck = post_process_deck(setup=True) + elif hasattr(deck, 'children'): + self.deck = deck + else: + raise ValueError(f"deck 参数类型不支持: {type(deck)}") + + if self.deck is None: + raise ValueError("Deck 配置不能为空") + + # 统计仓库信息 + warehouse_count = 0 + if hasattr(self.deck, 'children'): + warehouse_count = len(self.deck.children) + logger.info(f"Deck 初始化完成,加载 {warehouse_count} 个资源") + + + # OPC UA 客户端初始化 + client = Client(url) + + if username and password: + client.set_user(username) + client.set_password(password) + + self._set_client(client) + + # 订阅相关属性 + self._use_subscription = use_subscription + self._subscription = None + self._subscription_handles = {} + self._subscription_interval = subscription_interval + + # 缓存相关属性 + self._node_values = {} # 修改为支持时间戳的缓存结构 + self._cache_timeout = cache_timeout + + # 连接状态监控 + self._connection_check_interval = 30.0 # 连接检查间隔(秒) + self._connection_monitor_running = False + self._connection_monitor_thread = None + + # 添加线程锁,保护OPC UA客户端的并发访问 + import threading + self._client_lock = threading.RLock() + + # 连接到服务器 + self._connect() + + # 如果提供了配置文件路径,则加载配置并注册工作流 + if config_path: + self.load_config(config_path) + + # 启动连接监控 + self._start_connection_monitor() + + + def _connect(self) -> None: + """连接到OPC UA服务器""" + logger.info('尝试连接到 OPC UA 服务器...') + if self.client: + try: + self.client.connect() + logger.info('✓ 客户端已连接!') + + # 连接后开始查找节点 + if self._variables_to_find: + self._find_nodes() + + # 如果启用订阅模式,设置订阅 + if self._use_subscription: + self._setup_subscriptions() + else: + logger.info("订阅模式已禁用,将使用按需读取模式") + + except Exception as e: + logger.error(f'客户端连接失败: {e}') + raise + else: + raise ValueError('客户端未初始化') + + class SubscriptionHandler: + """freeopcua订阅处理器:必须实现 datachange_notification 方法""" + def __init__(self, outer): + self.outer = outer + + def datachange_notification(self, node, val, data): + # 委托给外层类的处理函数 + try: + self.outer._on_subscription_datachange(node, val, data) + except Exception as e: + logger.error(f"订阅数据回调处理失败: {e}") + + # 可选:事件通知占位,避免库调用时报缺失 + def event_notification(self, event): + pass + + def _setup_subscriptions(self): + """设置 OPC UA 订阅""" + if not self.client or not self._use_subscription: + return + + with self._client_lock: + try: 
+ logger.info(f"开始设置订阅 (发布间隔: {self._subscription_interval}ms)...") + + # 创建订阅 + handler = OpcUaClient.SubscriptionHandler(self) + self._subscription = self.client.create_subscription( + self._subscription_interval, + handler + ) + + # 为所有变量节点创建监控项 + subscribed_count = 0 + skipped_count = 0 + + for node_name, node in self._node_registry.items(): + # 只为变量节点创建订阅 + if node.type == NodeType.VARIABLE and node.node_id: + try: + # 优先使用在查找阶段缓存的真实 ua.Node 对象 + ua_node = self._found_node_objects.get(node_name) + if ua_node is None: + ua_node = self.client.get_node(node.node_id) + handle = self._subscription.subscribe_data_change(ua_node) + self._subscription_handles[node_name] = handle + subscribed_count += 1 + logger.debug(f"✓ 已订阅节点: {node_name}") + except Exception as e: + skipped_count += 1 + logger.warning(f"✗ 订阅节点 {node_name} 失败: {e}") + else: + skipped_count += 1 + + logger.info(f"订阅设置完成: 成功 {subscribed_count} 个, 跳过 {skipped_count} 个") + + except Exception as e: + logger.error(f"设置订阅失败: {e}") + traceback.print_exc() + # 订阅失败时回退到按需读取模式 + self._use_subscription = False + logger.warning("订阅模式设置失败,已自动切换到按需读取模式") + + def _on_subscription_datachange(self, node, val, data): + """订阅数据变化处理器(供内部 SubscriptionHandler 调用)""" + try: + node_id = str(node.nodeid) + current_time = time.time() + # 查找对应的节点名称 + for node_name, node_obj in self._node_registry.items(): + if node_obj.node_id == node_id: + self._node_values[node_name] = { + 'value': val, + 'timestamp': current_time, + 'source': 'subscription' + } + logger.debug(f"订阅更新: {node_name} = {val}") + break + except Exception as e: + logger.error(f"处理订阅数据失败: {e}") + + def get_node_value(self, name, use_cache=True, force_read=False): + """ + 获取节点值(智能缓存版本) + + 参数: + name: 节点名称(支持中文名或英文名) + use_cache: 是否使用缓存 + force_read: 是否强制从服务器读取(忽略缓存) + """ + # 处理名称映射 + if name in self._name_mapping: + chinese_name = self._name_mapping[name] + elif name in self._node_registry: + chinese_name = name + else: + raise ValueError(f"未找到名称为 '{name}' 的节点") + + # 如果强制读取,直接从服务器读取 + if force_read: + with self._client_lock: + value, _ = self.use_node(chinese_name).read() + # 更新缓存 + self._node_values[chinese_name] = { + 'value': value, + 'timestamp': time.time(), + 'source': 'forced_read' + } + return value + + # 检查缓存 + if use_cache and chinese_name in self._node_values: + cache_entry = self._node_values[chinese_name] + cache_age = time.time() - cache_entry['timestamp'] + + # 如果是订阅模式,缓存永久有效(由订阅更新) + # 如果是按需读取模式,检查缓存超时 + if cache_entry.get('source') == 'subscription' or cache_age < self._cache_timeout: + logger.debug(f"从缓存读取: {chinese_name} = {cache_entry['value']} (age: {cache_age:.2f}s, source: {cache_entry.get('source', 'unknown')})") + return cache_entry['value'] + + # 缓存过期或不存在,从服务器读取 + with self._client_lock: + try: + value, error = self.use_node(chinese_name).read() + if not error: + # 更新缓存 + self._node_values[chinese_name] = { + 'value': value, + 'timestamp': time.time(), + 'source': 'on_demand_read' + } + return value + else: + logger.warning(f"读取节点 {chinese_name} 失败") + return None + except Exception as e: + logger.error(f"读取节点 {chinese_name} 出错: {e}") + return None + + def set_node_value(self, name, value): + """ + 设置节点值 + 写入成功后会立即更新本地缓存 + """ + # 处理名称映射 + if name in self._name_mapping: + chinese_name = self._name_mapping[name] + elif name in self._node_registry: + chinese_name = name + else: + raise ValueError(f"未找到名称为 '{name}' 的节点") + + with self._client_lock: + try: + node = self.use_node(chinese_name) + error = node.write(value) + + if not error: + # 写入成功,立即更新缓存 + 
self._node_values[chinese_name] = { + 'value': value, + 'timestamp': time.time(), + 'source': 'write' + } + logger.debug(f"写入成功: {chinese_name} = {value}") + return True + else: + logger.warning(f"写入节点 {chinese_name} 失败") + return False + except Exception as e: + logger.error(f"写入节点 {chinese_name} 出错: {e}") + return False + + def _check_connection(self) -> bool: + """检查连接状态""" + try: + with self._client_lock: + if self.client: + # 尝试获取命名空间数组来验证连接 + self.client.get_namespace_array() + return True + except Exception as e: + logger.warning(f"连接检查失败: {e}") + return False + return False + + def _connection_monitor_worker(self): + """连接监控线程工作函数""" + self._connection_monitor_running = True + logger.info(f"连接监控线程已启动 (检查间隔: {self._connection_check_interval}秒)") + + reconnect_attempts = 0 + max_reconnect_attempts = 5 + + while self._connection_monitor_running: + try: + # 检查连接状态 + if not self._check_connection(): + logger.warning("检测到连接断开,尝试重新连接...") + reconnect_attempts += 1 + + if reconnect_attempts <= max_reconnect_attempts: + try: + # 尝试重新连接 + with self._client_lock: + if self.client: + try: + self.client.disconnect() + except: + pass + + self.client.connect() + logger.info("✓ 重新连接成功") + + # 重新设置订阅 + if self._use_subscription: + self._setup_subscriptions() + + reconnect_attempts = 0 + except Exception as e: + logger.error(f"重新连接失败 (尝试 {reconnect_attempts}/{max_reconnect_attempts}): {e}") + time.sleep(5) # 重连失败后等待5秒 + else: + logger.error(f"达到最大重连次数 ({max_reconnect_attempts}),停止重连") + self._connection_monitor_running = False + else: + # 连接正常,重置重连计数 + reconnect_attempts = 0 + + except Exception as e: + logger.error(f"连接监控出错: {e}") + + # 等待下次检查 + time.sleep(self._connection_check_interval) + + def _start_connection_monitor(self): + """启动连接监控线程""" + if self._connection_monitor_thread is not None and self._connection_monitor_thread.is_alive(): + logger.warning("连接监控线程已在运行") + return + + import threading + self._connection_monitor_thread = threading.Thread( + target=self._connection_monitor_worker, + daemon=True, + name="OpcUaConnectionMonitor" + ) + self._connection_monitor_thread.start() + + def _stop_connection_monitor(self): + """停止连接监控线程""" + self._connection_monitor_running = False + if self._connection_monitor_thread and self._connection_monitor_thread.is_alive(): + self._connection_monitor_thread.join(timeout=2.0) + logger.info("连接监控线程已停止") + + def read_node(self, node_name: str) -> str: + """ + 读取节点值的便捷方法(使用缓存) + 返回JSON格式字符串 + """ + try: + # 使用get_node_value方法,自动处理缓存 + value = self.get_node_value(node_name, use_cache=True) + + # 获取缓存信息 + chinese_name = self._name_mapping.get(node_name, node_name) + cache_info = self._node_values.get(chinese_name, {}) + + result = { + "value": value, + "error": False, + "node_name": node_name, + "timestamp": time.time(), + "cache_age": time.time() - cache_info.get('timestamp', time.time()), + "source": cache_info.get('source', 'unknown') + } + + return json.dumps(result) + except Exception as e: + logger.error(f"读取节点 {node_name} 失败: {e}") + result = { + "value": None, + "error": True, + "node_name": node_name, + "error_message": str(e), + "timestamp": time.time() + } + return json.dumps(result) + + def get_cache_stats(self) -> Dict[str, Any]: + """获取缓存统计信息""" + current_time = time.time() + stats = { + 'total_cached_nodes': len(self._node_values), + 'subscription_nodes': 0, + 'on_demand_nodes': 0, + 'expired_nodes': 0, + 'cache_timeout': self._cache_timeout, + 'using_subscription': self._use_subscription + } + + for node_name, cache_entry in 
self._node_values.items(): + source = cache_entry.get('source', 'unknown') + cache_age = current_time - cache_entry['timestamp'] + + if source == 'subscription': + stats['subscription_nodes'] += 1 + elif source in ['on_demand_read', 'forced_read', 'write']: + stats['on_demand_nodes'] += 1 + + if cache_age > self._cache_timeout: + stats['expired_nodes'] += 1 + + return stats + + def print_cache_stats(self): + """打印缓存统计信息""" + stats = self.get_cache_stats() + print("\n" + "="*80) + print("缓存统计信息") + print("="*80) + print(f"总缓存节点数: {stats['total_cached_nodes']}") + print(f"订阅模式: {'启用' if stats['using_subscription'] else '禁用'}") + print(f" - 订阅更新节点: {stats['subscription_nodes']}") + print(f" - 按需读取节点: {stats['on_demand_nodes']}") + print(f" - 已过期节点: {stats['expired_nodes']}") + print(f"缓存超时时间: {stats['cache_timeout']}秒") + print("="*80 + "\n") + + def load_config(self, config_path: str) -> None: + """从JSON配置文件加载并注册工作流""" + try: + with open(config_path, 'r', encoding='utf-8') as f: + config_data = json.load(f) + + # 处理节点注册 + if "register_node_list_from_csv_path" in config_data: + config_dir = os.path.dirname(os.path.abspath(config_path)) + + if "path" in config_data["register_node_list_from_csv_path"]: + csv_path = config_data["register_node_list_from_csv_path"]["path"] + if not os.path.isabs(csv_path): + csv_path = os.path.join(config_dir, csv_path) + config_data["register_node_list_from_csv_path"]["path"] = csv_path + + self.register_node_list_from_csv_path(**config_data["register_node_list_from_csv_path"]) + + if self.client and self._variables_to_find: + logger.info("CSV加载完成,开始查找服务器节点...") + self._find_nodes() + + # 处理工作流创建 + if "create_flow" in config_data: + self.create_workflow_from_json(config_data["create_flow"]) + self.register_workflows_as_methods() + + # 将所有节点注册为属性 + self._register_nodes_as_attributes() + + # 打印统计信息 + found_count = len(self._node_registry) + total_count = len(self._variables_to_find) + if found_count < total_count: + logger.warning(f"节点查找完成:找到 {found_count}/{total_count} 个节点") + else: + logger.info(f"✓ 节点查找完成:所有 {found_count} 个节点均已找到") + + # 如果使用订阅模式,重新设置订阅(确保新节点被订阅) + if self._use_subscription and found_count > 0: + self._setup_subscriptions() + + logger.info(f"成功从 {config_path} 加载配置") + except Exception as e: + logger.error(f"加载配置文件 {config_path} 失败: {e}") + traceback.print_exc() + + def disconnect(self): + """断开连接并清理资源""" + logger.info("正在断开连接...") + + # 停止连接监控 + self._stop_connection_monitor() + + # 删除订阅 + if self._subscription: + try: + with self._client_lock: + self._subscription.delete() + logger.info("订阅已删除") + except Exception as e: + logger.warning(f"删除订阅失败: {e}") + + # 断开客户端连接 + if self.client: + try: + with self._client_lock: + self.client.disconnect() + logger.info("✓ OPC UA 客户端已断开连接") + except Exception as e: + logger.error(f"断开连接失败: {e}") + + def _register_nodes_as_attributes(self): + """将所有节点注册为实例属性""" + for node_name, node in self._node_registry.items(): + if not node.node_id or node.node_id == "": + logger.warning(f"⚠ 节点 '{node_name}' 的 node_id 为空,跳过注册为属性") + continue + + eng_name = self._reverse_mapping.get(node_name) + attr_name = eng_name if eng_name else node_name.replace(' ', '_').replace('-', '_') + + def create_property_getter(node_key): + def getter(self): + return self.get_node_value(node_key, use_cache=True) + return getter + + setattr(OpcUaClient, attr_name, property(create_property_getter(node_name))) + logger.debug(f"已注册节点 '{node_name}' 为属性 '{attr_name}'") + + def post_init(self, ros_node): + """ROS2 节点就绪后的初始化""" + if not 
(hasattr(self, 'deck') and self.deck): + return + + if not (hasattr(ros_node, 'resource_tracker') and ros_node.resource_tracker): + logger.warning("resource_tracker 不存在,无法注册 deck") + return + + # 1. 本地注册(必需) + ros_node.resource_tracker.add_resource(self.deck) + + # 2. 上传云端 + try: + from unilabos.ros.nodes.base_device_node import ROS2DeviceNode + ROS2DeviceNode.run_async_func( + ros_node.update_resource, + True, + resources=[self.deck] + ) + logger.info("Deck 已上传到云端") + except Exception as e: + logger.error(f"上传失败: {e}") + + +if __name__ == '__main__': + # 示例用法 + + # 使用配置文件创建客户端并自动注册工作流 + import os + current_dir = os.path.dirname(os.path.abspath(__file__)) + config_path = os.path.join(current_dir, "opcua_huairou.json") + + # 创建OPC UA客户端并加载配置 + try: + client = OpcUaClient( + url="opc.tcp://192.168.1.88:4840/freeopcua/server/", # 替换为实际的OPC UA服务器地址 + config_path="D:\\Uni-Lab-OS\\unilabos\\device_comms\\opcua_client\\opcua_huairou.json" # 传入配置文件路径 + ) + + # 列出所有已注册的工作流 + print("\n已注册的工作流:") + for workflow_name in client.workflow_name: + print(f" - {workflow_name}") + + # 测试trigger_grab_action工作流 - 使用英文参数名 + print("\n测试trigger_grab_action工作流 - 使用英文参数名:") + client.trigger_grab_action(reaction_tank_number=2, raw_tank_number=2) + # client.set_node_value("reaction_tank_number", 2) + + + # 读取节点值 - 使用英文节点名 + grab_complete = client.get_node_value("grab_complete") + reaction_tank = client.get_node_value("reaction_tank_number") + raw_tank = client.get_node_value("raw_tank_number") + + print(f"\n执行后状态检查 (使用英文节点名):") + print(f" - 抓取完成状态: {grab_complete}") + print(f" - 当前反应罐号码: {reaction_tank}") + print(f" - 当前原料罐号码: {raw_tank}") + + # 测试节点值写入 - 使用英文节点名 + print("\n测试节点值写入 (使用英文节点名):") + success = client.set_node_value("atomization_fast_speed", 150.5) + print(f" - 写入搅拌浆雾化快速 = 150.5, 结果: {success}") + + # 读取写入的值 + atomization_speed = client.get_node_value("atomization_fast_speed") + print(f" - 读取搅拌浆雾化快速: {atomization_speed}") + + # 断开连接 + client.disconnect() + + except Exception as e: + print(f"错误: {e}") + traceback.print_exc() + + diff --git a/unilabos/devices/workstation/post_process/post_process_station.json b/unilabos/devices/workstation/post_process/post_process_station.json new file mode 100644 index 00000000..4433a159 --- /dev/null +++ b/unilabos/devices/workstation/post_process/post_process_station.json @@ -0,0 +1,45 @@ +{ + "nodes": [ + { + "id": "post_process_station", + "name": "post_process_station", + "children": [ + "post_process_deck" + ], + "parent": null, + "type": "device", + "class": "post_process_station", + "config": { + "url": "opc.tcp://LAPTOP-AN6QGCSD:53530/OPCUA/SimulationServer", + "config_path": "C:\\Users\\Roy\\Desktop\\DPLC\\Uni-Lab-OS\\unilabos\\devices\\workstation\\post_process\\opcua_huairou.json", + "deck": { + "data": { + "_resource_child_name": "post_process_deck", + "_resource_type": "unilabos.devices.workstation.post_process.decks:post_process_deck" + } + } + }, + "data": { + } + }, + { + "id": "post_process_deck", + "name": "post_process_deck", + "sample_id": null, + "children": [], + "parent": "post_process_station", + "type": "deck", + "class": "post_process_deck", + "position": { + "x": 0, + "y": 0, + "z": 0 + }, + "config": { + "type": "post_process_deck", + "setup": true + }, + "data": {} + } + ] +} \ No newline at end of file diff --git a/unilabos/devices/workstation/post_process/post_process_warehouse.py b/unilabos/devices/workstation/post_process/post_process_warehouse.py new file mode 100644 index 00000000..62dda166 --- /dev/null +++ 
b/unilabos/devices/workstation/post_process/post_process_warehouse.py @@ -0,0 +1,160 @@ +from typing import Dict, Optional, List, Union +from pylabrobot.resources import Coordinate +from pylabrobot.resources.carrier import ResourceHolder, create_homogeneous_resources + +from unilabos.resources.itemized_carrier import ItemizedCarrier, ResourcePLR + + +LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + +def warehouse_factory( + name: str, + num_items_x: int = 1, + num_items_y: int = 4, + num_items_z: int = 4, + dx: float = 137.0, + dy: float = 96.0, + dz: float = 120.0, + item_dx: float = 10.0, + item_dy: float = 10.0, + item_dz: float = 10.0, + resource_size_x: float = 127.0, + resource_size_y: float = 86.0, + resource_size_z: float = 25.0, + removed_positions: Optional[List[int]] = None, + empty: bool = False, + category: str = "warehouse", + model: Optional[str] = None, + col_offset: int = 0, # 列起始偏移量,用于生成5-8等命名 + layout: str = "col-major", # 新增:排序方式,"col-major"=列优先,"row-major"=行优先 +): + # 创建位置坐标 + locations = [] + + for layer in range(num_items_z): # 层 + for row in range(num_items_y): # 行 + for col in range(num_items_x): # 列 + # 计算位置 + x = dx + col * item_dx + + # 根据 layout 决定 y 坐标计算 + if layout == "row-major": + # 行优先:row=0(第1行) 应该显示在上方,y 值最小 + y = dy + row * item_dy + else: + # 列优先:保持原逻辑 + y = dy + (num_items_y - row - 1) * item_dy + + z = dz + (num_items_z - layer - 1) * item_dz + locations.append(Coordinate(x, y, z)) + + if removed_positions: + locations = [loc for i, loc in enumerate(locations) if i not in removed_positions] + + _sites = create_homogeneous_resources( + klass=ResourceHolder, + locations=locations, + resource_size_x=resource_size_x, + resource_size_y=resource_size_y, + resource_size_z=resource_size_z, + name_prefix=name, + ) + + len_x, len_y = (num_items_x, num_items_y) if num_items_z == 1 else (num_items_y, num_items_z) if num_items_x == 1 else (num_items_x, num_items_z) + + # 🔑 修改:使用数字命名,最上面是4321,最下面是12,11,10,9 + # 命名顺序必须与坐标生成顺序一致:层 → 行 → 列 + keys = [] + for layer in range(num_items_z): # 遍历每一层 + for row in range(num_items_y): # 遍历每一行 + for col in range(num_items_x): # 遍历每一列 + # 倒序计算全局行号:row=0 应该对应 global_row=0(第1行:4321) + # row=1 应该对应 global_row=1(第2行:8765) + # row=2 应该对应 global_row=2(第3行:12,11,10,9) + # 但前端显示时 row=2 在最上面,所以需要反转 + reversed_row = (num_items_y - 1 - row) # row=0→reversed_row=2, row=1→reversed_row=1, row=2→reversed_row=0 + global_row = layer * num_items_y + reversed_row + + # 每行的最大数字 = (global_row + 1) * num_items_x + col_offset + base_num = (global_row + 1) * num_items_x + col_offset + + # 从右到左递减:4,3,2,1 + key = str(base_num - col) + keys.append(key) + + sites = {i: site for i, site in zip(keys, _sites.values())} + + return WareHouse( + name=name, + size_x=dx + item_dx * num_items_x, + size_y=dy + item_dy * num_items_y, + size_z=dz + item_dz * num_items_z, + num_items_x = num_items_x, + num_items_y = num_items_y, + num_items_z = num_items_z, + ordering_layout=layout, # 传递排序方式到 ordering_layout + sites=sites, + category=category, + model=model, + ) + + +class WareHouse(ItemizedCarrier): + """堆栈载体类 - 可容纳16个板位的载体(4层x4行x1列)""" + def __init__( + self, + name: str, + size_x: float, + size_y: float, + size_z: float, + num_items_x: int, + num_items_y: int, + num_items_z: int, + layout: str = "x-y", + sites: Optional[Dict[Union[int, str], Optional[ResourcePLR]]] = None, + category: str = "warehouse", + model: Optional[str] = None, + ordering_layout: str = "col-major", + **kwargs + ): + super().__init__( + name=name, + size_x=size_x, + size_y=size_y, + size_z=size_z, + 
# ordered_items=ordered_items, + # ordering=ordering, + num_items_x=num_items_x, + num_items_y=num_items_y, + num_items_z=num_items_z, + layout=layout, + sites=sites, + category=category, + model=model, + ) + + # 保存排序方式,供graphio.py的坐标映射使用 + # 使用独立属性避免与父类的layout冲突 + self.ordering_layout = ordering_layout + + def serialize(self) -> dict: + """序列化时保存 ordering_layout 属性""" + data = super().serialize() + data['ordering_layout'] = self.ordering_layout + return data + + def get_site_by_layer_position(self, row: int, col: int, layer: int) -> ResourceHolder: + if not (0 <= layer < 4 and 0 <= row < 4 and 0 <= col < 1): + raise ValueError("无效的位置: layer={}, row={}, col={}".format(layer, row, col)) + + site_index = layer * 4 + row * 1 + col + return self.sites[site_index] + + def add_rack_to_position(self, row: int, col: int, layer: int, rack) -> None: + site = self.get_site_by_layer_position(row, col, layer) + site.assign_child_resource(rack) + + def get_rack_at_position(self, row: int, col: int, layer: int): + site = self.get_site_by_layer_position(row, col, layer) + return site.resource diff --git a/unilabos/devices/workstation/post_process/warehouses.py b/unilabos/devices/workstation/post_process/warehouses.py new file mode 100644 index 00000000..385f026c --- /dev/null +++ b/unilabos/devices/workstation/post_process/warehouses.py @@ -0,0 +1,38 @@ +from unilabos.devices.workstation.post_process.post_process_warehouse import WareHouse, warehouse_factory + + + +# =================== Other =================== + + +def post_process_warehouse_4x3x1(name: str) -> WareHouse: + """创建post_process 4x3x1仓库""" + return warehouse_factory( + name=name, + num_items_x=4, + num_items_y=3, + num_items_z=1, + dx=10.0, + dy=10.0, + dz=10.0, + item_dx=137.0, + item_dy=96.0, + item_dz=120.0, + category="warehouse", + ) + +def post_process_warehouse_4x3x1_2(name: str) -> WareHouse: + """已弃用:创建post_process 4x3x1仓库""" + return warehouse_factory( + name=name, + num_items_x=4, + num_items_y=3, + num_items_z=1, + dx=12.0, + dy=12.0, + dz=12.0, + item_dx=137.0, + item_dy=96.0, + item_dz=120.0, + category="warehouse", + ) diff --git a/unilabos/registry/devices/post_process_station.yaml b/unilabos/registry/devices/post_process_station.yaml new file mode 100644 index 00000000..cf4a11b0 --- /dev/null +++ b/unilabos/registry/devices/post_process_station.yaml @@ -0,0 +1,630 @@ +post_process_station: + category: + - post_process_station + class: + action_value_mappings: + disconnect: + feedback: {} + goal: + command: {} + goal_default: + command: '' + handles: {} + result: + success: success + schema: + description: '' + properties: + feedback: + properties: + status: + type: string + required: + - status + title: SendCmd_Feedback + type: object + goal: + properties: + command: + type: string + required: + - command + title: SendCmd_Goal + type: object + result: + properties: + return_info: + type: string + success: + type: boolean + required: + - return_info + - success + title: SendCmd_Result + type: object + required: + - goal + title: SendCmd + type: object + type: SendCmd + read_node: + feedback: + result: result + goal: + command: node_name + goal_default: + command: '' + handles: {} + result: + success: success + schema: + description: '' + properties: + feedback: + properties: + status: + type: string + required: + - status + title: SendCmd_Feedback + type: object + goal: + properties: + command: + type: string + required: + - command + title: SendCmd_Goal + type: object + result: + properties: + return_info: + type: string + 
success: + type: boolean + required: + - return_info + - success + title: SendCmd_Result + type: object + required: + - goal + title: SendCmd + type: object + type: SendCmd + trigger_cleaning_action: + feedback: {} + goal: + acetone_inner_wall_cleaning_count: acetone_inner_wall_cleaning_count + acetone_inner_wall_cleaning_injection: acetone_inner_wall_cleaning_injection + acetone_inner_wall_cleaning_waste_time: acetone_inner_wall_cleaning_waste_time + acetone_outer_wall_cleaning_count: acetone_outer_wall_cleaning_count + acetone_outer_wall_cleaning_injection: acetone_outer_wall_cleaning_injection + acetone_outer_wall_cleaning_wait_time: acetone_outer_wall_cleaning_wait_time + acetone_outer_wall_cleaning_waste_time: acetone_outer_wall_cleaning_waste_time + acetone_pump_cleaning_suction_count: acetone_pump_cleaning_suction_count + acetone_stirrer_cleaning_count: acetone_stirrer_cleaning_count + acetone_stirrer_cleaning_injection: acetone_stirrer_cleaning_injection + acetone_stirrer_cleaning_wait_time: acetone_stirrer_cleaning_wait_time + acetone_stirrer_cleaning_waste_time: acetone_stirrer_cleaning_waste_time + filtration_liquid_selection: filtration_liquid_selection + injection_pump_forward_empty_suction_count: injection_pump_forward_empty_suction_count + injection_pump_reverse_empty_suction_count: injection_pump_reverse_empty_suction_count + nmp_inner_wall_cleaning_count: nmp_inner_wall_cleaning_count + nmp_inner_wall_cleaning_injection: nmp_inner_wall_cleaning_injection + nmp_inner_wall_cleaning_waste_time: nmp_inner_wall_cleaning_waste_time + nmp_outer_wall_cleaning_count: nmp_outer_wall_cleaning_count + nmp_outer_wall_cleaning_injection: nmp_outer_wall_cleaning_injection + nmp_outer_wall_cleaning_wait_time: nmp_outer_wall_cleaning_wait_time + nmp_outer_wall_cleaning_waste_time: nmp_outer_wall_cleaning_waste_time + nmp_pump_cleaning_suction_count: nmp_pump_cleaning_suction_count + nmp_stirrer_cleaning_count: nmp_stirrer_cleaning_count + nmp_stirrer_cleaning_injection: nmp_stirrer_cleaning_injection + nmp_stirrer_cleaning_wait_time: nmp_stirrer_cleaning_wait_time + nmp_stirrer_cleaning_waste_time: nmp_stirrer_cleaning_waste_time + pipe_blowing_time: pipe_blowing_time + water_inner_wall_cleaning_count: water_inner_wall_cleaning_count + water_inner_wall_cleaning_injection: water_inner_wall_cleaning_injection + water_inner_wall_cleaning_waste_time: water_inner_wall_cleaning_waste_time + water_outer_wall_cleaning_count: water_outer_wall_cleaning_count + water_outer_wall_cleaning_injection: water_outer_wall_cleaning_injection + water_outer_wall_cleaning_wait_time: water_outer_wall_cleaning_wait_time + water_outer_wall_cleaning_waste_time: water_outer_wall_cleaning_waste_time + water_pump_cleaning_suction_count: water_pump_cleaning_suction_count + water_stirrer_cleaning_count: water_stirrer_cleaning_count + water_stirrer_cleaning_injection: water_stirrer_cleaning_injection + water_stirrer_cleaning_wait_time: water_stirrer_cleaning_wait_time + water_stirrer_cleaning_waste_time: water_stirrer_cleaning_waste_time + goal_default: + acetone_inner_wall_cleaning_count: 0 + acetone_inner_wall_cleaning_injection: 0.0 + acetone_inner_wall_cleaning_waste_time: 0 + acetone_outer_wall_cleaning_count: 0 + acetone_outer_wall_cleaning_injection: 0.0 + acetone_outer_wall_cleaning_wait_time: 0 + acetone_outer_wall_cleaning_waste_time: 0 + acetone_pump_cleaning_suction_count: 0 + acetone_stirrer_cleaning_count: 0 + acetone_stirrer_cleaning_injection: 0.0 + acetone_stirrer_cleaning_wait_time: 0 + 
acetone_stirrer_cleaning_waste_time: 0 + filtration_liquid_selection: 0 + injection_pump_forward_empty_suction_count: 0 + injection_pump_reverse_empty_suction_count: 0 + nmp_inner_wall_cleaning_count: 0 + nmp_inner_wall_cleaning_injection: 0.0 + nmp_inner_wall_cleaning_waste_time: 0 + nmp_outer_wall_cleaning_count: 0 + nmp_outer_wall_cleaning_injection: 0.0 + nmp_outer_wall_cleaning_wait_time: 0 + nmp_outer_wall_cleaning_waste_time: 0 + nmp_pump_cleaning_suction_count: 0 + nmp_stirrer_cleaning_count: 0 + nmp_stirrer_cleaning_injection: 0.0 + nmp_stirrer_cleaning_wait_time: 0 + nmp_stirrer_cleaning_waste_time: 0 + pipe_blowing_time: 0 + water_inner_wall_cleaning_count: 0 + water_inner_wall_cleaning_injection: 0.0 + water_inner_wall_cleaning_waste_time: 0 + water_outer_wall_cleaning_count: 0 + water_outer_wall_cleaning_injection: 0.0 + water_outer_wall_cleaning_wait_time: 0 + water_outer_wall_cleaning_waste_time: 0 + water_pump_cleaning_suction_count: 0 + water_stirrer_cleaning_count: 0 + water_stirrer_cleaning_injection: 0.0 + water_stirrer_cleaning_wait_time: 0 + water_stirrer_cleaning_waste_time: 0 + handles: {} + result: + return_info: return_info + schema: + description: '' + properties: + feedback: + properties: {} + required: [] + title: PostProcessTriggerClean_Feedback + type: object + goal: + properties: + acetone_inner_wall_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_inner_wall_cleaning_injection: + type: number + acetone_inner_wall_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_outer_wall_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_outer_wall_cleaning_injection: + type: number + acetone_outer_wall_cleaning_wait_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_outer_wall_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_pump_cleaning_suction_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_stirrer_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_stirrer_cleaning_injection: + type: number + acetone_stirrer_cleaning_wait_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + acetone_stirrer_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + filtration_liquid_selection: + maximum: 2147483647 + minimum: -2147483648 + type: integer + injection_pump_forward_empty_suction_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + injection_pump_reverse_empty_suction_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_inner_wall_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_inner_wall_cleaning_injection: + type: number + nmp_inner_wall_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_outer_wall_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_outer_wall_cleaning_injection: + type: number + nmp_outer_wall_cleaning_wait_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_outer_wall_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_pump_cleaning_suction_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_stirrer_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_stirrer_cleaning_injection: + type: number + 
nmp_stirrer_cleaning_wait_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + nmp_stirrer_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + pipe_blowing_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_inner_wall_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_inner_wall_cleaning_injection: + type: number + water_inner_wall_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_outer_wall_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_outer_wall_cleaning_injection: + type: number + water_outer_wall_cleaning_wait_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_outer_wall_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_pump_cleaning_suction_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_stirrer_cleaning_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_stirrer_cleaning_injection: + type: number + water_stirrer_cleaning_wait_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + water_stirrer_cleaning_waste_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + required: + - nmp_outer_wall_cleaning_injection + - nmp_outer_wall_cleaning_count + - nmp_outer_wall_cleaning_wait_time + - nmp_outer_wall_cleaning_waste_time + - nmp_inner_wall_cleaning_injection + - nmp_inner_wall_cleaning_count + - nmp_pump_cleaning_suction_count + - nmp_inner_wall_cleaning_waste_time + - nmp_stirrer_cleaning_injection + - nmp_stirrer_cleaning_count + - nmp_stirrer_cleaning_wait_time + - nmp_stirrer_cleaning_waste_time + - water_outer_wall_cleaning_injection + - water_outer_wall_cleaning_count + - water_outer_wall_cleaning_wait_time + - water_outer_wall_cleaning_waste_time + - water_inner_wall_cleaning_injection + - water_inner_wall_cleaning_count + - water_pump_cleaning_suction_count + - water_inner_wall_cleaning_waste_time + - water_stirrer_cleaning_injection + - water_stirrer_cleaning_count + - water_stirrer_cleaning_wait_time + - water_stirrer_cleaning_waste_time + - acetone_outer_wall_cleaning_injection + - acetone_outer_wall_cleaning_count + - acetone_outer_wall_cleaning_wait_time + - acetone_outer_wall_cleaning_waste_time + - acetone_inner_wall_cleaning_injection + - acetone_inner_wall_cleaning_count + - acetone_pump_cleaning_suction_count + - acetone_inner_wall_cleaning_waste_time + - acetone_stirrer_cleaning_injection + - acetone_stirrer_cleaning_count + - acetone_stirrer_cleaning_wait_time + - acetone_stirrer_cleaning_waste_time + - pipe_blowing_time + - injection_pump_forward_empty_suction_count + - injection_pump_reverse_empty_suction_count + - filtration_liquid_selection + title: PostProcessTriggerClean_Goal + type: object + result: + properties: + return_info: + type: string + required: + - return_info + title: PostProcessTriggerClean_Result + type: object + required: + - goal + title: PostProcessTriggerClean + type: object + type: PostProcessTriggerClean + trigger_grab_action: + feedback: {} + goal: + raw_tank_number: raw_tank_number + reaction_tank_number: reaction_tank_number + goal_default: + raw_tank_number: 0 + reaction_tank_number: 0 + handles: {} + result: + return_info: return_info + schema: + description: '' + properties: + feedback: + properties: {} + required: [] + title: PostProcessGrab_Feedback + type: object + goal: + properties: + raw_tank_number: + 
maximum: 2147483647 + minimum: -2147483648 + type: integer + reaction_tank_number: + maximum: 2147483647 + minimum: -2147483648 + type: integer + required: + - reaction_tank_number + - raw_tank_number + title: PostProcessGrab_Goal + type: object + result: + properties: + return_info: + type: string + required: + - return_info + title: PostProcessGrab_Result + type: object + required: + - goal + title: PostProcessGrab + type: object + type: PostProcessGrab + trigger_post_processing: + feedback: {} + goal: + atomization_fast_speed: atomization_fast_speed + atomization_pressure_kpa: atomization_pressure_kpa + first_powder_mixing_tim: first_powder_mixing_tim + first_powder_wash_count: first_powder_wash_count + first_wash_water_amount: first_wash_water_amount + initial_water_amount: initial_water_amount + injection_pump_push_speed: injection_pump_push_speed + injection_pump_suction_speed: injection_pump_suction_speed + pre_filtration_mixing_time: pre_filtration_mixing_time + raw_liquid_suction_count: raw_liquid_suction_count + second_powder_mixing_time: second_powder_mixing_time + second_powder_wash_count: second_powder_wash_count + second_wash_water_amount: second_wash_water_amount + wash_slow_speed: wash_slow_speed + goal_default: + atomization_fast_speed: 0.0 + atomization_pressure_kpa: 0 + first_powder_mixing_tim: 0 + first_powder_wash_count: 0 + first_wash_water_amount: 0.0 + initial_water_amount: 0.0 + injection_pump_push_speed: 0 + injection_pump_suction_speed: 0 + pre_filtration_mixing_time: 0 + raw_liquid_suction_count: 0 + second_powder_mixing_time: 0 + second_powder_wash_count: 0 + second_wash_water_amount: 0.0 + wash_slow_speed: 0.0 + handles: {} + result: + return_info: return_info + schema: + description: '' + properties: + feedback: + properties: {} + required: [] + title: PostProcessTriggerPostPro_Feedback + type: object + goal: + properties: + atomization_fast_speed: + type: number + atomization_pressure_kpa: + maximum: 2147483647 + minimum: -2147483648 + type: integer + first_powder_mixing_tim: + maximum: 2147483647 + minimum: -2147483648 + type: integer + first_powder_wash_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + first_wash_water_amount: + type: number + initial_water_amount: + type: number + injection_pump_push_speed: + maximum: 2147483647 + minimum: -2147483648 + type: integer + injection_pump_suction_speed: + maximum: 2147483647 + minimum: -2147483648 + type: integer + pre_filtration_mixing_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + raw_liquid_suction_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + second_powder_mixing_time: + maximum: 2147483647 + minimum: -2147483648 + type: integer + second_powder_wash_count: + maximum: 2147483647 + minimum: -2147483648 + type: integer + second_wash_water_amount: + type: number + wash_slow_speed: + type: number + required: + - atomization_fast_speed + - wash_slow_speed + - injection_pump_suction_speed + - injection_pump_push_speed + - raw_liquid_suction_count + - first_wash_water_amount + - second_wash_water_amount + - first_powder_mixing_tim + - second_powder_mixing_time + - first_powder_wash_count + - second_powder_wash_count + - initial_water_amount + - pre_filtration_mixing_time + - atomization_pressure_kpa + title: PostProcessTriggerPostPro_Goal + type: object + result: + properties: + return_info: + type: string + required: + - return_info + title: PostProcessTriggerPostPro_Result + type: object + required: + - goal + title: PostProcessTriggerPostPro + 
type: object
+        type: PostProcessTriggerPostPro
+      write_node:
+        feedback:
+          result: result
+        goal:
+          command: json_input
+        goal_default:
+          command: ''
+        handles: {}
+        result:
+          success: success
+        schema:
+          description: ''
+          properties:
+            feedback:
+              properties:
+                status:
+                  type: string
+              required:
+              - status
+              title: SendCmd_Feedback
+              type: object
+            goal:
+              properties:
+                command:
+                  type: string
+              required:
+              - command
+              title: SendCmd_Goal
+              type: object
+            result:
+              properties:
+                return_info:
+                  type: string
+                success:
+                  type: boolean
+              required:
+              - return_info
+              - success
+              title: SendCmd_Result
+              type: object
+          required:
+          - goal
+          title: SendCmd
+          type: object
+        type: SendCmd
+    module: unilabos.devices.workstation.post_process.post_process:OpcUaClient
+    status_types:
+      acetone_tank_empty_alarm: Bool
+      atomization_fast_speed: Float64
+      atomization_pressure_kpa: Int32
+      cleaning_complete: Bool
+      device_ready: Bool
+      door_open_alarm: Bool
+      grab_complete: Bool
+      grab_trigger: Bool
+      injection_pump_push_speed: Int32
+      injection_pump_suction_speed: Int32
+      nmp_tank_empty_alarm: Bool
+      post_process_complete: Bool
+      post_process_trigger: Bool
+      raw_tank_number: Int32
+      reaction_tank_number: Int32
+      remote_mode: Bool
+      wash_slow_speed: Float64
+      waste_tank_full_alarm: Bool
+      water_tank_empty_alarm: Bool
+    type: python
+  config_info: []
+  description: Post-processing station
+  handles: []
+  icon: post_process_station.webp
+  init_param_schema: {}
+  version: 1.0.0
diff --git a/unilabos/registry/resources/post_process/bottle_carriers.yaml b/unilabos/registry/resources/post_process/bottle_carriers.yaml
new file mode 100644
index 00000000..df0391a1
--- /dev/null
+++ b/unilabos/registry/resources/post_process/bottle_carriers.yaml
@@ -0,0 +1,25 @@
+POST_PROCESS_Raw_1BottleCarrier:
+  category:
+  - bottle_carriers
+  class:
+    module: unilabos.devices.workstation.post_process.bottle_carriers:POST_PROCESS_Raw_1BottleCarrier
+    type: pylabrobot
+  description: POST_PROCESS_Raw_1BottleCarrier
+  handles: []
+  icon: ''
+  init_param_schema: {}
+  registry_type: resource
+  version: 1.0.0
+
+POST_PROCESS_Reaction_1BottleCarrier:
+  category:
+  - bottle_carriers
+  class:
+    module: unilabos.devices.workstation.post_process.bottle_carriers:POST_PROCESS_Reaction_1BottleCarrier
+    type: pylabrobot
+  description: POST_PROCESS_Reaction_1BottleCarrier
+  handles: []
+  icon: ''
+  init_param_schema: {}
+  registry_type: resource
+  version: 1.0.0
diff --git a/unilabos/registry/resources/post_process/bottles.yaml b/unilabos/registry/resources/post_process/bottles.yaml
new file mode 100644
index 00000000..4243a216
--- /dev/null
+++ b/unilabos/registry/resources/post_process/bottles.yaml
@@ -0,0 +1,11 @@
+POST_PROCESS_PolymerStation_Reagent_Bottle:
+  category:
+  - bottles
+  class:
+    module: unilabos.devices.workstation.post_process.bottles:POST_PROCESS_PolymerStation_Reagent_Bottle
+    type: pylabrobot
+  handles: []
+  icon: ''
+  init_param_schema: {}
+  version: 1.0.0
+
diff --git a/unilabos/registry/resources/post_process/deck.yaml b/unilabos/registry/resources/post_process/deck.yaml
new file mode 100644
index 00000000..7c1f49de
--- /dev/null
+++ b/unilabos/registry/resources/post_process/deck.yaml
@@ -0,0 +1,12 @@
+post_process_deck:
+  category:
+  - post_process_deck
+  class:
+    module: unilabos.devices.workstation.post_process.decks:post_process_deck
+    type: pylabrobot
+  description: post_process_deck
+  handles: []
+  icon: ''
+  init_param_schema: {}
+  registry_type: resource
+  version: 1.0.0
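
A minimal usage sketch for the warehouse helpers added above (not part of the diff). It assumes the module paths introduced in this PR are importable and that warehouse_factory returns the WareHouse subclass that defines get_site_by_layer_position:

    from unilabos.devices.workstation.post_process.warehouses import post_process_warehouse_4x3x1

    # Build the 4x3x1 warehouse registered in warehouses.py.
    warehouse = post_process_warehouse_4x3x1(name="post_process_warehouse")

    # Sites are resolved layer-major (site_index = layer * 4 + row * 1 + col),
    # so (row=2, col=0, layer=1) maps to site index 6.
    site = warehouse.get_site_by_layer_position(row=2, col=0, layer=1)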
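The action schemas in post_process_station.yaml also pin down the goal payloads a caller must send. A sketch of two conforming goals follows; the dispatch call itself is hypothetical, and only the field names and types come from the YAML:

    # read_node: the goal's `command` string is mapped to the driver's
    # `node_name` parameter, so the payload carries a status-node name
    # such as `device_ready` from status_types.
    read_goal = {"command": "device_ready"}

    # trigger_grab_action: both fields are required int32 values per
    # the PostProcessGrab_Goal schema.
    grab_goal = {"reaction_tank_number": 1, "raw_tank_number": 2}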