# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.


import logging
from contextlib import contextmanager
from typing import Callable, TypeVar, Union

import torch
from torch.distributed.tensor import DTensor
from torch.distributed.tensor._op_schema import (
    OpSchema,
    OutputSharding,
    RuntimeSchemaInfo,
    StrategyType,
)
from typing_extensions import ParamSpec

logger = logging.getLogger(__name__)

aten = torch.ops.aten

_T = TypeVar("_T")
_P = ParamSpec("_P")


# -------------define universal op strategy-------------
# imported explicitly so the `_ops.utils` submodule is guaranteed to be loaded
from torch.distributed.tensor._ops.utils import replicate_op_strategy


class StrategyPool:
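    """
    Pool of DTensor sharding strategies used by AP (AutoParallel).

    Holds references to the strategy/rule tables of DTensor's sharding
    propagator, and can optionally fall back to ``replicate_op_strategy``
    for operators that have no registered strategy.
    """
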
    def __init__(self) -> None:
        # reference to the existing strategy functions from upstream DTensor
        self.op_strategy_funcs: dict[
            torch._ops.OpOverload, Callable[[OpSchema], StrategyType]
        ] = DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs
        # reference to the existing rules
        self.op_to_rules: dict[
            torch._ops.OpOverload, Callable[[OpSchema], OutputSharding]
        ] = DTensor._op_dispatcher.sharding_propagator.op_to_rules
        # we probably don't need to care about the existing op_to_schema_info for AP
        self.op_to_schema_info = (
            DTensor._op_dispatcher.sharding_propagator.op_to_schema_info
        )

        self.enable_implicit_replication: bool = False
        self.implicit_strategy_op_tracker: list[torch._ops.OpOverload] = []

    def get_op_strategy(
        self, op: torch._ops.OpOverload, op_schema: OpSchema
    ) -> StrategyType:
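        """
        Return the sharding strategy for ``op`` evaluated against ``op_schema``.

        If no strategy is registered and implicit replication is enabled (see
        ``replicate_for_unsupported_operators``), the op falls back to
        ``replicate_op_strategy`` and is tracked so the fallback registration
        can be removed later.
        """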
        if op not in self.op_strategy_funcs:
            if not self.enable_implicit_replication:
                raise NotImplementedError(
                    f"Operator {op} does not have a sharding strategy registered."
                )
            else:
                self.implicit_strategy_op_tracker.append(op)
                logger.warning(
                    "Implicitly registering sharding strategy for op %s using %s",
                    op.name(),
                    replicate_op_strategy.__name__,
                )
                self.register_op_strategy(op)(replicate_op_strategy)
        return self.op_strategy_funcs[op](op_schema)

    # always enable pytree (needs_pytree=True), as dispatching overhead is not
    # a concern in AP.
    def register_op_strategy(
        self,
        op: Union[torch._ops.OpOverload, list[torch._ops.OpOverload]],
        schema_info=RuntimeSchemaInfo(needs_pytree=True),
    ) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
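        """
        Return a decorator that registers ``impl`` as the sharding strategy
        for ``op`` (a single ``OpOverload`` or a list of overloads).

        Example (a sketch; ``relu_strategy`` and its body are illustrative
        placeholders, not a real strategy implementation)::

            pool = StrategyPool()

            @pool.register_op_strategy(aten.relu.default)
            def relu_strategy(op_schema: OpSchema) -> StrategyType:
                # build and return the strategy for relu here
                ...
        """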
        def wrapper(impl: Callable[_P, _T]) -> Callable[_P, _T]:
            # accept either a single overload or a list of overloads
            overloads = op if isinstance(op, list) else [op]

            for overload in overloads:
                self.op_strategy_funcs[overload] = impl
                self.op_to_schema_info[overload] = schema_info
            return impl

        return wrapper

    @contextmanager
    def replicate_for_unsupported_operators(self):
        """
        Context manager that enables implicit replication for operators
        without a registered sharding strategy and removes the implicitly
        registered strategies on exit.
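
        Example (a sketch; assumes ``some_op`` is an ``OpOverload`` and
        ``op_schema`` is its corresponding ``OpSchema``)::

            pool = StrategyPool()
            with pool.replicate_for_unsupported_operators():
                # ops with no registered strategy temporarily fall back to
                # replicate_op_strategy; the fallback registrations are
                # removed again when the block exits.
                strategy = pool.get_op_strategy(some_op, op_schema)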
        """
        try:
            if self.enable_implicit_replication:
                raise RuntimeError(
                    "Implicit strategy is already enabled. Cannot enable it again."
                )
            self.enable_implicit_replication = True
            yield
        finally:
            self.enable_implicit_replication = False
            op_to_remove = self.implicit_strategy_op_tracker
            for op_overload in op_to_remove:
                if op_overload in self.op_strategy_funcs:
                    del self.op_strategy_funcs[op_overload]
                if op_overload in self.op_to_schema_info:
                    del self.op_to_schema_info[op_overload]
            self.implicit_strategy_op_tracker.clear()

    # TODO: automatically generate redistribute costs for strategies. There is a
    # `fill_missing_redistribute_cost` in autoparallel/utils.py, which is a hack
    # to generate redistribute costs given input specs and is only tested on
    # certain ops. We can potentially improve on it.
    def fill_missing_redistribute_cost(
        self, op: torch._ops.OpOverload, op_schema: OpSchema
    ):
        """
        Fill missing redistribute cost for strategies.
        """
        ...