# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

import logging
from contextlib import ExitStack, contextmanager

import torch
from torch.distributed.tensor import DTensor
from torch.distributed.tensor._op_schema import OpSchema, StrategyType
from torch.distributed.tensor._ops.utils import register_op_strategy

logger = logging.getLogger(__name__)

aten = torch.ops.aten

# Reference to the existing sharding_propagator maintained by upstream DTensor.
propagator = DTensor._op_dispatcher.sharding_propagator

enable_implicit_replication = False
_current_stack = None

replicate_op_strategy = torch.distributed.tensor._ops.utils.replicate_op_strategy


# TODO: remove and refer to
# https://github.com/pytorch/pytorch/blob/9c107606629de6383f55e3b48b42e594d23407b1/test/distributed/tensor/test_op_strategy.py#L446
# once the function is moved out of the test folder upstream.
@contextmanager
def op_strategy_context(op_overload, strategy_func, schema_info=None):
    """
    Context manager that registers an op strategy and clears it on exit.

    Args:
        op_overload: The operator overload to set or clear the strategy for.
        strategy_func: The strategy function to set for the operator overload.
        schema_info: Optional schema information for the operator overload.

    Yields:
        None
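
    Example (illustrative sketch; `my_strategy` stands in for a real
    strategy function, it is not defined in this module):

        with op_strategy_context(aten.mm.default, my_strategy):
            ...  # sharding propagation for aten.mm.default uses my_strategy
        # on exit the strategy, its schema info, and the propagation cache
        # are cleared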
    """
    propagator = DTensor._op_dispatcher.sharding_propagator
    try:
        # Register the op strategy.
        register_op_strategy(op_overload, schema_info=schema_info)(strategy_func)
        yield
    finally:
        # Remove this op's strategy and schema info, then clear the cached
        # sharding propagation results.
        if op_overload in propagator.op_strategy_funcs:
            del propagator.op_strategy_funcs[op_overload]
        if op_overload in propagator.op_to_schema_info:
            del propagator.op_to_schema_info[op_overload]
        propagator.propagate_op_sharding.cache.cache_clear()


def get_op_strategy(op: torch._ops.OpOverload, op_schema: OpSchema) -> StrategyType:
    """Return the output sharding strategy for `op` given `op_schema`.

    If `op` has no registered strategy and implicit replication is enabled,
    `replicate_op_strategy` is registered for it on the fly.
    """
    global enable_implicit_replication, _current_stack

    if op not in propagator.op_strategy_funcs:
        if not enable_implicit_replication:
            raise NotImplementedError(
                f"Operator {op} does not have a sharding strategy registered."
            )
        else:
            # Use the current stack if available, so the implicit registration
            # is undone when the surrounding context exits.
            if _current_stack is not None:
                _current_stack.enter_context(
                    op_strategy_context(op, replicate_op_strategy)
                )
            else:
                # No stack available, so register the strategy permanently.
                register_op_strategy(op)(replicate_op_strategy)
            logger.warning(
                f"implicitly registering `{op}` with `{replicate_op_strategy.__name__}`"
            )
    return propagator.op_strategy_funcs[op](op_schema)
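

# A minimal calling sketch for get_op_strategy (illustrative only; `op_schema`
# would normally be built by the caller rather than constructed by hand):
#
#     strategy = get_op_strategy(aten.mm.default, op_schema)
#
# Without implicit replication enabled, an op with no registered strategy
# raises NotImplementedError; see `with_implicit_strategies` below.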


@contextmanager
def with_implicit_strategies():
    """Context manager that enables implicit replication and cleans up
    implicitly registered strategies on exit."""
    global enable_implicit_replication, _current_stack

    # Create a fresh ExitStack for this context.
    with ExitStack() as local_stack:
        # Save the previous stack and publish this one via the module-level
        # variable so get_op_strategy can register cleanups on it.
        old_stack = _current_stack
        _current_stack = local_stack

        # Enable implicit replication.
        old_value = enable_implicit_replication
        enable_implicit_replication = True
        try:
            yield
        finally:
            # Restore the original values.
            _current_stack = old_stack
            enable_implicit_replication = old_value
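

# A minimal usage sketch for with_implicit_strategies (illustrative only;
# `some_op` and `some_op_schema` are placeholders for an operator without a
# registered sharding strategy and a corresponding OpSchema):
#
#     with with_implicit_strategies():
#         strategy = get_op_strategy(some_op, some_op_schema)
#         # falls back to replicate_op_strategy; the implicit registration is
#         # entered on the local ExitStack and undone when the block exits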


# TODO: automatically generate redistribute costs for strategies. There is a
# `fill_missing_redistribute_cost` in autoparallel/utils.py, which is a hack
# that generates redistribute costs from the given input specs and has only
# been tested on certain ops. We can potentially improve on it.
def fill_missing_redistribute_cost(op: torch._ops.OpOverload, op_schema: OpSchema):
    """
    Fill in missing redistribute costs for strategies.
    """
    ...