Skip to content

Commit

Permalink
[pytorch] deprecate static dispatch (pytorch#43564)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: pytorch#43564

Static dispatch was originally introduced for mobile selective build.

Since we have added selective build support for dynamic dispatch and
tested it in FB production for months, we can deprecate static dispatch
to reduce the complexity of the codebase.

Test Plan: Imported from OSS

Reviewed By: ezyang

Differential Revision: D23324452

Pulled By: ljk53

fbshipit-source-id: d2970257616a8c6337f90249076fca1ae93090c7
  • Loading branch information
ljk53 authored and facebook-github-bot committed Aug 27, 2020
1 parent 3afd24d commit 3a0e35c
Show file tree
Hide file tree
Showing 21 changed files with 8 additions and 198 deletions.
5 changes: 0 additions & 5 deletions .circleci/cimodel/data/simple/mobile_definitions.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,11 +57,6 @@ def gen_tree(self):
[DOCKER_REQUIREMENT_ASAN],
["build"]
),
MobileJob(
DOCKER_IMAGE_ASAN,
[DOCKER_REQUIREMENT_ASAN],
["custom", "build", "static"]
),

# Use LLVM-DEV toolchain in android-ndk-r19c docker image
MobileJob(
Expand Down
7 changes: 0 additions & 7 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6652,13 +6652,6 @@ workflows:
name: pytorch_linux_xenial_py3_clang5_mobile_build
requires:
- docker-pytorch-linux-xenial-py3-clang5-asan
- pytorch_linux_build:
build_environment: pytorch-linux-xenial-py3-clang5-mobile-custom-build-static
build_only: "1"
docker_image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan
name: pytorch_linux_xenial_py3_clang5_mobile_custom_build_static
requires:
- docker-pytorch-linux-xenial-py3-clang5-asan
- pytorch_linux_build:
build_environment: pytorch-linux-xenial-py3-clang5-mobile-custom-build-dynamic
build_only: "1"
Expand Down
4 changes: 1 addition & 3 deletions .jenkins/pytorch/build-mobile.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,7 @@ retry pip install --pre torch torchvision \

# Run end-to-end process of building mobile library, linking into the predictor
# binary, and running forward pass with a real model.
if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-static* ]]; then
TEST_CUSTOM_BUILD_STATIC=1 test/mobile/custom_build/build.sh
elif [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
export LLVM_DIR="$(llvm-config-5.0 --prefix)"
echo "LLVM_DIR: ${LLVM_DIR}"
TEST_CUSTOM_BUILD_DYNAMIC=1 test/mobile/custom_build/build.sh
Expand Down
1 change: 0 additions & 1 deletion BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ header_template_rule(
substitutions = {
"cmakedefine": "define",
"#define FEATURE_TORCH_MOBILE": "/* #undef FEATURE_TORCH_MOBILE */",
"#define USE_STATIC_DISPATCH": "/* #undef USE_STATIC_DISPATCH */",
"#define C10_USE_NUMA": "/* #undef C10_USE_NUMA */",
},
)
Expand Down
1 change: 0 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,6 @@ option(BUILD_PYTHON "Build Python binaries" ON)
option(BUILD_CAFFE2_OPS "Build Caffe2 operators" ON)
option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
option(BUILD_CAFFE2_MOBILE "Build libcaffe2 for mobile (deprecating)" OFF)
option(USE_STATIC_DISPATCH "Use static dispatch for ATen operators" OFF)
cmake_dependent_option(
CAFFE2_LINK_LOCAL_PROTOBUF "If set, build protobuf inside libcaffe2.so." ON
"BUILD_SHARED_LIBS AND BUILD_CUSTOM_PROTOBUF" OFF)
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/autocast_mode.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -202,8 +202,6 @@ Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const c10::op
"safe to autocast.");
}


#ifndef USE_STATIC_DISPATCH
namespace {
/*****************************************************************************************************************
This section performs load-time registration for autocast wrappers.
Expand Down Expand Up @@ -378,7 +376,6 @@ TORCH_LIBRARY_IMPL(aten, Autocast, m) {
}

}
#endif

} // namespace autocast
} // namespace at
2 changes: 0 additions & 2 deletions aten/src/ATen/cudnn/AutocastRNN.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -104,14 +104,12 @@ _cudnn_rnn_cast_reflatten(const Tensor & input,
#endif // AT_CUDNN_ENABLED()
}

#ifndef USE_STATIC_DISPATCH
namespace {
TORCH_LIBRARY_IMPL(aten, Autocast, m) {
m.impl("_cudnn_rnn",
TORCH_FN((&at::autocast::_cudnn_rnn_cast_reflatten)));
}
} // anonymous namespace
#endif

} // namespace autocast
} // namespace at
110 changes: 3 additions & 107 deletions aten/src/ATen/function_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,14 +146,10 @@ def TypedDict(name, attrs, total=True): # type: ignore
// ${schema_string}
${return_type} Tensor::${api_name}(${method_formals}) const {
#ifdef USE_STATIC_DISPATCH
${static_dispatch_method_body}
#else
static auto op = c10::Dispatcher::singleton()
.findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
.typed<${tensor_method_cpp_signature}>();
return op.call(${tensor_method_actuals});
#endif
}
""")

Expand All @@ -172,45 +168,13 @@ def TypedDict(name, attrs, total=True): # type: ignore
// ${schema_string}
${return_type} ${api_name}(${formals}) {
#ifdef USE_STATIC_DISPATCH
${static_dispatch_function_body}
#else
static auto op = c10::Dispatcher::singleton()
.findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
.typed<${function_cpp_signature}>();
return op.call(${function_actuals});
#endif
}
""")

# In order to rely on the linker to strip unused ops, it requires us to dispatch statically
# in Functions.h and TensorMethods.cpp.
#
# NB: The default body also needs to apply a variable guard, as in some
# situations what we think is a default body actually does have an
# explicit derivative, and thereby would have gotten unwrapped by
# the time you get to the implementation.
STATIC_DISPATCH_FUNCTION_DEFAULT_BODY = CodeTemplate("""\
at::AutoNonVariableTypeMode _var_guard(true);
${return_call} TypeDefault::${type_wrapper_name}(${actuals});
""")

STATIC_DISPATCH_FUNCTION_SWITCH_BODY = CodeTemplate("""\
at::AutoNonVariableTypeMode _var_guard(true);
${dispatch_key_init}
switch (dispatchKeyToBackend(${dispatch_key_var_name})) {
${static_dispatch_function_cases}
default:
AT_ERROR("${api_name} not implemented for ", at::toString(${dispatch_key_var_name}));
}
""")

STATIC_DISPATCH_FUNCTION_SWITCH_CASE = CodeTemplate("""\
case Backend::${backend}:
${return_call} ${backend}Type::${type_wrapper_name}(${actuals});
break;
""")

IFDEF_BLOCK = CodeTemplate("""\
#ifdef ${ifdef_guard}
${content}
Expand Down Expand Up @@ -246,10 +210,6 @@ def TypedDict(name, attrs, total=True): # type: ignore
('ComplexDouble', 'ComplexDouble', 'ComplexDouble', False),
]

static_dispatch_backends = ['CPU', 'QuantizedCPU', 'Vulkan']
static_dispatch_backends_ifdef_guard = {'Vulkan' : 'USE_VULKAN'}


class NYIError(Exception):
"""Indicates we don't support this declaration yet"""

Expand Down Expand Up @@ -1136,44 +1096,6 @@ def swizzle_self(f): # blegh

method_actuals = maybe_unwrap_optional_tensors(option, formals, option['method_actuals'])

if isinstance(type_method_dispatch, dict):
static_dispatch_function_cases = []
# NB: As this code is currently written, there will NEVER be
# a backend generated for variable dispatch. There is nothing
# stopping us from actually implementing this, however, if you
# really wanted variable on mobile, there's nothing stopping
# you from implementing this (however, you would have an
# annoying phase problem, since code generation for variable
# happens in tools/ which happens later than here.)
#
# If you pass in a variable to the dispatch, and variable is
# enabled, this switch will fail. This is intentional: you
# probably need to disable variable globally in the mobile
# calling code.
for backend in static_dispatch_backends:
if backend in type_method_dispatch:
static_dispatch_function_case = STATIC_DISPATCH_FUNCTION_SWITCH_CASE.substitute(
option,
backend=backend,
backend_function=type_method_dispatch[backend],
actuals=method_actuals)
if (backend in static_dispatch_backends_ifdef_guard):
static_dispatch_function_cases.append(IFDEF_BLOCK.substitute(
option,
ifdef_guard=static_dispatch_backends_ifdef_guard[backend],
content=static_dispatch_function_case))
else:
static_dispatch_function_cases.append(static_dispatch_function_case)

static_dispatch_method_body = STATIC_DISPATCH_FUNCTION_SWITCH_BODY.substitute(
option,
dispatch_key_var_name=dispatch_key_var_name,
dispatch_key_init=dispatch_key_init,
static_dispatch_function_cases=static_dispatch_function_cases)
else:
static_dispatch_method_body = STATIC_DISPATCH_FUNCTION_DEFAULT_BODY.substitute(
option, actuals=method_actuals)

# See NOTE[UnboxedOnly]
if option['use_c10_dispatcher'] == 'full':
tensor_method_actuals = option['schema_order_method_actuals']
Expand All @@ -1184,13 +1106,12 @@ def swizzle_self(f): # blegh
tensor_method_cpp_signature = option['cpp_signature']

method_definition = TENSOR_METHOD_DEFINITION.substitute(
option, static_dispatch_method_body=static_dispatch_method_body,
option,
tensor_method_actuals=tensor_method_actuals,
tensor_method_cpp_signature=tensor_method_cpp_signature
)
return FunctionCode(
declaration=TENSOR_METHOD_DECLARATION.substitute(
option, static_dispatch_method_body=static_dispatch_method_body),
declaration=TENSOR_METHOD_DECLARATION.substitute(option),
definition=method_definition)

def gen_namespace_function(option, multidispatch_formals):
Expand All @@ -1204,31 +1125,6 @@ def gen_namespace_function(option, multidispatch_formals):

actuals = maybe_unwrap_optional_tensors(option, formals, option['actuals'])

if isinstance(type_method_dispatch, dict):
static_dispatch_function_cases = []
for backend in static_dispatch_backends:
if backend in type_method_dispatch:
static_dispatch_function_case = STATIC_DISPATCH_FUNCTION_SWITCH_CASE.substitute(
option,
backend=backend,
backend_function=type_method_dispatch[backend],
actuals=actuals)
if (backend in static_dispatch_backends_ifdef_guard):
static_dispatch_function_cases.append(IFDEF_BLOCK.substitute(
option,
ifdef_guard=static_dispatch_backends_ifdef_guard[backend],
content=static_dispatch_function_case))
else:
static_dispatch_function_cases.append(static_dispatch_function_case)
static_dispatch_function_body = STATIC_DISPATCH_FUNCTION_SWITCH_BODY.substitute(
option,
dispatch_key_var_name=dispatch_key_var_name,
dispatch_key_init=dispatch_key_init,
static_dispatch_function_cases=static_dispatch_function_cases)
else:
static_dispatch_function_body = STATIC_DISPATCH_FUNCTION_DEFAULT_BODY.substitute(
option, actuals=actuals)

# See NOTE[UnboxedOnly]
if option['use_c10_dispatcher'] == 'full':
function_actuals = option['schema_order_actuals']
Expand All @@ -1239,7 +1135,7 @@ def gen_namespace_function(option, multidispatch_formals):
function_cpp_signature = option['cpp_signature']

fn_definition = FUNCTION_DEFINITION.substitute(
option, static_dispatch_function_body=static_dispatch_function_body,
option,
function_actuals=function_actuals,
function_cpp_signature=function_cpp_signature)

Expand Down
6 changes: 0 additions & 6 deletions aten/src/ATen/native/TensorProperties.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,20 +53,14 @@ bool cudnn_is_acceptable(const Tensor& self) {
}

Tensor detach(const Tensor& self) {
#ifndef USE_STATIC_DISPATCH
// this just exists to give us a hook in VariableType and an entry in Declarations.yaml
//AT_ERROR("detach is not implemented for Tensor");
#endif
// this is no-op for USE_STATIC_DISPATCH mode
return self;
}

Tensor & detach_(Tensor & self) {
#ifndef USE_STATIC_DISPATCH
// this just exists to give us a hook in VariableType and an entry in Declarations.yaml
//AT_ERROR("detach_ is not implemented for Tensor");
#endif
// this is no-op for USE_STATIC_DISPATCH mode
return self;
}

Expand Down
9 changes: 0 additions & 9 deletions aten/src/ATen/templates/TensorMethods.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,6 @@
#include <ATen/quantized/Quantizer.h>
#include <torch/csrc/WindowsTorchApiMacro.h>

#ifdef USE_STATIC_DISPATCH
#include <ATen/TypeDefault.h>
#include <ATen/CPUType.h>
#include <ATen/QuantizedCPUType.h>
#ifdef USE_VULKAN
#include <ATen/VulkanType.h>
#endif
#endif

namespace at {

Tensor Tensor::cpu() const {
Expand Down
5 changes: 0 additions & 5 deletions c10/macros/cmake_macros.h.in
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,4 @@
// to converging libtorch and caffe2 mobile builds and removing it eventually.
#cmakedefine FEATURE_TORCH_MOBILE

// If defined it will use static dispatch for ATen operators.
// Should expose this macro for projects including ATen headers to inherit
// the same option.
#cmakedefine USE_STATIC_DISPATCH

#endif // C10_MACROS_CMAKE_MACROS_H_
1 change: 0 additions & 1 deletion caffe2/core/macros.h.in
Original file line number Diff line number Diff line change
Expand Up @@ -79,5 +79,4 @@ static_assert(
{"USE_MKLDNN", "${CAFFE2_USE_MKLDNN}"}, \
{"USE_NVTX", "${CAFFE2_USE_NVTX}"}, \
{"USE_TRT", "${CAFFE2_USE_TRT}"}, \
{"USE_STATIC_DISPATCH", "${USE_STATIC_DISPATCH}"}, \
}
2 changes: 1 addition & 1 deletion cmake/Codegen.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ if(INTERN_BUILD_ATEN_OPS)
endif()

if(SELECTED_OP_LIST)
if(NOT USE_STATIC_DISPATCH AND NOT OP_DEPENDENCY)
if(NOT OP_DEPENDENCY)
message(INFO "Use default op dependency graph .yaml file for custom build with dynamic dispatch.")
set(OP_DEPENDENCY ${CMAKE_CURRENT_LIST_DIR}/../tools/code_analyzer/default_op_deps.yaml)
endif()
Expand Down
1 change: 0 additions & 1 deletion cmake/Summary.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ function(caffe2_print_configuration_summary)
message(STATUS " TORCH_VERSION : ${TORCH_VERSION}")
message(STATUS " CAFFE2_VERSION : ${CAFFE2_VERSION}")
message(STATUS " BUILD_CAFFE2_MOBILE : ${BUILD_CAFFE2_MOBILE}")
message(STATUS " USE_STATIC_DISPATCH : ${USE_STATIC_DISPATCH}")
message(STATUS " BUILD_BINARY : ${BUILD_BINARY}")
message(STATUS " BUILD_CUSTOM_PROTOBUF : ${BUILD_CUSTOM_PROTOBUF}")
if(${CAFFE2_LINK_LOCAL_PROTOBUF})
Expand Down
1 change: 0 additions & 1 deletion scripts/build_android.sh
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,6 @@ CMAKE_ARGS=()

if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
# Build PyTorch mobile
CMAKE_ARGS+=("-DUSE_STATIC_DISPATCH=ON")
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$($PYTHON -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$($PYTHON -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")
Expand Down
3 changes: 1 addition & 2 deletions scripts/build_ios.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ CMAKE_ARGS=()

if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
# Build PyTorch mobile
CMAKE_ARGS+=("-DUSE_STATIC_DISPATCH=ON")
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")
Expand Down Expand Up @@ -62,7 +61,7 @@ fi

# IOS_PLATFORM controls type of iOS platform (see ios-cmake)
if [ -n "${IOS_PLATFORM:-}" ]; then
CMAKE_ARGS+=("-DIOS_PLATFORM=${IOS_PLATFORM}")
CMAKE_ARGS+=("-DIOS_PLATFORM=${IOS_PLATFORM}")
if [ "${IOS_PLATFORM}" == "WATCHOS" ]; then
# enable bitcode by default for watchos
CMAKE_ARGS+=("-DCMAKE_C_FLAGS=-fembed-bitcode")
Expand Down
1 change: 0 additions & 1 deletion scripts/build_mobile.sh
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ echo "Bash: $(/bin/bash --version | head -1)"
echo "Caffe2 path: $CAFFE2_ROOT"

CMAKE_ARGS=()
CMAKE_ARGS+=("-DUSE_STATIC_DISPATCH=ON")
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")
Expand Down
Loading

0 comments on commit 3a0e35c

Please sign in to comment.