diff --git a/aten/src/ATen/FunctionalTensorWrapper.cpp b/aten/src/ATen/FunctionalTensorWrapper.cpp
index 762321c76d4c38..d691650884f562 100644
--- a/aten/src/ATen/FunctionalTensorWrapper.cpp
+++ b/aten/src/ATen/FunctionalTensorWrapper.cpp
@@ -484,8 +484,8 @@ c10::optional<Tensor> to_functional_tensor(const c10::optional<Tensor>& tensor)
   }
   return c10::nullopt;
 }
-c10::List<c10::optional<Tensor>> to_functional_tensor(const c10::List<c10::optional<Tensor>>& t_list) {
-  c10::List<c10::optional<Tensor>> outputs;
+c10::List<::std::optional<Tensor>> to_functional_tensor(const c10::List<::std::optional<Tensor>>& t_list) {
+  c10::List<::std::optional<Tensor>> outputs;
   outputs.reserve(t_list.size());
   for (const auto i : c10::irange(t_list.size())) {
     outputs.push_back(to_functional_tensor(t_list[i]));
@@ -536,8 +536,8 @@ std::vector<Tensor> from_functional_tensor(ITensorListRef t_list) {
   }
   return outputs;
 }
-c10::List<c10::optional<Tensor>> from_functional_tensor(const c10::List<c10::optional<Tensor>>& t_list) {
-  c10::List<c10::optional<Tensor>> outputs;
+c10::List<::std::optional<Tensor>> from_functional_tensor(const c10::List<::std::optional<Tensor>>& t_list) {
+  c10::List<::std::optional<Tensor>> outputs;
   outputs.reserve(t_list.size());
   for (const auto i : c10::irange(t_list.size())) {
     outputs.push_back(from_functional_tensor(t_list[i], /*assert_functional=*/false));
@@ -572,7 +572,7 @@ void sync(ITensorListRef t_list) {
     sync(t);
   }
 }
-void sync(const c10::List<c10::optional<Tensor>>& t_list) {
+void sync(const c10::List<::std::optional<Tensor>>& t_list) {
   for (const auto i : c10::irange(t_list.size())) {
     sync(t_list[i]);
   }
@@ -652,7 +652,7 @@ bool isFunctionalTensor(const c10::optional<Tensor>& t) {
   }
 }
 
-bool isFunctionalTensor(const c10::List<c10::optional<Tensor>>& t_list) {
+bool isFunctionalTensor(const c10::List<::std::optional<Tensor>>& t_list) {
   if (t_list.empty()) return false;
   auto functional_count = 0;
   for (const auto i : c10::irange(t_list.size())) {
diff --git a/aten/src/ATen/TensorIndexing.h b/aten/src/ATen/TensorIndexing.h
index b6bb4710900c55..eb29b4d5ad739f 100644
--- a/aten/src/ATen/TensorIndexing.h
+++ b/aten/src/ATen/TensorIndexing.h
@@ -317,10 +317,10 @@ static inline void recordTensorIndex(
   (*dim_ptr)++;
 };
 
-static inline c10::List<c10::optional<Tensor>> typeConvertIndices(
+static inline c10::List<::std::optional<Tensor>> typeConvertIndices(
     const Tensor& /*self*/,
     std::vector<TensorIndex>&& indices) {
-  c10::List<c10::optional<Tensor>> converted_inds;
+  c10::List<::std::optional<Tensor>> converted_inds;
   converted_inds.reserve(indices.size());
   for (auto&& i : std::move(indices)) {
     converted_inds.push_back(std::move(i));
diff --git a/aten/src/ATen/core/op_registration/op_registration_test.cpp b/aten/src/ATen/core/op_registration/op_registration_test.cpp
index a1c9c63052f1df..377cb403cdcfd1 100644
--- a/aten/src/ATen/core/op_registration/op_registration_test.cpp
+++ b/aten/src/ATen/core/op_registration/op_registration_test.cpp
@@ -1154,15 +1154,15 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
     "(int[]? a) -> int[]?");
 
   // Test list of optional (with empty list)
-  testArgTypes<c10::List<c10::optional<int64_t>>>::test(
-    c10::List<c10::optional<int64_t>>(c10::List<c10::optional<int64_t>>({})), [] (const c10::List<c10::optional<int64_t>>& v) {EXPECT_EQ(0, v.size());},
-    c10::List<c10::optional<int64_t>>(c10::List<c10::optional<int64_t>>({})), [] (const IValue& v) {EXPECT_EQ(0, v.to<c10::List<c10::optional<int64_t>>>().size());},
+  testArgTypes<c10::List<::std::optional<int64_t>>>::test(
+    c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({})), [] (const c10::List<::std::optional<int64_t>>& v) {EXPECT_EQ(0, v.size());},
+    c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({})), [] (const IValue& v) {EXPECT_EQ(0, v.to<c10::List<::std::optional<int64_t>>>().size());},
     "(int?[] a) -> int?[]");
 
   // Test list of optional (with values)
-  testArgTypes<c10::List<c10::optional<int64_t>>>::test(
-    c10::List<c10::optional<int64_t>>(c10::List<c10::optional<int64_t>>({3, c10::nullopt, 2})), [] (const c10::List<c10::optional<int64_t>>& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v);},
-    c10::List<c10::optional<int64_t>>(c10::List<c10::optional<int64_t>>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v.to<c10::List<c10::optional<int64_t>>>());},
+  testArgTypes<c10::List<::std::optional<int64_t>>>::test(
+    c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const c10::List<::std::optional<int64_t>>& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v);},
+    c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v.to<c10::List<::std::optional<int64_t>>>());},
     "(int?[] a) -> int?[]");
 
   // dict types
@@ -1234,15 +1234,15 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
     "(Dict(int, Tensor) a) -> Dict(int, Tensor)");
 
   // weird deeply nested type
-  using DeeplyNestedType = c10::List<c10::Dict<std::string, c10::List<c10::optional<c10::Dict<int64_t, std::string>>>>>;
+  using DeeplyNestedType = c10::List<c10::Dict<std::string, c10::List<::std::optional<c10::Dict<int64_t, std::string>>>>>;
   auto makeDeeplyNestedObject = [] () -> DeeplyNestedType {
     c10::Dict<int64_t, std::string> inner3;
     inner3.insert(1, "1");
-    c10::List<c10::optional<c10::Dict<int64_t, std::string>>> inner2;
+    c10::List<::std::optional<c10::Dict<int64_t, std::string>>> inner2;
     inner2.push_back(std::move(inner3));
-    c10::Dict<std::string, c10::List<c10::optional<c10::Dict<int64_t, std::string>>>> inner1;
+    c10::Dict<std::string, c10::List<::std::optional<c10::Dict<int64_t, std::string>>>> inner1;
     inner1.insert("key", std::move(inner2));
-    c10::List<c10::Dict<std::string, c10::List<c10::optional<c10::Dict<int64_t, std::string>>>>> result;
+    c10::List<c10::Dict<std::string, c10::List<::std::optional<c10::Dict<int64_t, std::string>>>>> result;
     result.push_back(inner1);
     return result;
   };
diff --git a/aten/src/ATen/templates/RegisterFunctionalization.cpp b/aten/src/ATen/templates/RegisterFunctionalization.cpp
index 4eb587ab468d29..fabc12a03fa9f6 100644
--- a/aten/src/ATen/templates/RegisterFunctionalization.cpp
+++ b/aten/src/ATen/templates/RegisterFunctionalization.cpp
@@ -85,8 +85,8 @@ inline c10::List<at::Tensor> to_meta(const c10::List<at::Tensor>& t_list) {
   return outputs;
 }
 
-inline c10::List<c10::optional<at::Tensor>> to_meta(const c10::List<c10::optional<at::Tensor>>& t_list) {
-  c10::List<c10::optional<at::Tensor>> outputs;
+inline c10::List<::std::optional<at::Tensor>> to_meta(const c10::List<::std::optional<at::Tensor>>& t_list) {
+  c10::List<::std::optional<at::Tensor>> outputs;
   outputs.reserve(t_list.size());
   for (const auto i : c10::irange(t_list.size())) {
     outputs.push_back(to_meta(t_list[i]));
diff --git a/caffe2/contrib/aten/aten_op.cc b/caffe2/contrib/aten/aten_op.cc
index dba68d21c2dd1c..ac15e9cc6fc208 100644
--- a/caffe2/contrib/aten/aten_op.cc
+++ b/caffe2/contrib/aten/aten_op.cc
@@ -6,10 +6,10 @@ namespace caffe2 {
 namespace internal {
 at::Tensor index_with_uint8_handling(
     const at::Tensor& self,
-    const torch::List<c10::optional<at::Tensor>>& indices) {
+    const torch::List<std::optional<at::Tensor>>& indices) {
   // Support BC only for the simplest case of mask indexing
   if (indices.size() == 1) {
-    c10::optional<at::Tensor> first = indices[0];
+    std::optional<at::Tensor> first = indices[0];
     if (first.has_value() &&
         first->scalar_type() == at::kByte) {
       TORCH_WARN(
diff --git a/caffe2/contrib/aten/aten_op_template.h b/caffe2/contrib/aten/aten_op_template.h
index 281913c4911e98..21a90d9d21cfb2 100644
--- a/caffe2/contrib/aten/aten_op_template.h
+++ b/caffe2/contrib/aten/aten_op_template.h
@@ -22,7 +22,7 @@ using at::Half; // for AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ...)
 namespace internal {
 TORCH_API at::Tensor index_with_uint8_handling(
     const at::Tensor& self,
-    const torch::List<c10::optional<at::Tensor>>& indices);
+    const torch::List<std::optional<at::Tensor>>& indices);
 }
 
 template <class Context>
@@ -94,8 +94,8 @@ class ATenOp : public Operator<Context> {
     return results;
   }
 
-  torch::List<c10::optional<at::Tensor>> peekSliceOptionals(size_t i, size_t len, size_t N) {
-    torch::List<c10::optional<at::Tensor>> results;
+  torch::List<std::optional<at::Tensor>> peekSliceOptionals(size_t i, size_t len, size_t N) {
+    torch::List<std::optional<at::Tensor>> results;
     results.reserve(len);
     for (size_t ii = i; ii < i + len; ++ii) {
       results.push_back(peek(ii, N));
diff --git a/caffe2/contrib/aten/gen_op.py b/caffe2/contrib/aten/gen_op.py
index 5189af6f2c5369..d1408e762bda6f 100755
--- a/caffe2/contrib/aten/gen_op.py
+++ b/caffe2/contrib/aten/gen_op.py
@@ -73,7 +73,7 @@ def value_is_tensor_type(v):
 TENSORLIST_TYPE = [
     'at::TensorList',
     'const at::ITensorListRef &',
-    'const c10::List<c10::optional<at::Tensor>> &',
+    'const c10::List<::std::optional<at::Tensor>> &',
 ]
 
 # for each aten type, how do we handle a return value of that type?
@@ -298,7 +298,7 @@ def emit_assignments(o, env):
            env['statements'].append(
                'auto {} = peekSlice({}, InputSize() - {}, InputSize());'
                .format(arg['name'], real_inputs, static_tensor_inputs))
-        elif arg['type'] == 'const c10::List<c10::optional<at::Tensor>> &':
+        elif arg['type'] == 'const c10::List<::std::optional<at::Tensor>> &':
            # NOTE: do not advance real_inputs here. After this we will
            # switch to indexing the "stack" from the end
            env['statements'].append(
diff --git a/test/test_overrides.py b/test/test_overrides.py
index 1a065d99f68ba8..936303e459c3a5 100644
--- a/test/test_overrides.py
+++ b/test/test_overrides.py
@@ -638,7 +638,7 @@ def _simple_type_parser(func, arg_name, arg_type):
        return instance_gen()
    elif arg_type == "TensorList" or arg_type == "ITensorListRef":
        return [instance_gen(), instance_gen()]
-    elif arg_type == "c10::List<c10::optional<Tensor>>":
+    elif arg_type == "c10::List<::std::optional<Tensor>>":
        return [instance_gen(), instance_gen()]
    elif arg_type == "IntArrayRef" or arg_type == "SymIntArrayRef":
        size = arg.get("size", 2)
diff --git a/torch/csrc/lazy/core/shape_inference.cpp b/torch/csrc/lazy/core/shape_inference.cpp
index c1b3424c8df437..30f55afea2555d 100644
--- a/torch/csrc/lazy/core/shape_inference.cpp
+++ b/torch/csrc/lazy/core/shape_inference.cpp
@@ -177,21 +177,21 @@ std::vector<Shape> compute_shape_abs(const at::Tensor& self) {
 
 std::vector<Shape> compute_shape_bernoulli(
     const at::Tensor& self,
-    c10::optional<at::Generator> generator) {
+    ::std::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
 std::vector<Shape> compute_shape_bernoulli(
     const at::Tensor& self,
     double p,
-    c10::optional<at::Generator> generator) {
+    ::std::optional<at::Generator> generator) {
   return compute_shape_bernoulli(self, generator);
 }
 
 std::vector<Shape> compute_shape_binary_cross_entropy(
     const at::Tensor& self,
     const at::Tensor& target,
-    const c10::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& weight,
     int64_t reduction) {
   if (reduction == at::Reduction::None) {
     return {Shape(self.scalar_type(), self.sizes().vec())};
@@ -203,7 +203,7 @@ std::vector<Shape> compute_shape_binary_cross_entropy_backward(
     const at::Tensor& grad_output,
     const at::Tensor& self,
     const at::Tensor& target,
-    const c10::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& weight,
     int64_t reduction) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
@@ -286,7 +286,7 @@ std::vector<Shape> compute_shape_convolution_backward(
 std::vector<Shape> compute_shape_convolution(
     const at::Tensor& input,
     const at::Tensor& weight,
-    const c10::optional<at::Tensor>& bias,
+    const ::std::optional<at::Tensor>& bias,
     at::IntArrayRef stride,
     at::IntArrayRef padding,
     at::IntArrayRef dilation,
@@ -390,19 +390,19 @@ std::vector<Shape> compute_shape_embedding(
 }
 
 std::vector<Shape> compute_shape_std(const at::Tensor& self, bool unbiased) {
-  return compute_shape_std(self, c10::nullopt, c10::nullopt, false);
+  return compute_shape_std(self, ::std::nullopt, ::std::nullopt, false);
 }
 std::vector<Shape> compute_shape_std(
     const at::Tensor& self,
     at::OptionalIntArrayRef dim,
     bool unbiased,
     bool keepdim) {
-  return compute_shape_std(self, dim, c10::nullopt, keepdim);
+  return compute_shape_std(self, dim, ::std::nullopt, keepdim);
 }
 std::vector<Shape> compute_shape_std(
     const at::Tensor& self,
     at::OptionalIntArrayRef dim,
-    const c10::optional<at::Scalar>& correction,
+    const ::std::optional<at::Scalar>& correction,
     bool keepdim) {
   if (dim.has_value()) {
     auto shape = at::native::shape_from_dim_mask(
@@ -530,10 +530,10 @@ TORCH_API std::vector<Shape> compute_shape_cholesky(
 
 std::vector<Shape> compute_shape_native_batch_norm(
     const at::Tensor& input,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
-    const c10::optional<at::Tensor>& running_mean,
-    const c10::optional<at::Tensor>& running_var,
+    const ::std::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& bias,
+    const ::std::optional<at::Tensor>& running_mean,
+    const ::std::optional<at::Tensor>& running_var,
     bool training,
     double momentum,
     double eps) {
@@ -570,11 +570,11 @@ std::vector<Shape> compute_shape_native_batch_norm(
 std::vector<Shape> compute_shape_native_batch_norm_backward(
     const at::Tensor& grad_out,
     const at::Tensor& input,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& running_mean,
-    const c10::optional<at::Tensor>& running_var,
-    const c10::optional<at::Tensor>& save_mean,
-    const c10::optional<at::Tensor>& save_invstd,
+    const ::std::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& running_mean,
+    const ::std::optional<at::Tensor>& running_var,
+    const ::std::optional<at::Tensor>& save_mean,
+    const ::std::optional<at::Tensor>& save_invstd,
     bool train,
     double eps,
     ::std::array<bool, 3> output_mask) {
@@ -602,8 +602,8 @@ std::vector<Shape> compute_shape_native_batch_norm_backward(
 std::vector<Shape> compute_shape_native_layer_norm(
     const at::Tensor& input,
     at::IntArrayRef normalized_shape,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const ::std::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& bias,
     double eps) {
   // Copied from aten/src/ATen/native/layer_norm.cpp::layer_norm_cpu_out.
   auto input_shape = input.sizes().vec();
@@ -631,8 +631,8 @@ std::vector<Shape> compute_shape_native_layer_norm_backward(
     at::IntArrayRef normalized_shape,
     const at::Tensor& mean,
     const at::Tensor& rstd,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const ::std::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& bias,
     ::std::array<bool, 3> output_mask) {
   std::vector<Shape> shapes;
   shapes.emplace_back(
@@ -650,7 +650,7 @@ std::vector<Shape> compute_shape_native_layer_norm_backward(
 
 std::vector<Shape> compute_shape_mean(
     const at::Tensor& self,
-    c10::optional<at::ScalarType> dtype) {
+    ::std::optional<at::ScalarType> dtype) {
   if (dtype.has_value()) {
     return {Shape(dtype.value(), {})};
   }
@@ -661,10 +661,10 @@ std::vector<Shape> compute_shape_new_empty_strided(
     const at::Tensor& self,
     at::IntArrayRef size,
     at::IntArrayRef stride,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory) {
+    ::std::optional<at::ScalarType> dtype,
+    ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device,
+    ::std::optional<bool> pin_memory) {
   return {Shape(dtype.has_value() ? *dtype : self.scalar_type(), size.vec())};
 }
@@ -677,7 +677,7 @@ std::vector<Shape> compute_shape_mv(
 std::vector<Shape> compute_shape_native_dropout(
     const at::Tensor& input,
     double p,
-    c10::optional<bool> train) {
+    ::std::optional<bool> train) {
   return {
       Shape(input.scalar_type(), input.sizes().vec()),
       Shape(c10::ScalarType::Bool, input.sizes().vec())};
@@ -692,22 +692,22 @@ std::vector<Shape> compute_shape_native_dropout_backward(
 
 std::vector<Shape> compute_shape_random(
     const at::Tensor& self,
-    c10::optional<at::Generator> generator) {
+    ::std::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
 std::vector<Shape> compute_shape_random(
     const at::Tensor& self,
     int64_t to,
-    c10::optional<at::Generator> generator) {
+    ::std::optional<at::Generator> generator) {
   return compute_shape_random(self, generator);
 }
 
 std::vector<Shape> compute_shape_random(
     const at::Tensor& self,
     int64_t from,
-    c10::optional<int64_t> to,
-    c10::optional<at::Generator> generator) {
+    ::std::optional<int64_t> to,
+    ::std::optional<at::Generator> generator) {
   return compute_shape_random(self, generator);
 }
 
@@ -717,7 +717,7 @@ std::vector<Shape> compute_shape_relu(const at::Tensor& self) {
 
 std::vector<Shape> compute_shape_sum(
     const at::Tensor& self,
-    c10::optional<at::ScalarType> dtype) {
+    ::std::optional<at::ScalarType> dtype) {
   if (dtype.has_value()) {
     return {Shape(dtype.value(), {})};
   }
@@ -836,7 +836,7 @@ std::vector<Shape> compute_shape_log_sigmoid_backward(
 std::vector<Shape> compute_shape_nll_loss2d_forward(
     const at::Tensor& self,
     const at::Tensor& target,
-    const c10::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& weight,
     int64_t reduction,
     int64_t ignore_index) {
   // Based on definition of
@@ -851,7 +851,7 @@ std::vector<Shape> compute_shape_nll_loss2d_backward(
     const at::Tensor& grad_output,
     const at::Tensor& self,
     const at::Tensor& target,
-    const c10::optional<at::Tensor>& weight,
+    const ::std::optional<at::Tensor>& weight,
     int64_t reduction,
     int64_t ignore_index,
     const at::Tensor& total_weight) {
@@ -1075,12 +1075,12 @@ std::vector<Shape> compute_shape_clamp_min(
 
 std::vector<Shape> compute_shape__to_copy(
     const at::Tensor& self,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory,
+    ::std::optional<at::ScalarType> dtype,
+    ::std::optional<at::Layout> layout,
+    ::std::optional<at::Device> device,
+    ::std::optional<bool> pin_memory,
     bool non_blocking,
-    c10::optional<at::MemoryFormat> memory_format) {
+    ::std::optional<at::MemoryFormat> memory_format) {
   if (dtype) {
     return {Shape(*dtype, self.sizes().vec())};
   }
@@ -1089,7 +1089,7 @@ std::vector<Shape> compute_shape__to_copy(
 
 TORCH_API std::vector<Shape> compute_shape_clone(
     const at::Tensor& self,
-    c10::optional<at::MemoryFormat> memory_format) {
+    ::std::optional<at::MemoryFormat> memory_format) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
@@ -1175,7 +1175,7 @@ std::vector<Shape> compute_shape_view(
 std::vector<Shape> compute_shape_cast(
     const Output& input,
     const at::ScalarType& dtype,
-    const c10::optional<at::ScalarType>& stype) {
+    const ::std::optional<at::ScalarType>& stype) {
   Shape shape = input.shape();
   shape.set_scalar_type(dtype);
   return {shape};
@@ -1274,17 +1274,17 @@ std::vector<Shape> compute_shape_select_scatter(
   auto self_meta = at::native::empty_strided_meta_symint(
       self.sym_sizes(),
       self.sym_strides(),
-      /*dtype=*/c10::make_optional(self.scalar_type()),
-      /*layout=*/c10::make_optional(self.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(self.scalar_type()),
+      /*layout=*/::std::make_optional(self.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto src_meta = at::native::empty_strided_meta_symint(
       src.sym_sizes(),
       src.sym_strides(),
-      /*dtype=*/c10::make_optional(src.scalar_type()),
-      /*layout=*/c10::make_optional(src.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(src.scalar_type()),
+      /*layout=*/::std::make_optional(src.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto out_meta = at::compositeexplicitautogradnonfunctional::select_scatter(
       self_meta, src_meta, dim, index);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
@@ -1299,17 +1299,17 @@ std::vector<Shape> compute_shape_diagonal_scatter(
   auto self_meta = at::native::empty_strided_meta_symint(
       self.sym_sizes(),
       self.sym_strides(),
-      /*dtype=*/c10::make_optional(self.scalar_type()),
-      /*layout=*/c10::make_optional(self.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(self.scalar_type()),
+      /*layout=*/::std::make_optional(self.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto src_meta = at::native::empty_strided_meta_symint(
       src.sym_sizes(),
       src.sym_strides(),
-      /*dtype=*/c10::make_optional(src.scalar_type()),
-      /*layout=*/c10::make_optional(src.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(src.scalar_type()),
+      /*layout=*/::std::make_optional(src.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto out_meta = at::compositeexplicitautogradnonfunctional::diagonal_scatter(
       self_meta, src_meta, offset, dim1, dim2);
   return {Shape(out_meta.scalar_type(), out_meta.sizes().vec())};
@@ -1319,23 +1319,23 @@ std::vector<Shape> compute_shape_slice_scatter_symint(
     const at::Tensor& self,
     const at::Tensor& src,
     int64_t dim,
-    c10::optional<c10::SymInt> start,
-    c10::optional<c10::SymInt> end,
+    ::std::optional<c10::SymInt> start,
+    ::std::optional<c10::SymInt> end,
     c10::SymInt step) {
   auto self_meta = at::native::empty_strided_meta_symint(
       self.sym_sizes(),
       self.sym_strides(),
-      /*dtype=*/c10::make_optional(self.scalar_type()),
-      /*layout=*/c10::make_optional(self.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(self.scalar_type()),
+      /*layout=*/::std::make_optional(self.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto src_meta = at::native::empty_strided_meta_symint(
       src.sym_sizes(),
       src.sym_strides(),
-      /*dtype=*/c10::make_optional(src.scalar_type()),
-      /*layout=*/c10::make_optional(src.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(src.scalar_type()),
+      /*layout=*/::std::make_optional(src.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto out_meta =
       at::compositeexplicitautogradnonfunctional::slice_scatter_symint(
           self_meta, src_meta, dim, start, end, step);
@@ -1347,21 +1347,21 @@ std::vector<Shape> compute_shape_as_strided_scatter_symint(
     const at::Tensor& src,
     at::SymIntArrayRef size,
     at::SymIntArrayRef stride,
-    c10::optional<c10::SymInt> storage_offset) {
+    ::std::optional<c10::SymInt> storage_offset) {
   auto self_meta = at::native::empty_strided_meta_symint(
       self.sym_sizes(),
       self.sym_strides(),
-      /*dtype=*/c10::make_optional(self.scalar_type()),
-      /*layout=*/c10::make_optional(self.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(self.scalar_type()),
+      /*layout=*/::std::make_optional(self.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto src_meta = at::native::empty_strided_meta_symint(
       src.sym_sizes(),
       src.sym_strides(),
-      /*dtype=*/c10::make_optional(src.scalar_type()),
-      /*layout=*/c10::make_optional(src.layout()),
-      /*device=*/c10::make_optional(c10::Device(c10::kMeta)),
-      /*pin_memory=*/c10::nullopt);
+      /*dtype=*/::std::make_optional(src.scalar_type()),
+      /*layout=*/::std::make_optional(src.layout()),
+      /*device=*/::std::make_optional(c10::Device(c10::kMeta)),
+      /*pin_memory=*/::std::nullopt);
   auto out_meta =
       at::compositeexplicitautogradnonfunctional::as_strided_scatter_symint(
          self_meta, src_meta, size, stride, storage_offset);
@@ -1372,7 +1372,7 @@ std::vector<Shape> compute_shape_normal_functional(
     const at::Tensor& self,
     double mean,
     double std,
-    c10::optional<at::Generator> generator) {
+    ::std::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
@@ -1380,7 +1380,7 @@ std::vector<Shape> compute_shape_uniform(
     const at::Tensor& self,
     double from,
     double to,
-    c10::optional<at::Generator> generator) {
+    ::std::optional<at::Generator> generator) {
   return {Shape(self.scalar_type(), self.sizes().vec())};
 }
 
diff --git a/torch/csrc/lazy/core/shape_inference.h b/torch/csrc/lazy/core/shape_inference.h
index a8388a0b223576..77eeaaa563187f 100644
--- a/torch/csrc/lazy/core/shape_inference.h
+++ b/torch/csrc/lazy/core/shape_inference.h
@@ -24,16 +24,16 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d(con
 TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_cat(at::TensorList tensors, int64_t dim);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_cholesky(const at::Tensor & self, bool upper);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_clamp_min(const at::Tensor & self, const at::Scalar & min);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_clone(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const
at::Scalar & value);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
@@ -57,23 +57,23 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_xor(const at::Te
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_max(const at::Tensor & self);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_mean(const at::Tensor & self, c10::optional<at::ScalarType> dtype);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_min(const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_mv(const at::Tensor & self, const at::Tensor & vec);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout(const at::Tensor & input, double p, c10::optional<bool> train);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout(const at::Tensor & input, double p, ::std::optional<bool> train);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_nonzero(const at::Tensor & self);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, c10::optional<at::Generator> generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator);
-TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_relu(const at::Tensor & self);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
 TORCH_API std::vector<torch::lazy::Shape> compute_shape_slogdet(const at::Tensor & self);
@@ -82,9 +82,9 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_sort(const
at::Tensor & TORCH_API std::vector compute_shape_stack(at::TensorList tensors, int64_t dim); TORCH_API std::vector compute_shape_std(const at::Tensor & self, bool unbiased); TORCH_API std::vector compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim); -TORCH_API std::vector compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); -TORCH_API std::vector compute_shape_sum(const at::Tensor & self, c10::optional dtype); -TORCH_API std::vector compute_shape__to_copy(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, c10::optional memory_format); +TORCH_API std::vector compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional & correction, bool keepdim); +TORCH_API std::vector compute_shape_sum(const at::Tensor & self, ::std::optional dtype); +TORCH_API std::vector compute_shape__to_copy(const at::Tensor & self, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, bool non_blocking, ::std::optional memory_format); TORCH_API std::vector compute_shape_take(const at::Tensor & self, const at::Tensor & index); TORCH_API std::vector compute_shape_trace(const at::Tensor & self); TORCH_API std::vector compute_shape_zero(const at::Tensor & self); @@ -92,13 +92,13 @@ TORCH_API std::vector compute_shape_narrow_copy_symint(const TORCH_API std::vector compute_shape_hardswish(const at::Tensor & self); TORCH_API std::vector compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); TORCH_API std::vector compute_shape_selu(const at::Tensor & self); -TORCH_API std::vector compute_shape_uniform(const at::Tensor & self, double from, double to, c10::optional generator); +TORCH_API std::vector compute_shape_uniform(const at::Tensor & self, double from, double to, ::std::optional generator); // Non-Native ops TORCH_API std::vector compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type); TORCH_API std::vector compute_shape_expand(const Output& input0, const std::vector& size, const bool& is_scalar_expand); TORCH_API std::vector compute_shape_view(const Output& input0, const std::vector& output_sizes); -TORCH_API std::vector compute_shape_cast(const Output& input0, const at::ScalarType& dtype, const c10::optional& stype); +TORCH_API std::vector compute_shape_cast(const Output& input0, const at::ScalarType& dtype, const ::std::optional& stype); // View Ops // (Now that functionalization pass is used, we should kill these in a later PR) @@ -117,8 +117,8 @@ TORCH_API std::vector compute_shape_unsqueeze(const Output& input, const TORCH_API std::vector compute_shape_select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index); TORCH_API std::vector compute_shape_diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2); -TORCH_API std::vector compute_shape_slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); -TORCH_API std::vector compute_shape_as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); +TORCH_API std::vector compute_shape_slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional 
start, ::std::optional end, c10::SymInt step); +TORCH_API std::vector compute_shape_as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset); // clang-format on } // namespace lazy } // namespace torch diff --git a/torch/csrc/utils/python_arg_parser.cpp b/torch/csrc/utils/python_arg_parser.cpp index 4ed195b2aac263..9ea90e8911dbd3 100644 --- a/torch/csrc/utils/python_arg_parser.cpp +++ b/torch/csrc/utils/python_arg_parser.cpp @@ -30,7 +30,7 @@ static std::unordered_map type_map = { {"double", ParameterType::DOUBLE}, {"complex", ParameterType::COMPLEX}, {"TensorList", ParameterType::TENSOR_LIST}, - {"c10::List>", ParameterType::TENSOR_LIST}, + {"c10::List<::std::optional>", ParameterType::TENSOR_LIST}, {"IntArrayRef", ParameterType::INT_LIST}, {"SymIntArrayRef", ParameterType::SYM_INT_LIST}, {"ArrayRef", ParameterType::FLOAT_LIST}, diff --git a/torchgen/api/cpp.py b/torchgen/api/cpp.py index f5466030daa6ba..55ae8758b2b3c3 100644 --- a/torchgen/api/cpp.py +++ b/torchgen/api/cpp.py @@ -312,7 +312,7 @@ def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequenc JIT_TO_CPP_DEFAULT = { "False": "false", "True": "true", - "None": "c10::nullopt", # UGH this one is type directed + "None": "::std::nullopt", # UGH this one is type directed "Mean": "at::Reduction::Mean", "[]": "{}", "contiguous_format": "MemoryFormat::Contiguous", @@ -347,7 +347,7 @@ def default_expr(d: str, t: Type, *, symint: bool) -> str: if isinstance(t, OptionalType): if d == "None": - return "c10::nullopt" + return "::std::nullopt" return default_expr(d, t.elem, symint=symint) diff --git a/torchgen/api/python.py b/torchgen/api/python.py index 1a3b4505d9df63..cdad0cb4e00f02 100644 --- a/torchgen/api/python.py +++ b/torchgen/api/python.py @@ -62,9 +62,9 @@ # Note: the scattered TensorOptions fields are packed into 'options'. # # auto dispatch_empty = -# [](IntArrayRef size, c10::optional names, +# [](IntArrayRef size, std::optional names, # const TensorOptions & options, -# c10::optional memory_format) -> Tensor { +# std::optional memory_format) -> Tensor { # pybind11::gil_scoped_release no_gil; # return torch::empty(size, names, options, memory_format); # }; @@ -93,9 +93,9 @@ # Where does 'names' come from? It involves special local init: # # auto __names = _r.toDimnameListOptional(1); -# c10::optional names = -# __names ? c10::make_optional(DimnameList(__names.value())) -# : c10::nullopt; +# std::optional names = +# __names ? std::make_optional(DimnameList(__names.value())) +# : std::nullopt; # # Where does 'options' come from? It involves special local init # for TensorOptions. 
Note that Python side has the additional @@ -235,6 +235,8 @@ def argument_str(self, *, method: bool = False, symint: bool = True) -> str: default = { "nullptr": "None", "c10::nullopt": "None", + "::std::nullopt": "None", + "std::nullopt": "None", "{}": "None", }.get(self.default, self.default) return f"{type_str} {name}={default}" @@ -280,6 +282,8 @@ def argument_str_pyi( default = { "nullptr": "None", "c10::nullopt": "None", + "::std::nullopt": "None", + "std::nullopt": "None", "{}": "None", "MemoryFormat::Contiguous": "contiguous_format", "QScheme::PER_TENSOR_AFFINE": "per_tensor_affine", @@ -697,9 +701,9 @@ def argument_type_str( return f"ScalarList[{size}]" if size is not None else "ScalarList" elif str(t.elem) == "Tensor?": if simple_type: - return "c10::List>" + return "c10::List<::std::optional>" else: - return "const c10::List> &" + return "const c10::List<::std::optional> &" elif str(t.elem) == "Dimname": return f"DimnameList[{size}]" if size is not None else "DimnameList" elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint) @@ -1308,7 +1312,13 @@ def arg_parser_unpack_method( return "generator" elif str(t.elem) == "Dimname[]": return "toDimnameListOptional" - elif not has_default_init and default in (None, "None", "c10::nullopt"): + elif not has_default_init and default in ( + None, + "None", + "c10::nullopt", + "::std::nullopt", + "std::nullopt", + ): # If default is None: append 'Optional' to elem's unpacking method return ( arg_parser_unpack_method(t.elem, None, None, symint=symint) + "Optional" @@ -1430,7 +1440,7 @@ def dispatch_lambda_exprs( inits.extend( [ f"auto __{name} = {arg_parser_expr};", - f"c10::optional {name} = __{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;", # noqa: B950 + f"::std::optional {name} = __{name} ? ::std::make_optional(DimnameList(__{name}.value())) : ::std::nullopt;", # noqa: B950 ] ) lambda_args_exprs[name] = name diff --git a/torchgen/api/translate.py b/torchgen/api/translate.py index f59b6eab24d6a0..98f0c251acbde4 100644 --- a/torchgen/api/translate.py +++ b/torchgen/api/translate.py @@ -323,7 +323,7 @@ def direct_solve(goal: NamedCType) -> str: # If we're calling a factory op from its out= variant, # We don't actually care about the value of pin_memory. out_tensor = direct_solve(out_tensor_ctype) - return "c10::nullopt" + return "::std::nullopt" # We can always do translations from value types to reference types, like vector -> IntArrayRef elif goal.type == BaseCType(intArrayRefT): @@ -347,7 +347,7 @@ def direct_solve(goal: NamedCType) -> str: argname = direct_solve( NamedCType(goal.name, OptionalCType(BaseCType(longT))) ) - return f"{argname}.has_value() ? c10::make_optional(c10::SymInt(*{argname})) : c10::nullopt" + return f"{argname}.has_value() ? ::std::make_optional(c10::SymInt(*{argname})) : ::std::nullopt" elif goal.type == BaseCType(longT): symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT))) return f"{symInt_type}.guard_int(__FILE__, __LINE__)" @@ -355,7 +355,7 @@ def direct_solve(goal: NamedCType) -> str: argname = direct_solve( NamedCType(goal.name, OptionalCType(BaseCType(SymIntT))) ) - return f"{argname}.has_value() ? c10::make_optional({argname}->guard_int(__FILE__, __LINE__)) : c10::nullopt" + return f"{argname}.has_value() ? 
::std::make_optional({argname}->guard_int(__FILE__, __LINE__)) : ::std::nullopt" elif goal.type == BaseCType(optionalIntArrayRefT): try: return direct_solve(NamedCType(goal.name, optionalLongVec_ctype)) @@ -363,14 +363,14 @@ def direct_solve(goal: NamedCType) -> str: argname = direct_solve( NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT)) ) - return f"{argname}.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : c10::nullopt" + return f"{argname}.has_value() ? ::std::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : ::std::nullopt" elif goal.type == BaseCType(optionalSymIntArrayRefT): # TODO: You might also want to solve this from longSymVec_ctype or # an optional version of it argname = direct_solve( NamedCType(goal.name, BaseCType(optionalIntArrayRefT)) ) - return f"{argname}.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*{argname})) : c10::nullopt" + return f"{argname}.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*{argname})) : ::std::nullopt" elif goal.type == BaseCType(optionalScalarRefT): return direct_solve(NamedCType(goal.name, optionalScalar_ctype)) elif goal.type == BaseCType(optionalTensorRefT): @@ -398,22 +398,22 @@ def direct_solve(goal: NamedCType) -> str: goal.name, BaseCType(optionalIntArrayRefT) ) argname = direct_solve(optionalIntArrayRef_ctype) - return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt" + return f"{argname}.has_value() ? ::std::make_optional({argname}->vec()) : ::std::nullopt" elif goal.type == OptionalCType(BaseCType(scalarT)): optionalScalarRef_ctype = NamedCType( goal.name, BaseCType(optionalScalarRefT) ) argname = direct_solve(optionalScalarRef_ctype) - return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt" + return f"{argname}.has_value() ? ::std::make_optional({argname}) : ::std::nullopt" elif goal.type == OptionalCType(BaseCType(scalarT)): optionalTensorRef_ctype = NamedCType( goal.name, BaseCType(optionalTensorRefT) ) argname = direct_solve(optionalTensorRef_ctype) - return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt" + return f"{argname}.has_value() ? ::std::make_optional({argname}) : ::std::nullopt" # Technically, we also need to handle cases of C++ containers holding reference types. # But there currently aren't any ops that require lambda capture codegen - # With arguments like std::vector. + # With arguments like ::std::vector. # If that changes, we'll have to add the translation here. # We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor. diff --git a/torchgen/api/types/types.py b/torchgen/api/types/types.py index 16eff73638e469..debc640a66612c 100644 --- a/torchgen/api/types/types.py +++ b/torchgen/api/types/types.py @@ -34,7 +34,7 @@ TENSOR_LIST_LIKE_CTYPES = [ "at::TensorList", - "const c10::List> &", + "const c10::List<::std::optional> &", "const at::ITensorListRef &", ] @@ -133,10 +133,10 @@ class OptionalCType(CType): def cpp_type(self, *, strip_ref: bool = False) -> str: # Do not pass `strip_ref` recursively. 
- return f"c10::optional<{self.elem.cpp_type()}>" + return f"::std::optional<{self.elem.cpp_type()}>" def cpp_type_registration_declarations(self) -> str: - return f"c10::optional<{self.elem.cpp_type_registration_declarations()}>" + return f"::std::optional<{self.elem.cpp_type_registration_declarations()}>" def remove_const_ref(self) -> "CType": return OptionalCType(self.elem.remove_const_ref()) diff --git a/torchgen/api/unboxing.py b/torchgen/api/unboxing.py index df4430c49b7457..3cc20fe9d59f27 100644 --- a/torchgen/api/unboxing.py +++ b/torchgen/api/unboxing.py @@ -43,8 +43,8 @@ # ``` # - Dimname[]? names # ```cpp -# c10::optional names_opt = (std::move(peek(stack, 1, 7))).toOptional(); -# c10::optional> names_opt_out; +# ::std::optional names_opt = (std::move(peek(stack, 1, 7))).toOptional(); +# ::std::optional> names_opt_out; # if (names_opt.has_value()) { # ~~~~~~~~~~~ <-- Unwrapping optional shell # const c10::IValue names_opt_in = names_opt.value(); @@ -58,23 +58,23 @@ # } # at::ArrayRef names_list_out(names_vec); # -# names_opt_out = c10::optional>(names_list_out); +# names_opt_out = ::std::optional>(names_list_out); # } else { -# names_opt_out = c10::optional>(); +# names_opt_out = ::std::optional>(); # } # ``` # - ScalarType? dtype (similarly for the rest of the arguments) # ```cpp -# c10::optional dtype_opt = (std::move(peek(stack, 2, 7))).toOptional(); -# c10::optional dtype_opt_out; +# ::std::optional dtype_opt = (std::move(peek(stack, 2, 7))).toOptional(); +# ::std::optional dtype_opt_out; # if (dtype_opt.has_value()) { # const c10::IValue dtype_opt_in = dtype_opt.value(); # at::ScalarType dtype_base = dtype_opt_in.to(); # ~~~~~~~~~~~~~~~~~~~~ <-- For base types, convert ivalue to it # directly using ".to()" API. -# dtype_opt_out = c10::optional(dtype_base); +# dtype_opt_out = ::std::optional(dtype_base); # } else { -# dtype_opt_out = c10::optional(); +# dtype_opt_out = ::std::optional(); # } # ``` # @@ -184,7 +184,7 @@ def _gen_code_optional_type( res_name, _, res_code, decl = argumenttype_ivalue_convert(t.elem, in_name) return ( f""" -c10::optional {arg_name}_opt = {arg_name}.toOptional(); +auto {arg_name}_opt = {arg_name}.toOptional(); {ctype.cpp_type(strip_ref=True)} {out_name}; if ({arg_name}_opt.has_value()) {{ const c10::IValue {in_name} = {arg_name}_opt.value(); @@ -216,7 +216,7 @@ def _gen_code_list_type( "\n" ) ) - # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List> + # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<::std::optional> elif isinstance(t.elem, OptionalType): code.extend( f""" diff --git a/torchgen/dest/lazy_ir.py b/torchgen/dest/lazy_ir.py index e00f4acd764ea5..9cd3dd419fc84f 100644 --- a/torchgen/dest/lazy_ir.py +++ b/torchgen/dest/lazy_ir.py @@ -59,13 +59,13 @@ def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str: if arg.is_symint_or_list: # TODO: I don't understand when you should put lazy_ in the name # or not - return f"{arg.name} ? c10::make_optional(GetSymIntValue(*{arg.name})) : c10::nullopt" + return f"{arg.name} ? std::make_optional(GetSymIntValue(*{arg.name})) : ::std::nullopt" elif arg.is_wrapped_scalar: return f"node_{arg.name}" return ( f"lazy_{arg.name} ? 
" - f"c10::make_optional(lazy_{arg.name}->GetIrValue()) : " - "c10::nullopt" + f"std::make_optional(lazy_{arg.name}->GetIrValue()) : " + "::std::nullopt" ) else: raise AssertionError( @@ -253,8 +253,8 @@ def gen(self, schema: LazyIrSchema) -> List[str]: scalar_initializers = ",\n ".join( [ # This code is just special casing the mapping from string_view -> strings - f"{a.name}({a.name}.has_value() ? c10::make_optional(std::string(*{a.name})) : c10::nullopt)" - if a.lazy_type.cpp_type() == "c10::optional" + f"{a.name}({a.name}.has_value() ? ::std::make_optional(std::string(*{a.name})) : ::std::nullopt)" + if a.lazy_type.cpp_type() == "::std::optional" else f"{a.name}({a.name})" for a in scalar_args ] @@ -265,8 +265,8 @@ def gen(self, schema: LazyIrSchema) -> List[str]: [ f"std::string {a.name};" if a.lazy_type.cpp_type() == "c10::string_view" - else f"c10::optional {a.name};" - if a.lazy_type.cpp_type() == "c10::optional" + else f"::std::optional {a.name};" + if a.lazy_type.cpp_type() == "::std::optional" else f"{a.lazy_type.cpp_type()} {a.name};" for a in scalar_args ] @@ -419,9 +419,9 @@ def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str: if isinstance(arg.lazy_type, OptionalCType): lazy_tensor_decls.append( f"""auto node_{arg.name} = {arg.name} ? - c10::make_optional(torch::lazy::LazyGraphExecutor::Get()-> + std::make_optional(torch::lazy::LazyGraphExecutor::Get()-> GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)): - c10::nullopt;""" + ::std::nullopt;""" ) else: lazy_tensor_decls.append( diff --git a/torchgen/dest/register_dispatch_key.py b/torchgen/dest/register_dispatch_key.py index 114b641c5b4dbf..fced019cc4e308 100644 --- a/torchgen/dest/register_dispatch_key.py +++ b/torchgen/dest/register_dispatch_key.py @@ -127,11 +127,11 @@ def gen_maybe_create_proxy_helper(backend_index: BackendIndex) -> List[str]: if empty_strided_impl is None else [ f""" -c10::optional maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{ +std::optional maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{ if (out.strides() != strides) {{ return {empty_strided_impl}(sizes, strides, options); }} - return c10::nullopt; + return std::nullopt; }} """ ] @@ -260,7 +260,7 @@ def gen_device_check( if type == DeviceCheckType.NoCheck: return " // No device check\n" - device_check = "c10::optional common_device = nullopt;\n" + device_check = "std::optional common_device = std::nullopt;\n" device_check += "(void)common_device; // Suppress unused variable warning\n" for arg in args: # Only tensor like arguments are eligible @@ -688,11 +688,11 @@ def gen_class( elif k is SchemaKind.inplace: output_type = "std::reference_wrapper" output_value = "proxy_outputs_[output_idx].has_value() ? *proxy_outputs_[output_idx] : outputs_[output_idx].get()" - proxy_field = f"std::array, {len(f.func.returns)}> proxy_outputs_;" + proxy_field = f"std::array<::std::optional, {len(f.func.returns)}> proxy_outputs_;" elif k is SchemaKind.out: output_type = "std::reference_wrapper" output_value = "proxy_outputs_[output_idx].has_value() ? 
*proxy_outputs_[output_idx] : outputs_[output_idx].get()" - proxy_field = f"std::array, {len(f.func.returns)}> proxy_outputs_;" + proxy_field = f"std::array<::std::optional, {len(f.func.returns)}> proxy_outputs_;" if self.backend_index.dispatch_key == DispatchKey.CUDA: if self.rocm: diff --git a/torchgen/executorch/api/unboxing.py b/torchgen/executorch/api/unboxing.py index 9a8f717ddbb28d..74b5e580b1d8d4 100644 --- a/torchgen/executorch/api/unboxing.py +++ b/torchgen/executorch/api/unboxing.py @@ -171,7 +171,7 @@ def _gen_code_list_type( ) ) # pytorch codegen: - # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List> + # we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<::std::optional> elif ( isinstance(t.elem, OptionalType) and isinstance(t.elem.elem, BaseType) @@ -180,8 +180,8 @@ def _gen_code_list_type( code.extend( f""" #ifdef USE_ATEN_LIB -at::ArrayRef> {in_name} = {arg_name}.toListOptionalTensor(); -c10::List> {out_name}; +auto {in_name} = {arg_name}.toListOptionalTensor(); +c10::List<::std::optional> {out_name}; for (auto {elem_name}: {in_name}) {{ {out_name}.push_back({elem_name}); }} diff --git a/torchgen/gen_aoti_c_shim.py b/torchgen/gen_aoti_c_shim.py index 88bbccfc635330..b5250161875a62 100644 --- a/torchgen/gen_aoti_c_shim.py +++ b/torchgen/gen_aoti_c_shim.py @@ -108,7 +108,7 @@ def convert_arg_type_and_name(typ: Type, name: str) -> Tuple[List[str], List[str c_types[j] = c_types[j] + "*" if aten_type.startswith("c10::ArrayRef<"): # ArrayRef is passed as pointer + size, but no need to add "*" to the size argument - new_aten_types.append(f"c10::optional<{aten_type}>") + new_aten_types.append(f"::std::optional<{aten_type}>") base_type = aten_type[len("c10::ArrayRef<") : -1] new_callsite_exprs.append( f"pointer_to_optional_list<{base_type}>({names[j]}, {names[j+1]})" @@ -116,13 +116,13 @@ def convert_arg_type_and_name(typ: Type, name: str) -> Tuple[List[str], List[str j += 2 elif aten_type == "c10::Device": # Device is passed as device_type + device_index - new_aten_types.append("c10::optional") + new_aten_types.append("::std::optional") new_callsite_exprs.append( f"pointer_to_optional_device({names[j]}, {names[j+1]})" ) j += 2 else: - new_aten_types.append(f"c10::optional<{aten_type}>") + new_aten_types.append(f"::std::optional<{aten_type}>") new_callsite_exprs.append( f"pointer_to_optional<{aten_type}>({names[j]})" ) @@ -152,8 +152,8 @@ def convert_arg_type_and_name(typ: Type, name: str) -> Tuple[List[str], List[str # construct std::array instead assert typ.size is not None callsite_exprs.append(f"pointer_to_list<{typ.size}>({name})") - elif atype == "c10::optional": - # convert from std::vector> to c10::List> + elif atype == "::std::optional": + # convert from std::vector<::std::optional> to c10::List<::std::optional> callsite_exprs.append( f"c10::List<{atype}>(c10::ArrayRef<{atype}>(pointer_to_list<{atype}>({name}, {name}_len_)))" ) diff --git a/torchgen/gen_lazy_tensor.py b/torchgen/gen_lazy_tensor.py index bdc65ce2eace2f..4f1c3a8513a57f 100644 --- a/torchgen/gen_lazy_tensor.py +++ b/torchgen/gen_lazy_tensor.py @@ -153,19 +153,19 @@ def get_ltc_helper_fns() -> str: // undefined tensors can't be converted to the meta device, since they don't have sizes/strides if (!tensor.defined()) return tensor; auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), \ -/*dtype=*/c10::make_optional(tensor.scalar_type()), /*layout=*/c10::make_optional(tensor.layout()), \ 
-/*device=*/c10::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/c10::nullopt);
+/*dtype=*/std::make_optional(tensor.scalar_type()), /*layout=*/std::make_optional(tensor.layout()), \
+/*device=*/std::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/std::nullopt);
   // needs to handle wrapped numbers, so dtype promotion works properly.
   if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
     out.unsafeGetTensorImpl()->set_wrapped_number(true);
   }
   return out;
 }
-c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor>& tensor) {
+std::optional<at::Tensor> to_meta(const std::optional<at::Tensor>& tensor) {
   if (tensor.has_value()) {
     return to_meta(*tensor);
   }
-  return c10::nullopt;
+  return std::nullopt;
 }
 std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {