From eee3b2f74b7e4e97280890510c47ba732738f3a2 Mon Sep 17 00:00:00 2001
From: cyy
Date: Sun, 16 Jun 2024 22:04:37 +0800
Subject: [PATCH] Use std::string_view

---
 test/cpp/test_aten_xla_tensor_4.cpp      |  4 ++--
 torch_xla/csrc/aten_autograd_ops.cpp     |  2 +-
 torch_xla/csrc/aten_autograd_ops.h       |  4 ++--
 torch_xla/csrc/aten_xla_type.cpp         | 20 ++++++++++----------
 torch_xla/csrc/cross_replica_reduces.cpp |  2 +-
 torch_xla/csrc/cross_replica_reduces.h   |  2 +-
 torch_xla/csrc/ops/scatter_reduce.cpp    |  2 +-
 torch_xla/csrc/ops/scatter_reduce.h      |  4 ++--
 torch_xla/csrc/tensor_methods.cpp        |  8 ++++----
 torch_xla/csrc/tensor_methods.h          |  8 ++++----
 10 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/test/cpp/test_aten_xla_tensor_4.cpp b/test/cpp/test_aten_xla_tensor_4.cpp
index 6c21ed5f901e..20b0789ca8cc 100644
--- a/test/cpp/test_aten_xla_tensor_4.cpp
+++ b/test/cpp/test_aten_xla_tensor_4.cpp
@@ -391,7 +391,7 @@ TEST_F(AtenXlaTensorTest, TestDiv) {
 }
 
 TEST_F(AtenXlaTensorTest, TestDivWithRoundingMode) {
-  std::optional<c10::string_view> rounding_modes[] = {"trunc", "floor",
+  std::optional<std::string_view> rounding_modes[] = {"trunc", "floor",
                                                       std::nullopt};
   for (const auto& rounding_mode : rounding_modes) {
     for (torch::ScalarType scalar_type1 :
@@ -453,7 +453,7 @@ TEST_F(AtenXlaTensorTest, TestDivInPlace) {
 }
 
 TEST_F(AtenXlaTensorTest, TestDivInPlaceWithRoundingMode) {
-  std::optional<c10::string_view> rounding_modes[] = {"trunc", "floor",
+  std::optional<std::string_view> rounding_modes[] = {"trunc", "floor",
                                                       std::nullopt};
   for (const auto& rounding_mode : rounding_modes) {
     for (torch::ScalarType scalar_type1 : {torch::kFloat}) {
diff --git a/torch_xla/csrc/aten_autograd_ops.cpp b/torch_xla/csrc/aten_autograd_ops.cpp
index 40b9790a1211..83204c55d76e 100644
--- a/torch_xla/csrc/aten_autograd_ops.cpp
+++ b/torch_xla/csrc/aten_autograd_ops.cpp
@@ -22,7 +22,7 @@ bool IsNonTrivialDilation(at::IntArrayRef dilation) {
 namespace aten_autograd_ops {
 
 torch::Tensor EinsumAutogradFunction::forward(
-    torch::autograd::AutogradContext* ctx, const c10::string_view equation,
+    torch::autograd::AutogradContext* ctx, const std::string_view equation,
     at::TensorList tensors) {
   std::string eq_str = std::string(equation);
   ctx->saved_data["equation"] = eq_str;
diff --git a/torch_xla/csrc/aten_autograd_ops.h b/torch_xla/csrc/aten_autograd_ops.h
index d1cc8a980482..99f203db4bc4 100644
--- a/torch_xla/csrc/aten_autograd_ops.h
+++ b/torch_xla/csrc/aten_autograd_ops.h
@@ -13,7 +13,7 @@ namespace aten_autograd_ops {
 struct EinsumAutogradFunction
     : public torch::autograd::Function<EinsumAutogradFunction> {
   static torch::Tensor forward(torch::autograd::AutogradContext* ctx,
-                               c10::string_view equation,
+                               std::string_view equation,
                                at::TensorList tensors);
   static torch::autograd::variable_list backward(
       torch::autograd::AutogradContext* ctx,
@@ -60,4 +60,4 @@ torch::Tensor max_pool2d_backward(torch::Tensor grad_output,
                                   torch::Tensor self,
 }  // namespace aten_autograd_ops
 }  // namespace torch_xla
-#endif  // XLA_TORCH_XLA_CSRC_ATEN_AUTOGRAD_OPS_H_
\ No newline at end of file
+#endif  // XLA_TORCH_XLA_CSRC_ATEN_AUTOGRAD_OPS_H_
diff --git a/torch_xla/csrc/aten_xla_type.cpp b/torch_xla/csrc/aten_xla_type.cpp
index 3459b8935e86..d04f32a46252 100644
--- a/torch_xla/csrc/aten_xla_type.cpp
+++ b/torch_xla/csrc/aten_xla_type.cpp
@@ -1363,7 +1363,7 @@ at::Tensor XLANativeFunctions::div(const at::Tensor& self,
 
 at::Tensor XLANativeFunctions::div(
     const at::Tensor& self, const at::Tensor& other,
-    std::optional<c10::string_view> rounding_mode) {
+    std::optional<std::string_view> rounding_mode) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   at::ScalarType dtype = at::result_type(self, other);
   auto operands = GetBinaryOperands(self, UnwrapNumber(other, dtype));
@@ -1401,7 +1401,7 @@ at::Tensor XLANativeFunctions::dot(const at::Tensor& self,
       bridge::GetXlaTensor(self), bridge::GetXlaTensor(tensor)));
 }
 
-at::Tensor XLANativeFunctions::einsum(c10::string_view equation,
+at::Tensor XLANativeFunctions::einsum(std::string_view equation,
                                       at::TensorList tensors,
                                       at::OptionalIntArrayRef path) {
   std::string cleansed_equation = std::string(equation);
@@ -1660,7 +1660,7 @@ at::Tensor XLANativeFunctions::gather(const at::Tensor& self, int64_t dim,
 }
 
 at::Tensor XLANativeFunctions::gelu(const at::Tensor& self,
-                                    c10::string_view approximate) {
+                                    std::string_view approximate) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   return bridge::AtenFromXlaTensor(
       tensor_methods::gelu(bridge::GetXlaTensor(self), approximate));
 }
@@ -1668,7 +1668,7 @@ at::Tensor XLANativeFunctions::gelu(const at::Tensor& self,
 
 at::Tensor XLANativeFunctions::gelu_backward(const at::Tensor& grad,
                                              const at::Tensor& self,
-                                             c10::string_view approximate) {
+                                             std::string_view approximate) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   at::ScalarType result_type = at::result_type(grad, self);
   return bridge::AtenFromXlaTensor(tensor_methods::gelu_backward(
@@ -3031,7 +3031,7 @@ at::Tensor XLANativeFunctions::rsub(const at::Tensor& self,
 at::Tensor scatter_reduce_helper(const at::Tensor& self, int64_t dim,
                                  const at::Tensor& index,
                                  const at::Tensor& src,
-                                 std::optional<c10::string_view> reduce) {
+                                 std::optional<std::string_view> reduce) {
   XLATensorPtr self_tensor = bridge::GetXlaTensor(self);
   if (!reduce.has_value()) {
     return bridge::AtenFromXlaTensor(
@@ -3052,7 +3052,7 @@ at::Tensor scatter_reduce_helper(const at::Tensor& self, int64_t dim,
 at::Tensor scatter_reduce_helper(const at::Tensor& self, int64_t dim,
                                  const at::Tensor& index,
                                  const at::Scalar& value,
-                                 std::optional<c10::string_view> reduce) {
+                                 std::optional<std::string_view> reduce) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   XLATensorPtr self_tensor = bridge::GetXlaTensor(self);
   if (!reduce.has_value()) {
@@ -3087,7 +3087,7 @@ at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
 at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
                                        const at::Tensor& index,
                                        const at::Tensor& src,
-                                       c10::string_view reduce) {
+                                       std::string_view reduce) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   return scatter_reduce_helper(self, dim, index, src, reduce);
 }
@@ -3095,7 +3095,7 @@ at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
 at::Tensor XLANativeFunctions::scatter(const at::Tensor& self, int64_t dim,
                                        const at::Tensor& index,
                                        const at::Scalar& value,
-                                       c10::string_view reduce) {
+                                       std::string_view reduce) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   return scatter_reduce_helper(self, dim, index, value, reduce);
 }
@@ -3111,7 +3111,7 @@ at::Tensor XLANativeFunctions::scatter_add(const at::Tensor& self, int64_t dim,
 // supported
 at::Tensor XLANativeFunctions::scatter_reduce(
     const at::Tensor& self, int64_t dim, const at::Tensor& index,
-    const at::Tensor& src, c10::string_view reduce, bool include_self) {
+    const at::Tensor& src, std::string_view reduce, bool include_self) {
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   if ((reduce == "sum" || reduce == "prod" || reduce == "amin" ||
        reduce == "amax") &&
@@ -3741,7 +3741,7 @@ at::Tensor& XLANativeFunctions::zero_(at::Tensor& self) {
 
 std::tuple<at::Tensor, at::Tensor, at::Tensor> XLANativeFunctions::_linalg_svd(
     const at::Tensor& self, bool full_matrices, bool compute_uv,
-    std::optional<c10::string_view> /* driver */) {
+    std::optional<std::string_view> /* driver */) {
   // The optional driver string is only for CUDA with a cuSOLVER backend.
   TORCH_LAZY_FN_COUNTER_TIMED_TRACING("xla::");
   // As per https://pytorch.org/docs/stable/generated/torch.svd.html,
diff --git a/torch_xla/csrc/cross_replica_reduces.cpp b/torch_xla/csrc/cross_replica_reduces.cpp
index 72b7eed9b844..ba63fff77f90 100644
--- a/torch_xla/csrc/cross_replica_reduces.cpp
+++ b/torch_xla/csrc/cross_replica_reduces.cpp
@@ -441,7 +441,7 @@ void SetAllReduceToken(const torch::lazy::BackendDevice& device,
   g_all_reduce_tokens[device.ordinal()] = token;
 }
 
-AllReduceType GetReduceType(c10::string_view reduce_type) {
+AllReduceType GetReduceType(std::string_view reduce_type) {
   if (reduce_type == "sum") {
     return AllReduceType::kSum;
   } else if (reduce_type == "mul") {
diff --git a/torch_xla/csrc/cross_replica_reduces.h b/torch_xla/csrc/cross_replica_reduces.h
index 6993309fd5e0..9be083a92277 100644
--- a/torch_xla/csrc/cross_replica_reduces.h
+++ b/torch_xla/csrc/cross_replica_reduces.h
@@ -115,7 +115,7 @@ const torch::lazy::Value& GetAllReduceToken(
 void SetAllReduceToken(const torch::lazy::BackendDevice& device,
                        const std::shared_ptr<torch::lazy::Value>& token);
 
-AllReduceType GetReduceType(c10::string_view reduce_type);
+AllReduceType GetReduceType(std::string_view reduce_type);
 
 }  // namespace torch_xla
 
diff --git a/torch_xla/csrc/ops/scatter_reduce.cpp b/torch_xla/csrc/ops/scatter_reduce.cpp
index ee453bcaae2d..4c31edd0f186 100644
--- a/torch_xla/csrc/ops/scatter_reduce.cpp
+++ b/torch_xla/csrc/ops/scatter_reduce.cpp
@@ -10,7 +10,7 @@ namespace torch_xla {
 ScatterReduce::ScatterReduce(const torch::lazy::Value& input,
                              const torch::lazy::Value& index,
                              const torch::lazy::Value& src,
-                             c10::string_view reduce, bool include_self,
+                             std::string_view reduce, bool include_self,
                              int64_t dim)
     : XlaNode(torch::lazy::OpKind(at::aten::scatter_reduce),
               {input, index, src}, GetXlaShape(input),
diff --git a/torch_xla/csrc/ops/scatter_reduce.h b/torch_xla/csrc/ops/scatter_reduce.h
index ebdd4a499afd..8ffeeaa4409a 100644
--- a/torch_xla/csrc/ops/scatter_reduce.h
+++ b/torch_xla/csrc/ops/scatter_reduce.h
@@ -9,7 +9,7 @@ class ScatterReduce : public XlaNode {
  public:
   ScatterReduce(const torch::lazy::Value& input,
                 const torch::lazy::Value& index, const torch::lazy::Value& src,
-                c10::string_view reduce, bool include_self, int64_t dim);
+                std::string_view reduce, bool include_self, int64_t dim);
 
   std::string ToString() const override;
 
@@ -27,4 +27,4 @@ class ScatterReduce : public XlaNode {
 
 }  // namespace torch_xla
 
-#endif  // XLA_TORCH_XLA_CSRC_OPS_SCATTER_REDUCE_H_
\ No newline at end of file
+#endif  // XLA_TORCH_XLA_CSRC_OPS_SCATTER_REDUCE_H_
diff --git a/torch_xla/csrc/tensor_methods.cpp b/torch_xla/csrc/tensor_methods.cpp
index cc1b34e9043b..a5e4073e335e 100644
--- a/torch_xla/csrc/tensor_methods.cpp
+++ b/torch_xla/csrc/tensor_methods.cpp
@@ -1250,7 +1250,7 @@ XLATensorPtr diagonal(const XLATensorPtr& input, int64_t offset, int64_t dim1,
 }
 
 XLATensorPtr div(const XLATensorPtr& input, const XLATensorPtr& other,
-                 const std::optional<c10::string_view>& rounding_mode,
+                 const std::optional<std::string_view>& rounding_mode,
                  std::optional<at::ScalarType> logical_element_type) {
   at::ScalarType scalar_type =
       at::typeMetaToScalarType(c10::get_default_dtype());
@@ -1548,7 +1548,7 @@ XLATensorPtr ge(const XLATensorPtr& input, const XLATensorPtr& other) {
 }
 
 XLATensorPtr gelu(const XLATensorPtr& input,
-                  const c10::string_view approximate) {
+                  const std::string_view approximate) {
   if (approximate == "none") {
     return input->CreateFrom(Gelu(input->GetIrValue()));
   } else if (approximate == "tanh") {
@@ -1559,7 +1559,7 @@ XLATensorPtr gelu(const XLATensorPtr& input,
 }
 
 XLATensorPtr gelu_backward(const XLATensorPtr& grad, const XLATensorPtr& input,
-                           const c10::string_view approximate) {
+                           const std::string_view approximate) {
   if (approximate == "none") {
     return input->CreateFrom(
         GeluBackward(grad->GetIrValue(), input->GetIrValue()));
@@ -2718,7 +2718,7 @@ XLATensorPtr scatter_add(const XLATensorPtr& input, int64_t dim,
 
 XLATensorPtr scatter_reduce(const XLATensorPtr& input, int64_t dim,
                             const XLATensorPtr& index, const XLATensorPtr& src,
-                            c10::string_view reduce, bool include_self) {
+                            std::string_view reduce, bool include_self) {
   return input->CreateFrom(torch::lazy::MakeNode<ScatterReduce>(
       input->GetIrValue(), index->GetIrValue(), src->GetIrValue(), reduce,
       include_self,
diff --git a/torch_xla/csrc/tensor_methods.h b/torch_xla/csrc/tensor_methods.h
index df0c64d9a999..f310edc4164d 100644
--- a/torch_xla/csrc/tensor_methods.h
+++ b/torch_xla/csrc/tensor_methods.h
@@ -377,7 +377,7 @@ XLATensorPtr diagonal(const XLATensorPtr& input, int64_t offset, int64_t dim1,
 
 XLATensorPtr div(
     const XLATensorPtr& input, const XLATensorPtr& other,
-    const std::optional<c10::string_view>& rounding_mode = std::nullopt,
+    const std::optional<std::string_view>& rounding_mode = std::nullopt,
     std::optional<at::ScalarType> logical_element_type = std::nullopt);
 
 XLATensorPtr div(const XLATensorPtr& input, const at::Scalar& other);
@@ -459,10 +459,10 @@ XLATensorPtr ge(const XLATensorPtr& input, const at::Scalar& other);
 XLATensorPtr ge(const XLATensorPtr& input, const XLATensorPtr& other);
 
 XLATensorPtr gelu(const XLATensorPtr& input,
-                  const c10::string_view approximate);
+                  const std::string_view approximate);
 
 XLATensorPtr gelu_backward(const XLATensorPtr& grad, const XLATensorPtr& input,
-                           const c10::string_view approximate);
+                           const std::string_view approximate);
 
 XLATensorPtr gt(const XLATensorPtr& input, const at::Scalar& other);
 
@@ -842,7 +842,7 @@ XLATensorPtr scatter_add(const XLATensorPtr& input, int64_t dim,
 
 XLATensorPtr scatter_reduce(const XLATensorPtr& input, int64_t dim,
                             const XLATensorPtr& index, const XLATensorPtr& src,
-                            c10::string_view reduce, bool include_self);
+                            std::string_view reduce, bool include_self);
 
 XLATensorPtr select(const XLATensorPtr& input, int64_t dim, int64_t index);
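Reviewer note (illustration only, not part of the patch): every call site touched above uses the view in one of three ways that plain C++17 std::string_view already supports, so the substitution is mechanical. The sketch below mirrors those patterns with made-up values; rounding_modes, the "trunc"/"sum"-style literal comparisons, and the einsum equation are only echoes of the code in this diff, not new torch_xla API.

// Standalone C++17 sketch; build with e.g. g++ -std=c++17.
#include <iostream>
#include <optional>
#include <string>
#include <string_view>

int main() {
  // 1. Implicit construction from string literals, as in the
  //    TestDivWithRoundingMode arrays.
  std::optional<std::string_view> rounding_modes[] = {"trunc", "floor",
                                                      std::nullopt};
  // 2. Direct comparison against literals, as in GetReduceType() and
  //    scatter_reduce().
  for (const auto& mode : rounding_modes) {
    if (mode.has_value() && *mode == "trunc") {
      std::cout << "found trunc\n";
    }
  }
  // 3. Explicit copy into an owning std::string, as in
  //    EinsumAutogradFunction::forward().
  std::string_view equation = "ij,jk->ik";
  std::string eq_str = std::string(equation);
  std::cout << eq_str << '\n';
  return 0;
}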