Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[Phi] Rename ScalarArray to IntArray #40975

Merged
merged 10 commits into from
Mar 31, 2022
6 changes: 3 additions & 3 deletions paddle/fluid/distributed/collective/reducer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,7 @@ EagerReducer::EagerReducer(

if (find_unused_vars_each_step_) {
global_used_vars_ = paddle::experimental::empty(
ScalarArray({static_cast<int32_t>(tensors_.size())}), DataType::INT32,
IntArray({static_cast<int32_t>(tensors_.size())}), DataType::INT32,
inner_place_);
}
}
Expand Down Expand Up @@ -364,7 +364,7 @@ void EagerReducer::InitializeGroups(
// process the dense gradient.
InitializeDenseGroups(tensor_indices_, &group);
group.dense_contents_ = paddle::experimental::empty(
ScalarArray({group.all_length_}), group.dtype_, inner_place_);
IntArray({group.all_length_}), group.dtype_, inner_place_);
}

// map tensors to this group by VariableLocator
Expand Down Expand Up @@ -403,7 +403,7 @@ void EagerReducer::InitializeDenseGroups(
p_group->length_.push_back(size);

// for concat operator
p_group->origin_shapes_.push_back(ScalarArray(tensor.shape()));
p_group->origin_shapes_.push_back(IntArray(tensor.shape()));
p_group->dense_tensors_.push_back(phi::DenseTensor());

const auto &dtype = tensor.dtype();
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/distributed/collective/reducer.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,8 @@ namespace paddle {
namespace distributed {
using Tensor = paddle::experimental::Tensor;
using Scalar = paddle::experimental::ScalarBase<paddle::experimental::Tensor>;
using ScalarArray =
paddle::experimental::ScalarArrayBase<paddle::experimental::Tensor>;
using IntArray =
paddle::experimental::IntArrayBase<paddle::experimental::Tensor>;
using Backend = paddle::experimental::Backend;

std::vector<std::vector<size_t>> Eager_AssignGroupBySize(
Expand All @@ -52,7 +52,7 @@ class EagerGroup {
std::vector<phi::DenseTensor> dense_tensors_;
std::vector<int64_t> length_;
int64_t all_length_{0};
std::vector<ScalarArray> origin_shapes_;
std::vector<IntArray> origin_shapes_;

// Global indices of participating tensors in the group
std::vector<size_t> tensor_indices_;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
'Scalar(int64_t)' : 'paddle::experimental::Scalar',
'Scalar(float)' : 'paddle::experimental::Scalar',
'Scalar(double)' : 'paddle::experimental::Scalar',
'ScalarArray' : 'paddle::experimental::ScalarArray'
'IntArray' : 'paddle::experimental::IntArray'
}


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def SkipAPIGeneration(forward_api_name):
"std::vector<double>": "CastPyArg2Float64s",
"std::vector<std::string>": "CastPyArg2Strings",
"paddle::experimental::Scalar": "CastPyArg2Scalar",
"paddle::experimental::ScalarArray": "CastPyArg2ScalarArray",
"paddle::experimental::IntArray": "CastPyArg2IntArray",
"paddle::experimental::Place": "CastPyArg2Place",
"paddle::experimental::DataType": "CastPyArg2DataType",
}
Expand Down Expand Up @@ -140,7 +140,7 @@ def FindParsingFunctionFromAttributeType(atype):
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/include/strings_api.h"
#include "paddle/fluid/pybind/op_function_common.h"
Expand Down
26 changes: 13 additions & 13 deletions paddle/fluid/framework/infershape_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ limitations under the License. */
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/compat/arg_map_context.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/compat/op_utils.h"
Expand Down Expand Up @@ -363,12 +363,12 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
auto attr_reader = ctx->Attrs();
for (size_t i = 0; i < attr_names.size(); ++i) {
auto attr_name = attr_names[i];
if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
// When attr is a vector_tensor or tensor, transform it to ScalarArray
if (attr_defs[i].type_index == std::type_index(typeid(phi::IntArray))) {
// When attr is a vector_tensor or tensor, transform it to IntArray
if (ctx->HasInputs(attr_name) || ctx->HasInput(attr_name)) {
const auto& infershape_inputs = ctx->GetInputVarPtrs(attr_name);
if (ctx->IsRuntime()) {
// If is in runtime, we will get tensor's value for ScalarArray
// If is in runtime, we will get tensor's value for IntArray
// and push it into attrs
std::vector<Variable*> vars;
vars.reserve(infershape_inputs.size());
Expand All @@ -377,13 +377,13 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
}
if (infershape_inputs.size() != 1) {
infer_meta_context.EmplaceBackAttr(
std::move(experimental::MakePhiScalarArrayFromVarList(vars)));
std::move(experimental::MakePhiIntArrayFromVarList(vars)));
} else {
infer_meta_context.EmplaceBackAttr(
std::move(experimental::MakePhiScalarArrayFromVar(*vars[0])));
std::move(experimental::MakePhiIntArrayFromVar(*vars[0])));
}
} else {
// If is not in runtime, we will set default value(-1) for ScalarArray
// If is not in runtime, we will set default value(-1) for IntArray
std::vector<VarDesc*> vars;
vars.reserve(infershape_inputs.size());
for (size_t i = 0; i < infershape_inputs.size(); ++i) {
Expand All @@ -400,15 +400,15 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,

if (num_ele <= 0) {
PADDLE_THROW(platform::errors::Unimplemented(
"Invalid number for construct phi::ScalarArray, expected "
"Invalid number for construct phi::IntArray, expected "
"number > 0, but actually is %d. ",
num_ele));
}

} else {
num_ele = vars.size();
}
phi::ScalarArray tensor_attr(std::vector<int32_t>(num_ele, -1));
phi::IntArray tensor_attr(std::vector<int32_t>(num_ele, -1));
tensor_attr.SetFromTensor(true);
infer_meta_context.EmplaceBackAttr(std::move(tensor_attr));
}
Expand All @@ -417,18 +417,18 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
if (std::type_index(attr.type()) ==
std::type_index(typeid(std::vector<int32_t>))) {
infer_meta_context.EmplaceBackAttr(std::move(
phi::ScalarArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
phi::IntArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
} else if (std::type_index(attr.type()) ==
std::type_index(typeid(std::vector<int64_t>))) {
infer_meta_context.EmplaceBackAttr(std::move(
phi::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
phi::IntArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
} else if (std::type_index(attr.type()) ==
std::type_index(typeid(int))) {
infer_meta_context.EmplaceBackAttr(
phi::ScalarArray({BOOST_GET_CONST(int, attr)}));
phi::IntArray({BOOST_GET_CONST(int, attr)}));
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to ScalarArray when "
"Unsupported cast op attribute `%s` to IntArray when "
"construct InferMetaContext.",
attr_name));
}
Expand Down
20 changes: 10 additions & 10 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@ limitations under the License. */
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/ops/compat/signatures.h"

Expand Down Expand Up @@ -2196,35 +2196,35 @@ void OperatorWithKernel::BuildPhiKernelContext(
VLOG(4) << "Done outputs";

for (size_t i = 0; i < attr_names.size(); ++i) {
if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
if (attr_defs[i].type_index == std::type_index(typeid(phi::IntArray))) {
auto attr_iter = Attrs().find(attr_names[i]);
if (attr_iter != Attrs().end()) { // shape is in the attribute
if (std::type_index(attr_iter->second.type()) ==
std::type_index(typeid(std::vector<int64_t>))) {
pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
BOOST_GET_CONST(std::vector<int64_t>, attr_iter->second))));
} else if (std::type_index(attr_iter->second.type()) ==
std::type_index(typeid(std::vector<int32_t>))) {
pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
BOOST_GET_CONST(std::vector<int32_t>, attr_iter->second))));
} else if (std::type_index(attr_iter->second.type()) ==
std::type_index(typeid(int32_t))) {
pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
&BOOST_GET_CONST(int32_t, attr_iter->second), 1)));
pt_kernel_context->EmplaceBackAttr(std::move(
phi::IntArray(&BOOST_GET_CONST(int32_t, attr_iter->second), 1)));
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to ScalarArray when "
"Unsupported cast op attribute `%s` to IntArray when "
"construct KernelContext.",
attr_names[i]));
}
} else { // shape is in the input
auto& ins_vector = ctx.inputs.at(attr_names[i]);
if (ins_vector.size() == 1) { // ShapeTensor
pt_kernel_context->EmplaceBackAttr(std::move(
experimental::MakePhiScalarArrayFromVar(*ins_vector.front())));
experimental::MakePhiIntArrayFromVar(*ins_vector.front())));
} else { // ShapeTensorList
pt_kernel_context->EmplaceBackAttr(std::move(
experimental::MakePhiScalarArrayFromVarList(ins_vector)));
pt_kernel_context->EmplaceBackAttr(
std::move(experimental::MakePhiIntArrayFromVarList(ins_vector)));
}
}
} else if (attr_defs[i].type_index ==
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/prepared_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
Expand Down
16 changes: 8 additions & 8 deletions paddle/fluid/imperative/prepared_operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -361,26 +361,26 @@ void BuildDygraphPhiKernelContext(
}

for (size_t i = 0; i < attr_names.size(); ++i) {
if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
if (attr_defs[i].type_index == std::type_index(typeid(phi::IntArray))) {
if (attrs.find(attr_names[i]) !=
attrs.end()) { // shape is in the attribute
auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
if (std::type_index(attr.type()) ==
std::type_index(typeid(std::vector<int64_t>))) {
kernel_ctx->EmplaceBackAttr(std::move(
phi::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
phi::IntArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
} else if (std::type_index(attr.type()) ==
std::type_index(typeid(std::vector<int32_t>))) {
kernel_ctx->EmplaceBackAttr(std::move(
phi::ScalarArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
phi::IntArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
} else if (std::type_index(attr.type()) ==
std::type_index(typeid(int64_t))) {
kernel_ctx->EmplaceBackAttr(
std::move(phi::ScalarArray(&BOOST_GET_CONST(int64_t, attr), 1)));
std::move(phi::IntArray(&BOOST_GET_CONST(int64_t, attr), 1)));
} else if (std::type_index(attr.type()) ==
std::type_index(typeid(int32_t))) {
kernel_ctx->EmplaceBackAttr(
std::move(phi::ScalarArray(&BOOST_GET_CONST(int32_t, attr), 1)));
std::move(phi::IntArray(&BOOST_GET_CONST(int32_t, attr), 1)));
} else if (attr_defs[i].type_index ==
std::type_index(typeid(std::vector<int32_t>))) {
const auto& vector_int_attr = BOOST_GET_CONST(std::vector<int>, attr);
Expand All @@ -395,15 +395,15 @@ void BuildDygraphPhiKernelContext(
auto& ins_vector = ins.at(attr_names[i]);
if (ins_vector.size() == 1) { // ShapeTensor
kernel_ctx->EmplaceBackAttr(std::move(
experimental::MakePhiScalarArrayFromVar(ins_vector[0]->Var())));
experimental::MakePhiIntArrayFromVar(ins_vector[0]->Var())));
} else { // ShapeTensorList
std::vector<framework::Variable*> variables;
variables.reserve(ins_vector.size());
for (const auto& var_base : ins_vector) {
variables.push_back(var_base->MutableVar());
}
kernel_ctx->EmplaceBackAttr(std::move(
experimental::MakePhiScalarArrayFromVarList(variables)));
kernel_ctx->EmplaceBackAttr(
std::move(experimental::MakePhiIntArrayFromVarList(variables)));
}
}
} else if (attr_defs[i].type_index ==
Expand Down
10 changes: 5 additions & 5 deletions paddle/fluid/operators/reshape_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ limitations under the License. */
// only can include the headers in paddle/phi/api dirs
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
Expand Down Expand Up @@ -354,7 +354,7 @@ class ReshapeKernel {
auto *shape_tensor = ctx.HasInput("Shape")
? ctx.Input<framework::LoDTensor>("Shape")
: nullptr;
phi::ScalarArray pt_scalar_shape;
phi::IntArray pt_scalar_shape;
if (list_new_shape_tensor.size() > 0) {
// have shape tensor
std::vector<phi::DenseTensor> pt_vec_shape;
Expand All @@ -369,7 +369,7 @@ class ReshapeKernel {
pt_vec_shape.push_back(*tensor);
}
}
pt_scalar_shape = phi::ScalarArray(pt_vec_shape);
pt_scalar_shape = phi::IntArray(pt_vec_shape);
} else if (shape_tensor) {
phi::DenseTensor pt_shape;
if (platform::is_gpu_place(shape_tensor->place()) ||
Expand All @@ -381,10 +381,10 @@ class ReshapeKernel {
} else {
pt_shape = *shape_tensor;
}
pt_scalar_shape = phi::ScalarArray(pt_shape);
pt_scalar_shape = phi::IntArray(pt_shape);
} else {
auto &shape_attr = ctx.Attr<std::vector<int>>("shape");
pt_scalar_shape = phi::ScalarArray(shape_attr);
pt_scalar_shape = phi::IntArray(shape_attr);
}
if (platform::is_cpu_place(ctx.GetPlace())) {
auto &dev_ctx = ctx.device_context<platform::CPUDeviceContext>();
Expand Down
15 changes: 8 additions & 7 deletions paddle/fluid/pybind/eager_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -929,9 +929,10 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
return paddle::experimental::Scalar(1.0);
}

paddle::experimental::ScalarArray CastPyArg2ScalarArray(
PyObject* obj, const std::string& op_type, ssize_t arg_pos) {
// In case of ScalarArray, only two possible PyObjects:
paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos) {
// In case of IntArray, only two possible PyObjects:
// 1. list of int
// 2. Tensor
if (obj == Py_None) {
Expand All @@ -947,12 +948,12 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
auto type_name = std::string(type->tp_name);
if (type_name == "list" || type_name == "tuple") {
std::vector<int> value = CastPyArg2Ints(obj, op_type, arg_pos);
return paddle::experimental::ScalarArray(value);
return paddle::experimental::IntArray(value);

} else if (type_name == "paddle.Tensor") {
paddle::experimental::Tensor& value = GetTensorFromPyObject(
op_type, "" /*arg_name*/, obj, arg_pos, false /*dispensable*/);
return paddle::experimental::ScalarArray(value);
return paddle::experimental::IntArray(value);

} else {
PADDLE_THROW(platform::errors::InvalidArgument(
Expand All @@ -962,8 +963,8 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT
}

// Fake a ScalarArray
return paddle::experimental::ScalarArray({1});
// Fake an IntArray
return paddle::experimental::IntArray({1});
}

paddle::framework::Scope* CastPyArg2ScopePtr(PyObject* obj) {
Expand Down
7 changes: 4 additions & 3 deletions paddle/fluid/pybind/eager_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@ limitations under the License. */
#include <Python.h>
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"

#include "pybind11/pybind11.h"
Expand Down Expand Up @@ -150,8 +150,9 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);

paddle::experimental::ScalarArray CastPyArg2ScalarArray(
PyObject* obj, const std::string& op_type, ssize_t arg_pos);
paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);

paddle::experimental::Place CastPyArg2Place(PyObject* obj,
const std::string& op_type,
Expand Down
4 changes: 2 additions & 2 deletions paddle/infrt/host_context/value.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,9 @@
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/meta_tensor.h"

Expand Down Expand Up @@ -90,7 +90,7 @@ using ValueVariantType =
std::vector<const ::phi::DenseTensor*>,
std::vector<::phi::DenseTensor*>,
paddle::experimental::ScalarBase<::phi::DenseTensor>,
paddle::experimental::ScalarArrayBase<::phi::DenseTensor>,
paddle::experimental::IntArrayBase<::phi::DenseTensor>,
std::vector<::phi::MetaTensor*>,
::phi::MetaConfig,
paddle::experimental::Backend,
Expand Down
Loading