Commit
resolve conflict with yunfei
chenwhql committed Oct 14, 2021
2 parents 46ba70c + 073aef3 commit 06789ba
Showing 12 changed files with 235 additions and 44 deletions.
50 changes: 37 additions & 13 deletions paddle/fluid/framework/operator.cc
@@ -1960,27 +1960,51 @@ pt::KernelContext OperatorWithKernel::ConstructPtKernelContext(
op_kernel_ctx.EmplaceBackOutputs(tmp_outputs);
}

  for (size_t i = 0; i < attr_defs.size(); ++i) {
    paddle::any attr_item;
    if (attr_defs[i].type_index == std::type_index(typeid(pt::Scalar))) {
      // TODO(chenweihang): support other attrs
      // In principle, the attr required by the dynamic mode should be
      // passed in from the Python side, and there is no need to look it up
      // from the default_map, but that does not work yet
      switch (attr_pairs[i].second) {
        case framework::proto::AttrType::INT:
          op_kernel_ctx.EmplaceBackAttr(
              pt::Scalar(Attr<int>(attr_pairs[i].first)));
          break;
        case framework::proto::AttrType::FLOAT:
          op_kernel_ctx.EmplaceBackAttr(
              pt::Scalar(Attr<float>(attr_pairs[i].first)));
          break;
        case framework::proto::AttrType::BOOLEAN:
          op_kernel_ctx.EmplaceBackAttr(
              pt::Scalar(Attr<bool>(attr_pairs[i].first)));
          break;
        default:
          // TODO(chenweihang): support other attrs type
          PADDLE_THROW(platform::errors::Unimplemented(
              "unsupported cast op attribute `%s` when construct "
              "KernelContext.",
              attr_pairs[i].first));
      }
    } else {
      // TODO(chenweihang): support other attrs
      // In principle, the attr required by the dynamic mode should be
      // passed in from the Python side, and there is no need to look it up
      // from the default_map, but that does not work yet
      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
        op_kernel_ctx.EmplaceBackAttr(Attr<int>(attr_pairs[i].first));
      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
        op_kernel_ctx.EmplaceBackAttr(Attr<float>(attr_pairs[i].first));
      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
        op_kernel_ctx.EmplaceBackAttr(Attr<bool>(attr_pairs[i].first));
      } else {
        // TODO(chenweihang): support other attrs type
        PADDLE_THROW(platform::errors::Unimplemented(
            "unsupported cast op attribute `%s` when construct "
            "KernelContext.",
            attr_pairs[i].first));
      }
    }
  }
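The new code keys attribute handling off std::type_index (the C++ type recorded in the kernel's attribute definition) instead of only the framework's proto AttrType. Below is a minimal standalone sketch of that dispatch pattern; AttrDef here is a simplified stand-in, not Paddle's actual struct, and the printed messages are illustrative only.

#include <iostream>
#include <typeindex>
#include <typeinfo>

// Simplified stand-in for a kernel attribute definition: it records the
// C++ type the kernel expects for this attribute slot.
struct AttrDef {
  std::type_index type_index;
};

int main() {
  AttrDef def{std::type_index(typeid(float))};

  // Compare the recorded type against candidate attribute types and take the
  // matching branch, mirroring the if/else chain above.
  if (def.type_index == std::type_index(typeid(int))) {
    std::cout << "emplace int attribute\n";
  } else if (def.type_index == std::type_index(typeid(float))) {
    std::cout << "emplace float attribute\n";
  } else if (def.type_index == std::type_index(typeid(bool))) {
    std::cout << "emplace bool attribute\n";
  } else {
    std::cout << "unsupported attribute type\n";
  }
  return 0;
}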

49 changes: 36 additions & 13 deletions paddle/fluid/imperative/prepared_operator.cc
@@ -366,30 +366,53 @@ static pt::KernelContext BuildDygraphKernelContext(
op_kernel_ctx.EmplaceBackOutputs(tmp_outputs);
}

  for (size_t i = 0; i < attr_defs.size(); ++i) {
    if (attr_defs[i].type_index == std::type_index(typeid(pt::Scalar))) {
      // TODO(chenweihang): support other attrs
      // In principle, the attr required by the dynamic mode should be
      // passed in from the Python side, and there is no need to look it up
      // from the default_map, but that does not work yet
      switch (attr_pairs[i].second) {
        case framework::proto::AttrType::INT:
          op_kernel_ctx.EmplaceBackAttr(pt::Scalar(
              GetAttr<int>(attrs, default_attrs, attr_pairs[i].first)));
          break;
        case framework::proto::AttrType::FLOAT:
          op_kernel_ctx.EmplaceBackAttr(pt::Scalar(
              GetAttr<float>(attrs, default_attrs, attr_pairs[i].first)));
          break;
        case framework::proto::AttrType::BOOLEAN:
          op_kernel_ctx.EmplaceBackAttr(pt::Scalar(
              GetAttr<bool>(attrs, default_attrs, attr_pairs[i].first)));
          break;
        default:
          // TODO(chenweihang): support other attrs type
          PADDLE_THROW(platform::errors::Unimplemented(
              "unsupported cast op attribute `%s` when construct "
              "KernelContext.",
              attr_pairs[i].first));
      }
    } else {
      // TODO(chenweihang): support other attrs
      // In principle, the attr required by the dynamic mode should be
      // passed in from the Python side, and there is no need to look it up
      // from the default_map, but that does not work yet
      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
        op_kernel_ctx.EmplaceBackAttr(
            GetAttr<int>(attrs, default_attrs, attr_pairs[i].first));
      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
        op_kernel_ctx.EmplaceBackAttr(
            GetAttr<float>(attrs, default_attrs, attr_pairs[i].first));
      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
        op_kernel_ctx.EmplaceBackAttr(
            GetAttr<bool>(attrs, default_attrs, attr_pairs[i].first));
      } else {
        // TODO(chenweihang): support other attrs type
        PADDLE_THROW(platform::errors::Unimplemented(
            "unsupported cast op attribute `%s` when construct "
            "KernelContext.",
            attr_pairs[i].first));
      }
    }
  }
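In this dygraph path the attribute value comes from GetAttr, which consults the op's runtime attribute map before falling back to the registered defaults (the "default_map" the comments refer to). The following is a simplified sketch of that lookup under the assumption of plain std::map storage; the helper name GetAttrSketch is hypothetical, not Paddle's GetAttr.

#include <map>
#include <stdexcept>
#include <string>

// Simplified sketch: prefer the runtime attribute value, otherwise fall back
// to the default registered with the op; throw if neither map has the name.
template <typename T>
T GetAttrSketch(const std::map<std::string, T>& attrs,
                const std::map<std::string, T>& default_attrs,
                const std::string& name) {
  auto it = attrs.find(name);
  if (it != attrs.end()) {
    return it->second;
  }
  auto def_it = default_attrs.find(name);
  if (def_it != default_attrs.end()) {
    return def_it->second;
  }
  throw std::runtime_error("attribute not found: " + name);
}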

1 change: 1 addition & 0 deletions paddle/tcmpt/api/include/core.h
@@ -20,3 +20,4 @@ limitations under the License. */
#include "paddle/tcmpt/core/kernel_context.h"
#include "paddle/tcmpt/core/kernel_factory.h"
#include "paddle/tcmpt/core/mkldnn_dense_tensor.h"
#include "paddle/tcmpt/core/scalar.h"
2 changes: 2 additions & 0 deletions paddle/tcmpt/core/kernel_utils.h
@@ -17,6 +17,7 @@
#include "paddle/tcmpt/core/dense_tensor.h"
#include "paddle/tcmpt/core/kernel_context.h"
#include "paddle/tcmpt/core/kernel_def.h"
#include "paddle/tcmpt/core/scalar.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/device_context.h"
@@ -162,6 +163,7 @@ struct KernelImpl<Return (*)(Args...), kernel_fn> {
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int64_t);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(paddle::platform::float16);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const pt::Scalar&);

/* Output Helpers */

63 changes: 63 additions & 0 deletions paddle/tcmpt/core/scalar.h
@@ -0,0 +1,63 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <stdexcept>

namespace pt {

class Scalar {
 public:
  // Constructors are intentionally non-explicit so that plain C++ values
  // convert to Scalar implicitly at call sites
  Scalar(float val) : tag(Tag::HAS_F) { data_.f = val; }  // NOLINT

  Scalar(double val) : tag(Tag::HAS_D) { data_.d = val; }  // NOLINT

  Scalar(int32_t val) : tag(Tag::HAS_I32) { data_.i32 = val; }  // NOLINT

  Scalar(int64_t val) : tag(Tag::HAS_I64) { data_.i64 = val; }  // NOLINT

  Scalar(bool val) : tag(Tag::HAS_B) { data_.b = val; }  // NOLINT

  template <typename T>
  inline T to() const {
    switch (tag) {
      case Tag::HAS_F:
        return static_cast<T>(data_.f);
      case Tag::HAS_D:
        return static_cast<T>(data_.d);
      case Tag::HAS_I32:
        return static_cast<T>(data_.i32);
      case Tag::HAS_I64:
        return static_cast<T>(data_.i64);
      case Tag::HAS_B:
        return static_cast<T>(data_.b);
      default:
        throw std::runtime_error("Invalid Scalar type.");
    }
  }

 private:
  enum class Tag { HAS_F, HAS_D, HAS_I32, HAS_I64, HAS_B };
  Tag tag;

  union data {
    float f;
    double d;
    int32_t i32;
    int64_t i64;
    bool b;
  } data_;
};

}  // namespace pt
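A small usage sketch of the Scalar class added above, showing implicit construction from plain values and conversion back out through to<T>(). This is a standalone illustration, not part of the commit.

#include <iostream>
#include "paddle/tcmpt/core/scalar.h"

int main() {
  pt::Scalar from_int = 3;       // implicit int32_t constructor (Tag::HAS_I32)
  pt::Scalar from_float = 2.5f;  // implicit float constructor (Tag::HAS_F)
  pt::Scalar from_bool = true;   // implicit bool constructor (Tag::HAS_B)

  // to<T>() casts whatever is stored to the requested type.
  std::cout << from_int.to<double>() << "\n";  // 3
  std::cout << from_float.to<int>() << "\n";   // 2
  std::cout << from_bool.to<float>() << "\n";  // 1
  return 0;
}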
8 changes: 2 additions & 6 deletions paddle/tcmpt/cpu/creation.cc
@@ -22,13 +22,9 @@ namespace pt {
template <typename T>
void FillAnyLike(const CPUContext& dev_ctx,
                 const DenseTensor& x,
                 const Scalar& val,
                 DenseTensor* out) {
  eigen::fill<CPUContext, T>(dev_ctx, out, val.to<T>());
}

} // namespace pt
3 changes: 2 additions & 1 deletion paddle/tcmpt/cpu/creation.h
@@ -15,6 +15,7 @@
#pragma once

#include "paddle/tcmpt/core/dense_tensor.h"
#include "paddle/tcmpt/core/scalar.h"

#include "paddle/fluid/platform/device_context.h"

@@ -25,7 +26,7 @@ using CPUContext = paddle::platform::CPUDeviceContext;
template <typename T>
void FillAnyLike(const CPUContext& dev_ctx,
                 const DenseTensor& x,
                 const Scalar& val,
                 DenseTensor* out);

} // namespace pt
8 changes: 2 additions & 6 deletions paddle/tcmpt/cuda/creation.cu
@@ -22,13 +22,9 @@ namespace pt {
template <typename T>
void FillAnyLike(const CUDAContext& dev_ctx,
                 const DenseTensor& x,
                 const Scalar& val,
                 DenseTensor* out) {
  eigen::fill<CUDAContext, T>(dev_ctx, out, val.to<T>());
}

} // namespace pt
3 changes: 2 additions & 1 deletion paddle/tcmpt/cuda/creation.h
@@ -18,6 +18,7 @@
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)

#include "paddle/tcmpt/core/dense_tensor.h"
#include "paddle/tcmpt/core/scalar.h"

#include "paddle/fluid/platform/device_context.h"

@@ -28,7 +29,7 @@ using CUDAContext = paddle::platform::CUDADeviceContext;
template <typename T>
void FillAnyLike(const CUDAContext& dev_ctx,
                 const DenseTensor& x,
                 const Scalar& val,
                 DenseTensor* out);

} // namespace pt
10 changes: 9 additions & 1 deletion paddle/tcmpt/hapi/include/creation.h
@@ -14,12 +14,20 @@

#pragma once

#include "paddle/tcmpt/core/dtype.h"
#include "paddle/tcmpt/core/scalar.h"
#include "paddle/tcmpt/hapi/include/tensor.h"

namespace paddle {
namespace experimental {

Tensor full_like(const Tensor& x,
                 const pt::Scalar& value,
                 pt::DataType dtype = pt::DataType::kUndef);

Tensor ones_like(const Tensor& x, pt::DataType dtype = pt::DataType::kUndef);

Tensor zeros_like(const Tensor& x, pt::DataType dtype = pt::DataType::kUndef);

} // namespace experimental
} // namespace paddle
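A hedged usage sketch of the declarations above: `x` is assumed to be an existing paddle::experimental::Tensor, and pt::DataType::kFLOAT32 is an assumed enum member name (only kUndef appears in this diff).

// Hypothetical call sites for the new hapi creation functions.
paddle::experimental::Tensor a =
    paddle::experimental::full_like(x, pt::Scalar(3.5f));         // keep x's dtype
paddle::experimental::Tensor b =
    paddle::experimental::ones_like(x);                           // filled with 1
paddle::experimental::Tensor c =
    paddle::experimental::zeros_like(x, pt::DataType::kFLOAT32);  // assumed enum name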
14 changes: 13 additions & 1 deletion paddle/tcmpt/hapi/lib/creation.cc
@@ -25,7 +25,7 @@ limitations under the License. */
namespace paddle {
namespace experimental {

Tensor full_like(const Tensor& x, const pt::Scalar& value, pt::DataType dtype) {
// 1. Get kernel signature and kernel
auto kernel_signature = ParseKernelNameAndKeyByArgs("fill_any_like", x);
VLOG(1) << kernel_signature.first;
@@ -52,6 +52,10 @@ Tensor full_like(const Tensor& x, float value) {
// 5. Prepare outputs
Tensor out;
auto out_def = kernel.args_def().output_defs()[0];
// InferDataType
if (dtype != pt::DataType::kUndef) {
out_def.SetDataType(dtype);
}
auto dense_out = std::make_shared<pt::DenseTensor>(
pt::TensorMeta(out_dims, out_def.backend, out_def.dtype, out_def.layout),
pt::TensorStatus());
@@ -64,5 +68,13 @@ Tensor full_like(const Tensor& x, float value) {
return out;
}

Tensor ones_like(const Tensor& x, pt::DataType dtype) {
return full_like(x, 1, dtype);
}

Tensor zeros_like(const Tensor& x, pt::DataType dtype) {
return full_like(x, 0, dtype);
}

} // namespace experimental
} // namespace paddle