diff --git a/.github/ISSUE_TEMPLATE/---document-issue-.md b/.github/ISSUE_TEMPLATE/---document-issue-.md
index 7c464ac584bc8..ffc2fcd7817b6 100644
--- a/.github/ISSUE_TEMPLATE/---document-issue-.md
+++ b/.github/ISSUE_TEMPLATE/---document-issue-.md
@@ -56,4 +56,4 @@ For example: no sample code; The sample code is not helpful; The sample code not
 For example:Chinese API in this doc is inconsistent with English API, including params, description, sample code, formula, etc.
 
 #### Other
-For example: The doc link is broken; The doc page is missing; Dead link in docs.
\ No newline at end of file
+For example: The doc link is broken; The doc page is missing; Dead link in docs.
diff --git a/paddle/fluid/distributed/service/graph_brpc_client.h b/paddle/fluid/distributed/service/graph_brpc_client.h
index c0d7910851f91..854e0f6ca4ac1 100644
--- a/paddle/fluid/distributed/service/graph_brpc_client.h
+++ b/paddle/fluid/distributed/service/graph_brpc_client.h
@@ -14,10 +14,10 @@
 
 #pragma once
 
+#include
 #include
 #include
 #include
-#include
 #include
 
 #include "ThreadPool.h"
diff --git a/paddle/fluid/distributed/table/common_graph_table.cc b/paddle/fluid/distributed/table/common_graph_table.cc
index 9776e4775bdde..995a39a654312 100644
--- a/paddle/fluid/distributed/table/common_graph_table.cc
+++ b/paddle/fluid/distributed/table/common_graph_table.cc
@@ -124,7 +124,7 @@ int32_t GraphTable::load_nodes(const std::string &path, std::string node_type) {
   std::ifstream file(path);
   std::string line;
   while (std::getline(file, line)) {
-    count ++;
+    count++;
     auto values = paddle::string::split_string(line, "\t");
     if (values.size() < 2) continue;
     auto id = std::stoull(values[1]);
@@ -160,12 +160,12 @@ int32_t GraphTable::load_nodes(const std::string &path, std::string node_type) {
                 << " not in feature_map.";
         }
       }
-      valid_count ++;
+      valid_count++;
     }
   }
-  VLOG(0) << valid_count << "/" << count << " nodes in type " <<
-    node_type << " are loaded successfully in " << path;
+  VLOG(0) << valid_count << "/" << count << " nodes in type " << node_type
+          << " are loaded successfully in " << path;
   return 0;
 }
 
@@ -209,10 +209,11 @@ int32_t GraphTable::load_edges(const std::string &path, bool reverse_edge) {
       size_t index = src_shard_id - shard_start;
       shards[index].add_graph_node(src_id)->build_edges(is_weighted);
       shards[index].add_neighboor(src_id, dst_id, weight);
-      valid_count ++;
+      valid_count++;
     }
   }
-  VLOG(0) << valid_count << "/" << count << " edges are loaded successfully in " << path;
+  VLOG(0) << valid_count << "/" << count << " edges are loaded successfully in "
+          << path;
 
   // Build Sampler j
diff --git a/paddle/fluid/distributed/table/graph_edge.cc b/paddle/fluid/distributed/table/graph_edge.cc
index 56ce7852484ec..cc90f4c6516c1 100644
--- a/paddle/fluid/distributed/table/graph_edge.cc
+++ b/paddle/fluid/distributed/table/graph_edge.cc
@@ -17,14 +17,13 @@
 namespace paddle {
 namespace distributed {
 
-void GraphEdgeBlob::add_edge(uint64_t id, float weight=1){
+void GraphEdgeBlob::add_edge(uint64_t id, float weight = 1) {
   id_arr.push_back(id);
 }
 
-void WeightedGraphEdgeBlob::add_edge(uint64_t id, float weight=1){
+void WeightedGraphEdgeBlob::add_edge(uint64_t id, float weight = 1) {
   id_arr.push_back(id);
   weight_arr.push_back(weight);
 }
-
 }
 }
diff --git a/paddle/fluid/distributed/table/graph_node.cc b/paddle/fluid/distributed/table/graph_node.cc
index 8c20fc302f8d7..27a2cafaf4f0f 100644
--- a/paddle/fluid/distributed/table/graph_node.cc
+++ b/paddle/fluid/distributed/table/graph_node.cc
@@ -17,13 +17,12 @@
 namespace paddle {
 namespace distributed {
-
 GraphNode::~GraphNode() {
-  if (sampler != nullptr){
+  if (sampler != nullptr) {
     delete sampler;
     sampler = nullptr;
   }
-  if (edges != nullptr){
+  if (edges != nullptr) {
     delete edges;
     edges = nullptr;
   }
@@ -33,9 +32,7 @@ int Node::weight_size = sizeof(float);
 int Node::id_size = sizeof(uint64_t);
 int Node::int_size = sizeof(int);
 
-int Node::get_size(bool need_feature) {
-  return id_size + int_size;
-}
+int Node::get_size(bool need_feature) { return id_size + int_size; }
 
 void Node::to_buffer(char* buffer, bool need_feature) {
   memcpy(buffer, &id, id_size);
@@ -45,15 +42,13 @@ void Node::to_buffer(char* buffer, bool need_feature) {
   memcpy(buffer, &feat_num, sizeof(int));
 }
 
-void Node::recover_from_buffer(char* buffer) {
-  memcpy(&id, buffer, id_size);
-}
+void Node::recover_from_buffer(char* buffer) { memcpy(&id, buffer, id_size); }
 
 int FeatureNode::get_size(bool need_feature) {
-  int size = id_size + int_size; // id, feat_num
-  if (need_feature){
+  int size = id_size + int_size;  // id, feat_num
+  if (need_feature) {
     size += feature.size() * int_size;
-    for (const std::string& fea: feature){
+    for (const std::string& fea : feature) {
       size += fea.size();
     }
   }
@@ -61,8 +56,8 @@ int FeatureNode::get_size(bool need_feature) {
 }
 
 void GraphNode::build_edges(bool is_weighted) {
-  if (edges == nullptr){
-    if (is_weighted == true){
+  if (edges == nullptr) {
+    if (is_weighted == true) {
       edges = new WeightedGraphEdgeBlob();
     } else {
       edges = new GraphEdgeBlob();
@@ -70,11 +65,11 @@ void GraphNode::build_edges(bool is_weighted) {
   }
 }
 void GraphNode::build_sampler(std::string sample_type) {
-  if (sample_type == "random"){
+  if (sample_type == "random") {
     sampler = new RandomSampler();
-  } else if (sample_type == "weighted"){
+  } else if (sample_type == "weighted") {
     sampler = new WeightedSampler();
-  } 
+  }
   sampler->build(edges);
 }
 void FeatureNode::to_buffer(char* buffer, bool need_feature) {
@@ -87,7 +82,7 @@ void FeatureNode::to_buffer(char* buffer, bool need_feature) {
   feat_num += feature.size();
   memcpy(buffer, &feat_num, sizeof(int));
   buffer += sizeof(int);
-  for (int i = 0; i < feat_num; ++i){
+  for (int i = 0; i < feat_num; ++i) {
     feat_len = feature[i].size();
     memcpy(buffer, &feat_len, sizeof(int));
     buffer += sizeof(int);
@@ -99,14 +94,13 @@ void FeatureNode::to_buffer(char* buffer, bool need_feature) {
   }
 }
 void FeatureNode::recover_from_buffer(char* buffer) {
-
   int feat_num, feat_len;
   memcpy(&id, buffer, id_size);
   buffer += id_size;
-  
+
   memcpy(&feat_num, buffer, sizeof(int));
   buffer += sizeof(int);
-  
+
   feature.clear();
   for (int i = 0; i < feat_num; ++i) {
     memcpy(&feat_len, buffer, sizeof(int));
@@ -118,7 +112,6 @@
     str[feat_len] = '\0';
     feature.push_back(std::string(str));
   }
-
 }
 }
 }
diff --git a/paddle/fluid/distributed/table/weighted_sampler.cc b/paddle/fluid/distributed/table/weighted_sampler.cc
index 9dc9064742dee..69f845843007c 100644
--- a/paddle/fluid/distributed/table/weighted_sampler.cc
+++ b/paddle/fluid/distributed/table/weighted_sampler.cc
@@ -14,17 +14,15 @@
 
 #include "paddle/fluid/distributed/table/weighted_sampler.h"
 #include
-#include
+#include
 
 namespace paddle {
 namespace distributed {
-void RandomSampler::build(GraphEdgeBlob* edges) {
-  this->edges = edges;
-}
+void RandomSampler::build(GraphEdgeBlob *edges) { this->edges = edges; }
 
 std::vector<int> RandomSampler::sample_k(int k) {
   int n = edges->size();
-  if (k > n){
+  if (k > n) {
     k = n;
   }
   struct timespec tn;
@@ -32,19 +30,19 @@ std::vector<int> RandomSampler::sample_k(int k) {
   srand(tn.tv_nsec);
   std::vector<int> sample_result;
   std::unordered_map<int, int> replace_map;
-  while(k--){
+  while (k--) {
     int rand_int = rand() % n;
     auto iter = replace_map.find(rand_int);
-    if(iter == replace_map.end()){
+    if (iter == replace_map.end()) {
       sample_result.push_back(rand_int);
-    }else{
+    } else {
       sample_result.push_back(iter->second);
     }
     iter = replace_map.find(n - 1);
-    if(iter == replace_map.end()){
+    if (iter == replace_map.end()) {
       replace_map[rand_int] = n - 1;
-    }else{
+    } else {
       replace_map[rand_int] = iter->second;
     }
     --n;
@@ -52,36 +50,37 @@ std::vector<int> RandomSampler::sample_k(int k) {
   return sample_result;
 }
 
-WeightedSampler::WeightedSampler(){
+WeightedSampler::WeightedSampler() {
   left = nullptr;
   right = nullptr;
   edges = nullptr;
 }
 
 WeightedSampler::~WeightedSampler() {
-  if(left != nullptr){
+  if (left != nullptr) {
     delete left;
     left = nullptr;
   }
-  if(right != nullptr){
+  if (right != nullptr) {
     delete right;
     right = nullptr;
   }
 }
 
-void WeightedSampler::build(GraphEdgeBlob* edges) {
-  if(left != nullptr){
+void WeightedSampler::build(GraphEdgeBlob *edges) {
+  if (left != nullptr) {
     delete left;
     left = nullptr;
   }
-  if(right != nullptr){
+  if (right != nullptr) {
     delete right;
     right = nullptr;
   }
-  return build_one((WeightedGraphEdgeBlob*)edges, 0, edges->size());
+  return build_one((WeightedGraphEdgeBlob *)edges, 0, edges->size());
 }
 
-void WeightedSampler::build_one(WeightedGraphEdgeBlob *edges, int start, int end) {
+void WeightedSampler::build_one(WeightedGraphEdgeBlob *edges, int start,
+                                int end) {
   count = 0;
   this->edges = edges;
   if (start + 1 == end) {
@@ -137,7 +136,7 @@ int WeightedSampler::sample(
   if (right_count == 0 ||
       left_count > 0 && left->weight - left_subtract >= query_weight) {
     return_idx = left->sample(query_weight, subtract_weight_map,
-                              subtract_count_map, subtract);
+                               subtract_count_map, subtract);
   } else {
     return_idx =
         right->sample(query_weight - (left->weight - left_subtract),
diff --git a/paddle/fluid/distributed/table/weighted_sampler.h b/paddle/fluid/distributed/table/weighted_sampler.h
index 82e5109e8f34b..cfc341d27c6b7 100644
--- a/paddle/fluid/distributed/table/weighted_sampler.h
+++ b/paddle/fluid/distributed/table/weighted_sampler.h
@@ -21,21 +21,21 @@ namespace paddle {
 namespace distributed {
 
 class Sampler {
-public:
+ public:
   virtual ~Sampler() {}
-  virtual void build(GraphEdgeBlob* edges) = 0;
+  virtual void build(GraphEdgeBlob *edges) = 0;
   virtual std::vector<int> sample_k(int k) = 0;
 };
 
-class RandomSampler: public Sampler {
-public:
+class RandomSampler : public Sampler {
+ public:
   virtual ~RandomSampler() {}
-  virtual void build(GraphEdgeBlob* edges);
+  virtual void build(GraphEdgeBlob *edges);
   virtual std::vector<int> sample_k(int k);
-  GraphEdgeBlob* edges;
+  GraphEdgeBlob *edges;
 };
 
-class WeightedSampler: public Sampler {
+class WeightedSampler : public Sampler {
  public:
   WeightedSampler();
   virtual ~WeightedSampler();
@@ -43,17 +43,16 @@ class WeightedSampler: public Sampler {
   float weight;
   int count;
   int idx;
-  GraphEdgeBlob * edges;
-  virtual void build(GraphEdgeBlob* edges);
+  GraphEdgeBlob *edges;
+  virtual void build(GraphEdgeBlob *edges);
   virtual void build_one(WeightedGraphEdgeBlob *edges, int start, int end);
   virtual std::vector<int> sample_k(int k);
 
  private:
-  int sample(
-      float query_weight,
-      std::unordered_map &subtract_weight_map,
-      std::unordered_map &subtract_count_map,
-      float &subtract);
+  int sample(float query_weight,
+             std::unordered_map &subtract_weight_map,
+             std::unordered_map &subtract_count_map,
+             float &subtract);
 };
 }
 }
diff --git a/paddle/fluid/inference/api/demo_ci/clean.sh b/paddle/fluid/inference/api/demo_ci/clean.sh
index 0d9f3d2aa237a..c265721db5775 100755
--- a/paddle/fluid/inference/api/demo_ci/clean.sh
+++ b/paddle/fluid/inference/api/demo_ci/clean.sh
@@ -1,3 +1,17 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 set -x
 cd `dirname $0`
 rm -rf build/ data/
diff --git a/paddle/fluid/train/demo/run.sh b/paddle/fluid/train/demo/run.sh
index 2955e7574daa2..c45a3528febdd 100755
--- a/paddle/fluid/train/demo/run.sh
+++ b/paddle/fluid/train/demo/run.sh
@@ -1,5 +1,19 @@
 #!/bin/bash
 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 set -x
 
 PADDLE_ROOT=$1
diff --git a/paddle/fluid/train/imdb_demo/run.sh b/paddle/fluid/train/imdb_demo/run.sh
index f71b4bac602a9..6de1df27e0035 100644
--- a/paddle/fluid/train/imdb_demo/run.sh
+++ b/paddle/fluid/train/imdb_demo/run.sh
@@ -1,3 +1,16 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 set -exu
 
 build/demo_trainer --flagfile="train.cfg"
diff --git a/paddle/scripts/build_docker_images.sh b/paddle/scripts/build_docker_images.sh
index a90f0885294a9..2b584cdca6b4c 100644
--- a/paddle/scripts/build_docker_images.sh
+++ b/paddle/scripts/build_docker_images.sh
@@ -1,4 +1,19 @@
 #!/bin/sh
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 set -xe
 
 REPO="${REPO:-paddlepaddle}"
diff --git a/paddle/scripts/docker/root/.scripts/git-completion.sh b/paddle/scripts/docker/root/.scripts/git-completion.sh
index bdddef5ac2faf..c43e88a4acd73 100755
--- a/paddle/scripts/docker/root/.scripts/git-completion.sh
+++ b/paddle/scripts/docker/root/.scripts/git-completion.sh
@@ -1,4 +1,19 @@
 #!bash
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 #
 # bash/zsh completion support for core Git.
 #
diff --git a/paddle/scripts/fast_install.sh b/paddle/scripts/fast_install.sh
index 1034b1c5c1043..cacec55d3bc22 100644
--- a/paddle/scripts/fast_install.sh
+++ b/paddle/scripts/fast_install.sh
@@ -1,5 +1,19 @@
 #!/bin/bash
 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 ## purple to echo
 function purple(){
     echo -e "\033[35m$1\033[0m"
diff --git a/patches/eigen/TensorBlock.h b/patches/eigen/TensorBlock.h
index 1e55d12c42fc2..be0a02f53d1c5 100644
--- a/patches/eigen/TensorBlock.h
+++ b/patches/eigen/TensorBlock.h
@@ -1,3 +1,17 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
@@ -82,15 +96,16 @@ struct TensorBlockResourceRequirements {
   // which is implicitly invoked in the "merge" / "any" routines.
 #else  // HIPCC
   // errors out complaining about the lack of a matching constructor
   EIGEN_DEVICE_FUNC
-  TensorBlockResourceRequirements(TensorBlockShapeType shape_type_, size_t size_,
-                                  TensorOpCost cost_)
-      : shape_type(shape_type_), size(size_), cost_per_coeff(cost_)
-  {}
+  TensorBlockResourceRequirements(TensorBlockShapeType shape_type_,
+                                  size_t size_,
+                                  TensorOpCost cost_)
+      : shape_type(shape_type_), size(size_), cost_per_coeff(cost_) {}
 #endif
 
   template
   EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements withShapeAndSize(
-      TensorBlockShapeType shape_type, size_t size_in_bytes,
+      TensorBlockShapeType shape_type,
+      size_t size_in_bytes,
       TensorOpCost cost) {
     const size_t size = numext::maxi(size_t(1), size_in_bytes / sizeof(Scalar));
     return {shape_type, size, cost};
@@ -113,7 +128,8 @@ struct TensorBlockResourceRequirements {
     // cost approximation (e.g. shuffling inner dimension has a much higher cost
    // because it reads memory randomly, although the total number of moved
    // bytes is the same).
-    return withShapeAndSize(shape_type, size_in_bytes,
+    return withShapeAndSize(shape_type,
+                            size_in_bytes,
                             {/*bytes_loaded=*/sizeof(Scalar),
                              /*bytes_stored=*/sizeof(Scalar),
                              /*compute_cycles=*/0});
@@ -247,7 +263,8 @@ class TensorBlockDescriptor {
     DestinationBuffer() : m_data(NULL), m_data_type_size(0), m_kind(kEmpty) {}
 
     template
-    DestinationBuffer(Scalar* data, const Dimensions& strides,
+    DestinationBuffer(Scalar* data,
+                      const Dimensions& strides,
                       DestinationBufferKind kind)
         : m_data(static_cast(data)),
           m_data_type_size(sizeof(Scalar)),
@@ -256,7 +273,8 @@ class TensorBlockDescriptor {
     template
     static DestinationBuffer make(const TensorBlockDescriptor& desc,
-                                  Scalar* data, const Dimensions& strides) {
+                                  Scalar* data,
+                                  const Dimensions& strides) {
       return DestinationBuffer(data, strides, kind(desc, strides));
     }
@@ -284,7 +302,8 @@ class TensorBlockDescriptor {
     DestinationBufferKind m_kind;
   };
 
-  TensorBlockDescriptor(const IndexType offset, const Dimensions& dimensions,
+  TensorBlockDescriptor(const IndexType offset,
+                        const Dimensions& dimensions,
                         const DestinationBuffer& destination)
       : m_offset(offset),
         m_dimensions(dimensions),
@@ -651,15 +670,19 @@ struct XprScalar {
 // be invalid, and should never be used in block assignment or any other tensor
 // expression.
 
-template
 class TensorMaterializedBlock {
  public:
   typedef DSizes Dimensions;
-  typedef TensorMap > XprType;
+  typedef TensorMap> XprType;
 
-  TensorMaterializedBlock(TensorBlockKind kind, const Scalar* data,
-                          const Dimensions& dimensions, bool valid_expr = true)
+  TensorMaterializedBlock(TensorBlockKind kind,
+                          const Scalar* data,
+                          const Dimensions& dimensions,
+                          bool valid_expr = true)
       : m_kind(kind),
         m_data(data),
         m_dimensions(dimensions),
@@ -702,14 +725,18 @@ class TensorMaterializedBlock {
           m_materialized_in_output
               ? internal::TensorBlockKind::kMaterializedInOutput
               : internal::TensorBlockKind::kMaterializedInScratch,
-          m_data, m_dimensions, !m_strided_storage);
+          m_data,
+          m_dimensions,
+          !m_strided_storage);
     }
 
    private:
    friend class TensorMaterializedBlock;
 
-    Storage(Scalar* data, const Dimensions& dimensions,
-            const Dimensions& strides, bool materialized_in_output,
+    Storage(Scalar* data,
+            const Dimensions& dimensions,
+            const Dimensions& strides,
+            bool materialized_in_output,
             bool strided_storage)
         : m_data(data),
           m_dimensions(dimensions),
@@ -728,7 +755,8 @@ class TensorMaterializedBlock {
   // destination buffer, or allocates a new buffer with scratch allocator.
   template
   EIGEN_STRONG_INLINE static Storage prepareStorage(
-      TensorBlockDesc& desc, TensorBlockScratch& scratch,
+      TensorBlockDesc& desc,
+      TensorBlockScratch& scratch,
       bool allow_strided_storage = false) {
     // Try to reuse destination as an output block buffer.
     typedef typename TensorBlockDesc::DestinationBuffer DestinationBuffer;
@@ -736,7 +764,8 @@ class TensorMaterializedBlock {
     if (desc.destination().kind() == DestinationBuffer::kContiguous) {
       Scalar* buffer = desc.destination().template data();
       desc.DropDestinationBuffer();
-      return Storage(buffer, desc.dimensions(),
+      return Storage(buffer,
+                     desc.dimensions(),
                      internal::strides(desc.dimensions()),
                      /*materialized_in_output=*/true,
                      /*strided_storage=*/false);
@@ -745,12 +774,16 @@ class TensorMaterializedBlock {
                allow_strided_storage) {
       Scalar* buffer = desc.destination().template data();
       desc.DropDestinationBuffer();
-      return Storage(buffer, desc.dimensions(), desc.destination().strides(),
-                     /*materialized_in_output=*/true, /*strided_storage=*/true);
+      return Storage(buffer,
+                     desc.dimensions(),
+                     desc.destination().strides(),
+                     /*materialized_in_output=*/true,
+                     /*strided_storage=*/true);
     } else {
       void* mem = scratch.allocate(desc.size() * sizeof(Scalar));
-      return Storage(static_cast(mem), desc.dimensions(),
+      return Storage(static_cast(mem),
+                     desc.dimensions(),
                      internal::strides(desc.dimensions()),
                      /*materialized_in_output=*/false,
                      /*strided_storage=*/false);
@@ -760,8 +793,10 @@ class TensorMaterializedBlock {
 
   // Creates a materialized block for the given descriptor from a memory buffer.
   template
   EIGEN_STRONG_INLINE static TensorMaterializedBlock materialize(
-      const Scalar* data, const DataDimensions& data_dims,
-      TensorBlockDesc& desc, TensorBlockScratch& scratch) {
+      const Scalar* data,
+      const DataDimensions& data_dims,
+      TensorBlockDesc& desc,
+      TensorBlockScratch& scratch) {
     eigen_assert(array_size::value == desc.dimensions().size());
 
     // If a tensor block dimensions covers a contiguous block of the underlying
@@ -797,8 +832,8 @@ class TensorMaterializedBlock {
     if (can_use_direct_access) {
       const Scalar* block_start = data + desc.offset();
-      return TensorMaterializedBlock(internal::TensorBlockKind::kView,
-                                     block_start, desc.dimensions());
+      return TensorMaterializedBlock(
+          internal::TensorBlockKind::kView, block_start, desc.dimensions());
     } else {
       // Reuse destination buffer or allocate new buffer with scratch allocator.
@@ -810,9 +845,10 @@ class TensorMaterializedBlock {
       typedef typename TensorBlockIO::Dst TensorBlockIODst;
       typedef typename TensorBlockIO::Src TensorBlockIOSrc;
 
       TensorBlockIOSrc src(internal::strides(Dimensions(data_dims)),
-                           data, desc.offset());
-      TensorBlockIODst dst(storage.dimensions(), storage.strides(),
-                           storage.data());
+                           data,
+                           desc.offset());
+      TensorBlockIODst dst(
+          storage.dimensions(), storage.strides(), storage.data());
 
       TensorBlockIO::Copy(dst, src);
       return storage.AsTensorMaterializedBlock();
@@ -838,9 +874,10 @@ class TensorCwiseUnaryBlock {
  public:
   typedef typename conditional<
-      NoArgBlockAccess, void,
-      TensorCwiseUnaryOp >::
-      type XprType;
+      NoArgBlockAccess,
+      void,
+      TensorCwiseUnaryOp>::type
+      XprType;
 
   typedef typename XprScalar::type Scalar;
@@ -870,9 +907,11 @@ class TensorCwiseBinaryBlock {
  public:
   typedef typename conditional<
-      NoArgBlockAccess, void,
-      TensorCwiseBinaryOp >::type
+      NoArgBlockAccess,
+      void,
+      TensorCwiseBinaryOp>::type
       XprType;
 
   typedef typename XprScalar::type Scalar;
@@ -915,7 +954,8 @@ class TensorUnaryExprBlock {
  public:
   typedef typename conditional<
-      NoArgBlockAccess, void,
+      NoArgBlockAccess,
+      void,
       typename BlockFactory::template XprType::type>::type XprType;
 
   typedef typename XprScalar::type Scalar;
@@ -938,8 +978,10 @@ class TensorUnaryExprBlock {
 
 // TensorTernaryExprBlock is a lazy tensor expression block that can construct
 // an arbitrary tensor expression from three blocks of the underlying type.
-template
+template
 class TensorTernaryExprBlock {
   typedef typename Arg1TensorBlock::XprType Arg1XprType;
   typedef typename Arg2TensorBlock::XprType Arg2XprType;
@@ -951,8 +993,10 @@ class TensorTernaryExprBlock {
  public:
   typedef typename conditional<
-      NoArgBlockAccess, void,
-      typename BlockFactory::template XprType::type>::type XprType;
+      NoArgBlockAccess,
+      void,
+      typename BlockFactory::template XprType::type>::type
       XprType;
 
   typedef typename XprScalar::type Scalar;
@@ -968,8 +1012,8 @@ class TensorTernaryExprBlock {
   TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; }
   XprType expr() const {
-    return m_factory.expr(m_arg1_block.expr(), m_arg2_block.expr(),
-                          m_arg3_block.expr());
+    return m_factory.expr(
+        m_arg1_block.expr(), m_arg2_block.expr(), m_arg3_block.expr());
   }
   const Scalar* data() const { return NULL; }
   void cleanup() {
@@ -1029,16 +1073,24 @@ class StridedLinearBufferCopy {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(const Dst& dst,
                                                         const Src& src,
                                                         const size_t count) {
-    Run(count, dst.offset, dst.stride, dst.data, src.offset, src.stride,
+    Run(count,
+        dst.offset,
+        dst.stride,
+        dst.data,
+        src.offset,
+        src.stride,
         src.data);
   }
 
  private:
   template
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
-      const IndexType count, const IndexType dst_offset,
-      const IndexType dst_stride, Scalar* EIGEN_RESTRICT dst_data,
-      const IndexType src_offset, const IndexType src_stride,
+      const IndexType count,
+      const IndexType dst_offset,
+      const IndexType dst_stride,
+      Scalar* EIGEN_RESTRICT dst_data,
+      const IndexType src_offset,
+      const IndexType src_stride,
       const Scalar* EIGEN_RESTRICT src_data) {
     const Scalar* src = &src_data[src_offset];
     Scalar* dst = &dst_data[dst_offset];
@@ -1150,7 +1202,9 @@ class TensorBlockIO {
   typedef DSizes DimensionsMap;
 
   struct Dst {
-    Dst(const Dimensions& dst_dims, const Dimensions& dst_strides, Scalar* dst,
+    Dst(const Dimensions& dst_dims,
+        const Dimensions& dst_strides,
+        Scalar* dst,
         IndexType dst_offset = 0)
         : dims(dst_dims), strides(dst_strides), data(dst), offset(dst_offset) {}
 
@@ -1161,7 +1215,8 @@ class TensorBlockIO {
   };
 
   struct Src {
-    Src(const Dimensions& src_strides, const Scalar* src,
+    Src(const Dimensions& src_strides,
+        const Scalar* src,
         IndexType src_offset = 0)
         : strides(src_strides), data(src), offset(src_offset) {}
 
@@ -1376,7 +1431,9 @@ class TensorBlockIO {
 // where `src` is a tensor expression. Explore if it is possible to rewrite IO
 // to use expressions instead of pointers, and after that TensorBlockAssignment
 // will become an alias to IO.
-template
+template
 class TensorBlockAssignment {
   // We will use coeff/packet path to evaluate block expressions.
@@ -1392,7 +1449,8 @@ class TensorBlockAssignment {
 
   template
   struct InnerDimAssign {
-    EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count,
+    EIGEN_ALWAYS_INLINE static void Run(Scalar* target,
+                                        IndexType count,
                                         const Evaluator& eval,
                                         IndexType eval_offset) {
       for (IndexType i = 0; i < count; ++i) {
@@ -1403,7 +1461,8 @@ class TensorBlockAssignment {
 
   template
   struct InnerDimAssign {
-    EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count,
+    EIGEN_ALWAYS_INLINE static void Run(Scalar* target,
+                                        IndexType count,
                                         const Evaluator& eval,
                                         IndexType eval_offset) {
       typedef typename packet_traits::type Packet;
@@ -1433,8 +1492,10 @@ class TensorBlockAssignment {
  public:
   struct Target {
-    Target(const Dimensions& target_dims, const Dimensions& target_strides,
-           Scalar* target_data, IndexType target_offset = 0)
+    Target(const Dimensions& target_dims,
+           const Dimensions& target_strides,
+           Scalar* target_data,
+           IndexType target_offset = 0)
         : dims(target_dims),
           strides(target_strides),
          data(target_data),
@@ -1447,7 +1508,8 @@ class TensorBlockAssignment {
   };
 
   static Target target(const Dimensions& target_dims,
-                       const Dimensions& target_strides, Scalar* target_data,
+                       const Dimensions& target_strides,
+                       Scalar* target_data,
                        IndexType target_offset = 0) {
     return Target(target_dims, target_strides, target_data, target_offset);
   }
@@ -1456,10 +1518,13 @@ class TensorBlockAssignment {
   static Target target(
       const DSizes& target_dims,
       const DSizes& target_strides,
-      Scalar* target_data, IndexType target_offset = 0) {
+      Scalar* target_data,
+      IndexType target_offset = 0) {
     // DSizes constructor will do index type promotion if it's safe.
-    return Target(Dimensions(target_dims), Dimensions(target_strides),
-                  target_data, target_offset);
+    return Target(Dimensions(target_dims),
+                  Dimensions(target_strides),
+                  target_data,
+                  target_offset);
   }
 
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
@@ -1521,7 +1586,8 @@ class TensorBlockAssignment {
         // Assign to `target` at current offset.
         InnerDimAssign::Run(target.data + output_offset,
-                            output_inner_dim_size, eval,
+                            output_inner_dim_size,
+                            eval,
                             input_offset);
 
         // Move input offset forward by the number of assigned coefficients.
diff --git a/python/paddle/fluid/dataloader/fetcher.py b/python/paddle/fluid/dataloader/fetcher.py
index 9382a70422370..41e12fbc68ec1 100644
--- a/python/paddle/fluid/dataloader/fetcher.py
+++ b/python/paddle/fluid/dataloader/fetcher.py
@@ -27,8 +27,8 @@ def fetch(self, batch_indices):
 
 class _IterableDatasetFetcher(_DatasetFetcher):
     def __init__(self, dataset, auto_collate_batch, collate_fn, drop_last):
-        super(_IterableDatasetFetcher, self).__init__(dataset, auto_collate_batch,
-                                                      collate_fn, drop_last)
+        super(_IterableDatasetFetcher, self).__init__(
+            dataset, auto_collate_batch, collate_fn, drop_last)
         self.dataset_iter = iter(dataset)
 
     def fetch(self, batch_indices):
@@ -53,7 +53,8 @@ def fetch(self, batch_indices):
 
 class _MapDatasetFetcher(_DatasetFetcher):
     def __init__(self, dataset, auto_collate_batch, collate_fn, drop_last):
-        super(_MapDatasetFetcher, self).__init__(dataset, auto_collate_batch, collate_fn, drop_last)
+        super(_MapDatasetFetcher, self).__init__(dataset, auto_collate_batch,
+                                                 collate_fn, drop_last)
 
     def fetch(self, batch_indices):
         if self.auto_collate_batch:
diff --git a/python/paddle/fluid/incubate/fleet/tests/cluster_train.sh b/python/paddle/fluid/incubate/fleet/tests/cluster_train.sh
index 1df6b0618de8d..cac2f7234bdf2 100644
--- a/python/paddle/fluid/incubate/fleet/tests/cluster_train.sh
+++ b/python/paddle/fluid/incubate/fleet/tests/cluster_train.sh
@@ -1,5 +1,19 @@
 #!/bin/bash
 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # start pserver0
 python fleet_deep_ctr.py \
     --role pserver \
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py
index 95cff4de6f6b0..69a9ae3c0ad2c 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py
@@ -40,9 +40,11 @@ def setUp(self):
             matmul_ab_square = paddle.square(matmul_ab)
             matmul_square_ab = paddle.matmul(data_a_square, data_b_square)
 
-            scale = paddle.fluid.layers.fill_constant(shape=[1], value=0.5, dtype='float32')
+            scale = paddle.fluid.layers.fill_constant(
+                shape=[1], value=0.5, dtype='float32')
 
-            sub_val = paddle.fluid.layers.elementwise_sub(matmul_ab_square, matmul_square_ab)
+            sub_val = paddle.fluid.layers.elementwise_sub(matmul_ab_square,
+                                                          matmul_square_ab)
             squared_mat_sub_out = fluid.layers.elementwise_mul(sub_val, scale)
 
         self.feeds = {
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py
index 94434f4043448..080d1ccc9054b 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py
@@ -25,19 +25,16 @@ class TensorRTMatMulDims2Test(InferencePassTest):
     def setUp(self):
         self.set_params()
         with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[24, 24], dtype="float32")
+            data = fluid.data(name="data", shape=[24, 24], dtype="float32")
             matmul_out = fluid.layers.matmul(
                 x=data,
                 y=data,
-                transpose_x = self.transpose_x,
-                transpose_y = self.transpose_y,
-                alpha = self.alpha)
+                transpose_x=self.transpose_x,
+                transpose_y=self.transpose_y,
+                alpha=self.alpha)
             out = fluid.layers.batch_norm(matmul_out, is_test=True)
 
-        self.feeds = {
-            "data": np.ones([24, 24]).astype("float32"),
-        }
+        self.feeds = {"data": np.ones([24, 24]).astype("float32"), }
         self.enable_trt = True
         self.trt_parameters = TensorRTMatMulDims2Test.TensorRTParam(
             1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)
@@ -65,14 +62,12 @@ def setUp(self):
             matmul_out = fluid.layers.matmul(
                 x=data,
                 y=data,
-                transpose_x = self.transpose_x,
-                transpose_y = self.transpose_y,
-                alpha = self.alpha)
+                transpose_x=self.transpose_x,
+                transpose_y=self.transpose_y,
+                alpha=self.alpha)
             out = fluid.layers.batch_norm(matmul_out, is_test=True)
 
-        self.feeds = {
-            "data": np.ones([1, 6, 24, 24]).astype("float32"),
-        }
+        self.feeds = {"data": np.ones([1, 6, 24, 24]).astype("float32"), }
         self.enable_trt = True
         self.trt_parameters = TensorRTMatMulTest.TensorRTParam(
             1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)
diff --git a/python/paddle/fluid/tests/unittests/parallel_test.sh b/python/paddle/fluid/tests/unittests/parallel_test.sh
index 9da4f035345d7..551b7cdb7a43c 100644
--- a/python/paddle/fluid/tests/unittests/parallel_test.sh
+++ b/python/paddle/fluid/tests/unittests/parallel_test.sh
@@ -1,4 +1,19 @@
 #!/bin/bash
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 unset https_proxy http_proxy
 export FLAGS_rpc_disable_reuse_port=1
diff --git a/python/paddle/fluid/tests/unittests/test_bce_loss.py b/python/paddle/fluid/tests/unittests/test_bce_loss.py
index 4b39436842b89..ea1a22780f093 100644
--- a/python/paddle/fluid/tests/unittests/test_bce_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_bce_loss.py
@@ -27,8 +27,10 @@ def test_static_layer(place,
     prog = paddle.static.Program()
     startup_prog = paddle.static.Program()
     with paddle.static.program_guard(prog, startup_prog):
-        input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64')
-        label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
+        input = paddle.fluid.data(
+            name='input', shape=input_np.shape, dtype='float64')
+        label = paddle.fluid.data(
+            name='label', shape=label_np.shape, dtype='float64')
         if weight_np is not None:
             weight = paddle.fluid.data(
                 name='weight', shape=weight_np.shape, dtype='float64')
@@ -58,8 +60,10 @@ def test_static_functional(place,
     prog = paddle.static.Program()
     startup_prog = paddle.static.Program()
     with paddle.static.program_guard(prog, startup_prog):
-        input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64')
-        label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
+        input = paddle.fluid.data(
+            name='input', shape=input_np.shape, dtype='float64')
+        label = paddle.fluid.data(
+            name='label', shape=label_np.shape, dtype='float64')
         if weight_np is not None:
             weight = paddle.fluid.data(
                 name='weight', shape=weight_np.shape, dtype='float64')
diff --git a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py
index a6175aa471d69..153b8fd3e7f6b 100644
--- a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py
@@ -48,8 +48,10 @@ def test_static(place,
     prog = paddle.static.Program()
     startup_prog = paddle.static.Program()
     with paddle.static.program_guard(prog, startup_prog):
-        logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
-        label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
+        logit = paddle.fluid.data(
+            name='logit', shape=logit_np.shape, dtype='float64')
+        label = paddle.fluid.data(
+            name='label', shape=label_np.shape, dtype='float64')
         feed_dict = {"logit": logit_np, "label": label_np}
 
         pos_weight = None
diff --git a/python/paddle/fluid/tests/unittests/test_c_comm_init_op.sh b/python/paddle/fluid/tests/unittests/test_c_comm_init_op.sh
index a9d450e223f1e..aba95a68ab790 100644
--- a/python/paddle/fluid/tests/unittests/test_c_comm_init_op.sh
+++ b/python/paddle/fluid/tests/unittests/test_c_comm_init_op.sh
@@ -1,4 +1,19 @@
 #!/bin/bash
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 set -e
 # use default values
 # FIXME: random fails on Unknown command lines -c (or -m).
diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py
index 16584ee50081a..a82866a797db1 100644
--- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py
+++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py
@@ -23,7 +23,6 @@
 
 paddle.enable_static()
 
-
 # For Net
 base_lr = 0.2
 emb_lr = base_lr * 3
diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
index aa85eb3df3527..28803f5ac6232 100644
--- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
+++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
@@ -170,7 +170,8 @@ def test_type():
             x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                            image_shape[3]).reshape(image_shape) / 100.
             x2 = x2.astype('float16')
-            x2_var = paddle.fluid.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
+            x2_var = paddle.fluid.data(
+                name='x2', shape=[3, 2, 4, 5], dtype='float16')
             paddle.flatten(x2_var)
 
         self.assertRaises(TypeError, test_type)
diff --git a/python/paddle/fluid/tests/unittests/test_l1_loss.py b/python/paddle/fluid/tests/unittests/test_l1_loss.py
index fba16959901a8..c35188623b440 100644
--- a/python/paddle/fluid/tests/unittests/test_l1_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_l1_loss.py
@@ -44,8 +44,10 @@ def run_imperative(self):
         self.assertTrue(dy_result.shape, [10, 10, 5])
 
     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
-        label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
+        input = paddle.fluid.data(
+            name='input', shape=[10, 10, 5], dtype='float32')
+        label = paddle.fluid.data(
+            name='label', shape=[10, 10, 5], dtype='float32')
         result0 = paddle.nn.functional.l1_loss(input, label)
         result1 = paddle.nn.functional.l1_loss(input, label, reduction='sum')
         result2 = paddle.nn.functional.l1_loss(input, label, reduction='none')
@@ -127,8 +129,10 @@ def run_imperative(self):
         self.assertTrue(dy_result.shape, [10, 10, 5])
 
     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
-        label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
+        input = paddle.fluid.data(
+            name='input', shape=[10, 10, 5], dtype='float32')
+        label = paddle.fluid.data(
+            name='label', shape=[10, 10, 5], dtype='float32')
         l1_loss = paddle.nn.loss.L1Loss()
         result0 = l1_loss(input, label)
         l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
diff --git a/python/paddle/fluid/tests/unittests/test_listen_and_serv.sh b/python/paddle/fluid/tests/unittests/test_listen_and_serv.sh
index bee230fba5a7e..d9d64e4dfa693 100644
--- a/python/paddle/fluid/tests/unittests/test_listen_and_serv.sh
+++ b/python/paddle/fluid/tests/unittests/test_listen_and_serv.sh
@@ -1,4 +1,19 @@
 #!/bin/bash
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 unset https_proxy http_proxy
 
 nohup python -u test_listen_and_serv_op.py > test_listen_and_serv_op.log 2>&1 &
diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py
index bc5d35d3254bc..89eef6ca24243 100644
--- a/python/paddle/fluid/tests/unittests/test_mse_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py
@@ -191,8 +191,10 @@ def test_NNFunctionalMseLoss_mean(self):
             place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
             ) else paddle.CPUPlace()
             with paddle.static.program_guard(prog, startup_prog):
-                input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
-                target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
+                input = paddle.fluid.data(
+                    name='input', shape=dim, dtype='float32')
+                target = paddle.fluid.data(
+                    name='target', shape=dim, dtype='float32')
                 mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')
 
             exe = paddle.static.Executor(place)
@@ -225,8 +227,10 @@ def test_NNFunctionalMseLoss_sum(self):
             place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
             ) else paddle.CPUPlace()
             with paddle.static.program_guard(prog, startup_prog):
-                input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
-                target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
+                input = paddle.fluid.data(
+                    name='input', shape=dim, dtype='float32')
+                target = paddle.fluid.data(
+                    name='target', shape=dim, dtype='float32')
                 mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')
 
             exe = paddle.static.Executor(place)
@@ -259,8 +263,10 @@ def test_NNFunctionalMseLoss_none(self):
             place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
             ) else paddle.CPUPlace()
             with paddle.static.program_guard(prog, startup_prog):
-                input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
-                target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
+                input = paddle.fluid.data(
+                    name='input', shape=dim, dtype='float32')
+                target = paddle.fluid.data(
+                    name='target', shape=dim, dtype='float32')
                 mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')
 
             exe = paddle.static.Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py
index 0533a0d09fa0d..3bb3e843b1b11 100644
--- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py
+++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py
@@ -160,5 +160,6 @@ def run_main(self, num_workers, places):
         print("time cost", ret['time'], 'step_list', ret['step'])
         return ret
 
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
index f75d6e9df540b..f1a409c712fc3 100644
--- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
+++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
@@ -97,8 +97,10 @@ def test_static_graph_functional(self):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
 
             paddle.enable_static()
-            x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
-            x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
+            x_1 = paddle.fluid.data(
+                name="x", shape=[2, 9, 4, 4], dtype="float64")
+            x_2 = paddle.fluid.data(
+                name="x2", shape=[2, 4, 4, 9], dtype="float64")
             out_1 = F.pixel_shuffle(x_1, 3)
             out_2 = F.pixel_shuffle(x_2, 3, "NHWC")
 
@@ -123,8 +125,10 @@ def test_static_graph_layer(self):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
 
             paddle.enable_static()
-            x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
-            x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
+            x_1 = paddle.fluid.data(
+                name="x", shape=[2, 9, 4, 4], dtype="float64")
+            x_2 = paddle.fluid.data(
+                name="x2", shape=[2, 4, 4, 9], dtype="float64")
             # init instance
             ps_1 = paddle.nn.PixelShuffle(3)
             ps_2 = paddle.nn.PixelShuffle(3, "NHWC")
diff --git a/python/paddle/fluid/tests/unittests/test_prod_op.py b/python/paddle/fluid/tests/unittests/test_prod_op.py
index 15fd79542d608..cdfcbb4e4e735 100644
--- a/python/paddle/fluid/tests/unittests/test_prod_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prod_op.py
@@ -55,7 +55,8 @@ def run_imperative(self):
         self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
 
     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
+        input = paddle.fluid.data(
+            name='input', shape=[10, 10, 5], dtype='float32')
         result0 = paddle.prod(input)
         result1 = paddle.prod(input, axis=1)
         result2 = paddle.prod(input, axis=-1)
@@ -114,7 +115,8 @@ def test_error(self):
         with paddle.static.program_guard(paddle.static.Program(),
                                          paddle.static.Program()):
             x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32')
-            bool_x = paddle.fluid.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
+            bool_x = paddle.fluid.data(
+                name='bool_x', shape=[2, 2, 4], dtype='bool')
 
             # The argument x shoule be a Tensor
             self.assertRaises(TypeError, paddle.prod, [1])
diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py
index 95ae1eecc6614..e71adae8d9b6e 100644
--- a/python/paddle/fluid/tests/unittests/test_selu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_selu_op.py
@@ -128,15 +128,18 @@ def test_errors(self):
             # The input type must be Variable.
             self.assertRaises(TypeError, F.selu, 1)
             # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+            x_int32 = paddle.fluid.data(
+                name='x_int32', shape=[12, 10], dtype='int32')
             self.assertRaises(TypeError, F.selu, x_int32)
             # The scale must be greater than 1.0
-            x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
+            x_fp32 = paddle.fluid.data(
+                name='x_fp32', shape=[12, 10], dtype='float32')
             self.assertRaises(ValueError, F.selu, x_fp32, -1.0)
             # The alpha must be no less than 0
             self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0)
             # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            x_fp16 = paddle.fluid.data(
+                name='x_fp16', shape=[12, 10], dtype='float16')
             F.selu(x_fp16)
diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py
index 85f9501e53f4a..2ef04d9cbfa73 100644
--- a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py
@@ -42,8 +42,10 @@ def test_static(place,
     prog = paddle.static.Program()
     startup_prog = paddle.static.Program()
     with paddle.static.program_guard(prog, startup_prog):
-        logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
-        label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
+        logit = paddle.fluid.data(
+            name='logit', shape=logit_np.shape, dtype='float64')
+        label = paddle.fluid.data(
+            name='label', shape=label_np.shape, dtype='float64')
         feed_dict = {"logit": logit_np, "label": label_np}
 
         normalizer = None
diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py
index f72df8cbe4640..59b4afdf8b02d 100644
--- a/python/paddle/fluid/tests/unittests/test_transpose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py
@@ -23,6 +23,7 @@
 
 paddle.enable_static()
 
+
 class TestTransposeOp(OpTest):
     def setUp(self):
         self.init_op_type()
@@ -151,6 +152,7 @@ def test_each_elem_value_check():
 
         self.assertRaises(ValueError, test_each_elem_value_check)
 
+
 class TestTransposeApi(unittest.TestCase):
     def test_static_out(self):
         paddle.enable_static()
@@ -161,10 +163,11 @@ def test_static_out(self):
             place = paddle.CPUPlace()
             exe = paddle.static.Executor(place)
             x_np = np.random.random([2, 3, 4]).astype("float32")
-            result1, result2 = exe.run(feed={"x": x_np}, fetch_list=[x_trans1, x_trans2])
+            result1, result2 = exe.run(feed={"x": x_np},
+                                       fetch_list=[x_trans1, x_trans2])
             expected_result1 = np.transpose(x_np, [1, 0, 2])
             expected_result2 = np.transpose(x_np, (2, 1, 0))
-            
+
             np.testing.assert_array_equal(result1, expected_result1)
             np.testing.assert_array_equal(result2, expected_result2)
 
@@ -185,6 +188,7 @@ def test_dygraph_out(self):
         # dygraph test
         paddle.enable_static()
 
+
 class TestTAPI(unittest.TestCase):
     def test_out(self):
         with fluid.program_guard(fluid.Program()):
diff --git a/tools/check_api_approvals.sh b/tools/check_api_approvals.sh
index 4e8ea25715451..eb05468eda6ca 100644
--- a/tools/check_api_approvals.sh
+++ b/tools/check_api_approvals.sh
@@ -1,5 +1,19 @@
 #!/bin/bash
 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 if [ -z ${BRANCH} ]; then
     BRANCH="develop"
 fi
diff --git a/tools/check_sequence_op.sh b/tools/check_sequence_op.sh
index ada96750eaad8..a263b046b258b 100644
--- a/tools/check_sequence_op.sh
+++ b/tools/check_sequence_op.sh
@@ -1,5 +1,19 @@
 #!/bin/bash
 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../" && pwd )"
 
 function check_sequnece_op_unitests(){
diff --git a/tools/cudaError/start.sh b/tools/cudaError/start.sh
index 3c0e57ffe7ec1..66e56b8485d8c 100644
--- a/tools/cudaError/start.sh
+++ b/tools/cudaError/start.sh
@@ -1,4 +1,19 @@
 #!/usr/bin/env bash
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 set -ex
 SYSTEM=`uname -s`
 rm -f protoc-3.11.3-linux-x86_64.*
diff --git a/tools/diff_api.py b/tools/diff_api.py
index 8a2acbb3d0acc..f086598945afe 100644
--- a/tools/diff_api.py
+++ b/tools/diff_api.py
@@ -1,4 +1,19 @@
 #!/usr/bin/env python
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from __future__ import print_function
 import difflib
 import sys
diff --git a/tools/diff_unittest.py b/tools/diff_unittest.py
index 382fbdd0b0c29..fa70be0990ec0 100644
--- a/tools/diff_unittest.py
+++ b/tools/diff_unittest.py
@@ -1,4 +1,19 @@
 #!/usr/bin/env python
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import difflib
 import sys
diff --git a/tools/dockerfile/icode.sh b/tools/dockerfile/icode.sh
index da3ffb8c77db7..973975fe7f737 100755
--- a/tools/dockerfile/icode.sh
+++ b/tools/dockerfile/icode.sh
@@ -1,5 +1,19 @@
 #!/bin/bash
 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 function install_gcc(){
   sed -i 's##RUN apt-get update \
diff --git a/tools/document_preview.sh b/tools/document_preview.sh
index 10f486f8fd4f6..83c758d0aa8b8 100755
--- a/tools/document_preview.sh
+++ b/tools/document_preview.sh
@@ -1,4 +1,19 @@
 #!/bin/bash
+
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 PADDLE_ROOT=/home
 mkdir ${PADDLE_ROOT}
 cd ${PADDLE_ROOT}
diff --git a/tools/get_cpu_info.sh b/tools/get_cpu_info.sh
index 81eb19dc0661e..bce338a8619e6 100755
--- a/tools/get_cpu_info.sh
+++ b/tools/get_cpu_info.sh
@@ -1,5 +1,19 @@
 #!/bin/bash
 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 if [ "`uname -s`" != "Linux" ]; then
   echo "Current scenario only support in Linux yet!"
   exit 0
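
Note on the sampling code touched above: `RandomSampler::sample_k` in `weighted_sampler.cc` draws `k` distinct neighbor indices without replacement using a hash-map variant of the Fisher-Yates shuffle, so it needs only O(k) extra memory instead of materializing and mutating an n-element array. The following is a minimal standalone C++ sketch of that technique, not part of the patch; the function name `sample_k_without_replacement` and the `main` driver are illustrative only.

```cpp
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <unordered_map>
#include <vector>

// Draw k distinct indices uniformly from [0, n) in O(k) time and space.
// replace_map[i] records the value a classic Fisher-Yates swap would have
// left at position i; untouched positions implicitly hold their own index.
std::vector<int> sample_k_without_replacement(int n, int k) {
  if (k > n) k = n;
  std::vector<int> result;
  result.reserve(k);
  std::unordered_map<int, int> replace_map;
  while (k--) {
    int r = std::rand() % n;
    // Output the value currently "stored" at position r.
    auto it = replace_map.find(r);
    result.push_back(it == replace_map.end() ? r : it->second);
    // Move the value at the last position of the range into slot r,
    // then shrink the range, exactly as a Fisher-Yates swap would.
    auto last = replace_map.find(n - 1);
    replace_map[r] = (last == replace_map.end()) ? n - 1 : last->second;
    --n;
  }
  return result;
}

int main() {
  std::srand(static_cast<unsigned>(std::time(nullptr)));
  std::vector<int> idx = sample_k_without_replacement(100, 10);
  assert(idx.size() == 10u);  // 10 distinct indices in [0, 100)
  return 0;
}
```

The same trade-off motivates the patched code: a graph node may have millions of neighbors, so sampling a handful of them must not touch all `n` edges.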