Skip to content

Commit

Permalink
remove boost::algorithm::ends_with, boost macro and boost::lexical_ca…
Browse files Browse the repository at this point in the history
…st apis (#34310)

* replace boost::algorithm::ends_with with a self-defined ends_with function

* remove BOOST macros in certain operators

* remove boost::lexical_cast

* add tests for string_helper

* add more test cases for string_helper

* modify join_string function and its test cases

* fix bug causing build_strategy_test to fail

* remove string_helper_test from parallel_UT_rule.py
  • Loading branch information
MingMingShangTian authored Aug 5, 2021
1 parent 911c859 commit bb7b4c0
Show file tree
Hide file tree
Showing 19 changed files with 333 additions and 226 deletions.
9 changes: 4 additions & 5 deletions paddle/fluid/distributed/common/sparse_sharding_merge.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
#include <vector>

#include <ThreadPool.h>
#include "boost/lexical_cast.hpp"
#include "glog/logging.h"
#include "paddle/fluid/distributed/common/utils.h"
#include "paddle/fluid/framework/blocking_queue.h"
Expand All @@ -36,8 +35,6 @@ constexpr int Q_SIZE = 10000;
constexpr int BUCKET = 10;
constexpr char XEOF[] = "EOF";

using boost::lexical_cast;

inline double GetCurrentUS() {
struct timeval time;
gettimeofday(&time, NULL);
Expand Down Expand Up @@ -208,8 +205,10 @@ class ShardingMerge {
for (int x = 0; x < embedding_dim; ++x) {
float v = 0.0;
try {
v = lexical_cast<float>(values_str[x]);
} catch (boost::bad_lexical_cast &e) {
v = std::stof(values_str[x]);
} catch (std::invalid_argument &e) {
VLOG(0) << " get unexpected line: " << line;
} catch (std::out_of_range &e) {
VLOG(0) << " get unexpected line: " << line;
}
out->push_back(v);
Expand Down
4 changes: 1 addition & 3 deletions paddle/fluid/distributed/index_dataset/index_wrapper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,6 @@ limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/io/fs.h"

#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include "paddle/fluid/distributed/index_dataset/index_wrapper.h"

namespace paddle {
Expand Down Expand Up @@ -65,7 +63,7 @@ int TreeIndex::Load(const std::string filename) {
if (item.key() == ".tree_meta") {
meta_.ParseFromString(item.value());
} else {
auto code = boost::lexical_cast<uint64_t>(item.key());
auto code = std::stoull(item.key());
IndexNode node;
node.ParseFromString(item.value());
PADDLE_ENFORCE_NE(node.id(), 0,
Expand Down
17 changes: 9 additions & 8 deletions paddle/fluid/distributed/table/common_sparse_table.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
#include "paddle/fluid/distributed/table/common_sparse_table.h"
#include <sstream>

#include "boost/lexical_cast.hpp"
#include "glog/logging.h"
#include "paddle/fluid/platform/enforce.h"

Expand Down Expand Up @@ -50,8 +49,11 @@ void CommonSparseTable::ProcessALine(const std::vector<std::string>& columns,
float v = 0.0;

try {
v = lexical_cast<float>(va);
} catch (boost::bad_lexical_cast& e) {
v = std::stof(va);
} catch (std::invalid_argument& e) {
VLOG(0) << "id: " << id << " get unexpected value: " << va
<< " and be reset to: 0.0";
} catch (std::out_of_range& e) {
VLOG(0) << "id: " << id << " get unexpected value: " << va
<< " and be reset to: 0.0";
}
Expand Down Expand Up @@ -131,7 +133,7 @@ int64_t CommonSparseTable::LoadFromText(

while (std::getline(file, line)) {
auto values = paddle::string::split_string<std::string>(line, "\t");
auto id = lexical_cast<uint64_t>(values[0]);
auto id = std::stoull(values[0]);

if (id % pserver_num != pserver_id) {
VLOG(3) << "will not load " << values[0] << " from " << valuepath
Expand All @@ -150,10 +152,9 @@ int64_t CommonSparseTable::LoadFromText(
VALUE* value_instant = block->GetValue(id);

if (values.size() == 5) {
value_instant->count_ = lexical_cast<int>(values[1]);
value_instant->unseen_days_ = lexical_cast<int>(values[2]);
value_instant->is_entry_ =
static_cast<bool>(lexical_cast<int>(values[3]));
value_instant->count_ = std::stoi(values[1]);
value_instant->unseen_days_ = std::stoi(values[2]);
value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
}

std::vector<float*> block_values = block->Get(id, meta.names, meta.dims);
Expand Down
1 change: 0 additions & 1 deletion paddle/fluid/distributed/table/common_sparse_table.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@
#include "paddle/fluid/string/string_helper.h"

#define PSERVER_SAVE_SUFFIX ".shard"
using boost::lexical_cast;

namespace paddle {
namespace distributed {
Expand Down
9 changes: 4 additions & 5 deletions paddle/fluid/distributed/table/ssd_sparse_table.cc
Original file line number Diff line number Diff line change
Expand Up @@ -310,7 +310,7 @@ int64_t SSDSparseTable::LoadFromText(

while (std::getline(file, line)) {
auto values = paddle::string::split_string<std::string>(line, "\t");
auto id = lexical_cast<uint64_t>(values[0]);
auto id = std::stoull(values[0]);

if (id % pserver_num != pserver_id) {
VLOG(3) << "will not load " << values[0] << " from " << valuepath
Expand All @@ -329,10 +329,9 @@ int64_t SSDSparseTable::LoadFromText(
VALUE* value_instant = block->GetValue(id);

if (values.size() == 5) {
value_instant->count_ = lexical_cast<int>(values[1]);
value_instant->unseen_days_ = lexical_cast<int>(values[2]);
value_instant->is_entry_ =
static_cast<bool>(lexical_cast<int>(values[3]));
value_instant->count_ = std::stoi(values[1]);
value_instant->unseen_days_ = std::stoi(values[2]);
value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
}

std::vector<float*> block_values = block->Get(id, meta.names, meta.dims);
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ if(NOT APPLE AND NOT WIN32 AND (WITH_GPU OR WITH_ROCM))
endif()
cc_library(build_strategy SRCS build_strategy.cc DEPS pass_builder ${IR_PASS_DEPS})
cc_test(build_strategy_test SRCS build_strategy_test.cc
DEPS build_strategy op_registry op_proto_maker graph)
DEPS build_strategy op_registry op_proto_maker graph string_helper)

if (WITH_MKLDNN)
target_link_libraries(build_strategy mkldnn_placement_pass)
Expand Down
13 changes: 9 additions & 4 deletions paddle/fluid/framework/fleet/fleet_wrapper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -262,7 +262,7 @@ void FleetWrapper::HeterPushSparseVars(
int64_t* ids = tensor->data<int64_t>();
int slot = 0;
if (dump_slot) {
slot = boost::lexical_cast<int>(sparse_key_names[i]);
slot = std::stoi(sparse_key_names[i]);
}
Variable* g_var = scope.FindVar(sparse_grad_names[i]);
if (g_var == nullptr) {
Expand Down Expand Up @@ -915,12 +915,17 @@ void FleetWrapper::PushSparseVarsWithLabelAsync(
int slot = 0;
if (dump_slot) {
try {
slot = boost::lexical_cast<int>(sparse_key_names[i]);
} catch (boost::bad_lexical_cast const& e) {
slot = std::stoi(sparse_key_names[i]);
} catch (std::invalid_argument const& e) {
PADDLE_THROW(platform::errors::PreconditionNotMet(
"sparse var's name: %s, doesn't support non-integer type name when "
"dump_slot=True",
sparse_key_names[i]));
} catch (std::out_of_range const& e) {
PADDLE_THROW(platform::errors::PreconditionNotMet(
"sparse var's name: %s, integer type name out of range when "
"dump_slot=True",
sparse_key_names[i]));
}
}
Variable* g_var = scope.FindVar(sparse_grad_names[i]);
Expand Down Expand Up @@ -1121,7 +1126,7 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync(
data[click_index] = static_cast<float>(fea_labels.at(input_idx));
}
if (dump_slot) {
int slot = boost::lexical_cast<int>(input_names[index]);
int slot = std::stoi(input_names[index]);
data[0] = static_cast<float>(slot);
}
++input_idx;
Expand Down
5 changes: 2 additions & 3 deletions paddle/fluid/framework/ir/lock_free_optimize_pass.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,9 @@
#include <string>
#include <vector>

#include <boost/algorithm/string/predicate.hpp>

#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/string/string_helper.h"

namespace paddle {
namespace framework {
Expand Down Expand Up @@ -109,7 +108,7 @@ class LockFreeOptimizePass : public Pass {
"Input argument node cannot be nullptr."));

return node->NodeType() == Node::Type::kVariable &&
boost::algorithm::ends_with(node->Name(), name);
paddle::string::ends_with(node->Name(), name);
}

inline bool IsVarNameContains(ir::Node* node, const std::string& name) const {
Expand Down
68 changes: 36 additions & 32 deletions paddle/fluid/operators/expand_as_op.h
100755 → 100644
Original file line number Diff line number Diff line change
Expand Up @@ -13,42 +13,12 @@ limitations under the License. */

#include <vector>

#include <boost/preprocessor/arithmetic/div.hpp>
#include <boost/preprocessor/arithmetic/mod.hpp>
#include <boost/preprocessor/comparison/greater.hpp>
#include <boost/preprocessor/comparison/greater_equal.hpp>
#include <boost/preprocessor/control/if.hpp>
#include <boost/preprocessor/repetition/repeat.hpp>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"

#define MAX_RANK_SUPPORTED 6
// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
// Usage: BOOST_PP_REPEAT(count, macro, data).
// This macro expands to the sequence:
// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
// So the range of n is 0-5(which is count-1).
// We want to generate case 1-6 instead of case 0-5.
// So we need to change n to n + 1.
#define EXPAND_AS_TEMPLATE(z, n, data) \
case n + 1: { \
ExpandAs<n + 1>(context); \
break; \
}
#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define EXPAND_AS_GRAD_CASE(n) \
case n + 1: { \
ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
break; \
}
#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)

namespace paddle {
namespace operators {
Expand All @@ -67,7 +37,24 @@ class ExpandAsKernel : public framework::OpKernel<T> {
void Compute(const framework::ExecutionContext& context) const override {
auto rank = context.Input<Tensor>("X")->dims().size();
switch (rank) {
REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED)
case 1:
ExpandAs<1>(context);
break;
case 2:
ExpandAs<2>(context);
break;
case 3:
ExpandAs<3>(context);
break;
case 4:
ExpandAs<4>(context);
break;
case 5:
ExpandAs<5>(context);
break;
case 6:
ExpandAs<6>(context);
break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But received "
Expand Down Expand Up @@ -165,7 +152,24 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
"to %d, but the value received is %d.",
MAX_RANK_SUPPORTED, dims));
switch (dims) {
REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
case 1:
ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 2:
ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 3:
ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 4:
ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 5:
ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 6:
ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
Expand Down
70 changes: 38 additions & 32 deletions paddle/fluid/operators/expand_as_v2_op.h
100755 → 100644
Original file line number Diff line number Diff line change
Expand Up @@ -14,42 +14,12 @@ limitations under the License. */
#include <algorithm>
#include <vector>

#include <boost/preprocessor/arithmetic/div.hpp>
#include <boost/preprocessor/arithmetic/mod.hpp>
#include <boost/preprocessor/comparison/greater.hpp>
#include <boost/preprocessor/comparison/greater_equal.hpp>
#include <boost/preprocessor/control/if.hpp>
#include <boost/preprocessor/repetition/repeat.hpp>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"

#define MAX_RANK_SUPPORTED 6
// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
// Usage: BOOST_PP_REPEAT(count, macro, data).
// This macro expands to the sequence:
// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
// So the range of n is 0-5(which is count-1).
// We want to generate case 1-6 instead of case 0-5.
// So we need to change n to n + 1.
#define EXPAND_AS_TEMPLATE(z, n, data) \
case n + 1: { \
ExpandAs<n + 1>(context); \
break; \
}
#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define EXPAND_AS_GRAD_CASE(n) \
case n + 1: { \
ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
break; \
}
#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)

namespace paddle {
namespace operators {
Expand Down Expand Up @@ -85,7 +55,26 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
"expand_as_v2 op must be less than or equal to %d.",
target_rank, MAX_RANK_SUPPORTED));

switch (target_rank) { REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED) }
switch (target_rank) {
case 1:
ExpandAs<1>(context);
break;
case 2:
ExpandAs<2>(context);
break;
case 3:
ExpandAs<3>(context);
break;
case 4:
ExpandAs<4>(context);
break;
case 5:
ExpandAs<5>(context);
break;
case 6:
ExpandAs<6>(context);
break;
}
}

protected:
Expand Down Expand Up @@ -186,7 +175,24 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> {
"to %d, but the value received is %d.",
MAX_RANK_SUPPORTED, dims));
switch (dims) {
REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
case 1:
ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 2:
ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 3:
ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 4:
ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 5:
ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
break;
case 6:
ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
Expand Down
Loading

0 comments on commit bb7b4c0

Please sign in to comment.