rename consistent to global #8505

Merged: 48 commits, Jul 8, 2022.
The diff below shows changes from 35 of the 48 commits.

Commits:
cc1328c
rename consistent to global
Flowingsun007 Jun 28, 2022
57ed40e
rename consistent to global
Flowingsun007 Jun 28, 2022
d44cf11
rename files
Flowingsun007 Jun 28, 2022
e711434
rename files
Flowingsun007 Jun 28, 2022
a209841
merge master
Flowingsun007 Jun 28, 2022
07d5e17
refine
Flowingsun007 Jun 28, 2022
ef866fe
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jun 28, 2022
21b5b27
auto format by CI
oneflow-ci-bot Jun 28, 2022
11cb7e4
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jun 28, 2022
c2c2d04
merge master
Flowingsun007 Jun 29, 2022
8ed46e7
merge master
Flowingsun007 Jun 29, 2022
40780dd
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jun 29, 2022
71eb51f
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jun 30, 2022
003dbfa
refine
Flowingsun007 Jun 30, 2022
1a5fc44
merge master
Flowingsun007 Jul 1, 2022
405d538
fix clang check
Flowingsun007 Jul 1, 2022
85b6925
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 1, 2022
643ba45
fix
Flowingsun007 Jul 1, 2022
a881386
fix
Flowingsun007 Jul 1, 2022
ee1b662
fix
Flowingsun007 Jul 1, 2022
ca7360f
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 1, 2022
a6d8286
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 1, 2022
b8db91e
merge master
Flowingsun007 Jul 4, 2022
c6705ef
rm to_consistent docs
Flowingsun007 Jul 4, 2022
4d55c6d
auto format by CI
oneflow-ci-bot Jul 4, 2022
d049541
refine
Flowingsun007 Jul 4, 2022
e71876e
Merge branch 'rename_consistent_to_global' of github.com:Oneflow-Inc/…
Flowingsun007 Jul 4, 2022
3509ebf
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 4, 2022
71a5fd6
fix
Flowingsun007 Jul 4, 2022
1ef10e4
merge master
Flowingsun007 Jul 4, 2022
ab2d761
fix
Flowingsun007 Jul 5, 2022
7bf01cc
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 5, 2022
13e8717
revert changes
Flowingsun007 Jul 5, 2022
0e70ca1
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 5, 2022
234a0dc
auto format by CI
oneflow-ci-bot Jul 5, 2022
453c62d
revert changes
Flowingsun007 Jul 5, 2022
7af9f03
Merge branch 'rename_consistent_to_global' of github.com:Oneflow-Inc/…
Flowingsun007 Jul 5, 2022
1def9c8
revert changes
Flowingsun007 Jul 5, 2022
fbe1a19
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 6, 2022
4269ecf
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 6, 2022
4e147a4
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 7, 2022
4c26020
rename
Flowingsun007 Jul 7, 2022
d859a2b
rename
Flowingsun007 Jul 7, 2022
4e7ee2e
resolve conflicts
Flowingsun007 Jul 8, 2022
9d0cf11
fix
Flowingsun007 Jul 8, 2022
4a248b5
fix clang check
Flowingsun007 Jul 8, 2022
69d67f0
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 8, 2022
0d81701
Merge branch 'master' into rename_consistent_to_global
Flowingsun007 Jul 8, 2022
1 change: 0 additions & 1 deletion docs/source/tensor.rst
@@ -175,7 +175,6 @@ OneFlow Tensor Class
global_to_global,
to_global,
to_local,
to_consistent,
tolist,
topk,
transpose,
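The docs change above drops to_consistent from the Tensor class reference because the user-facing method is now to_global. A minimal sketch of the renamed calls, assuming a single-rank placement purely for illustration (the placement and sbp values are not taken from this PR):

    import oneflow as flow

    x = flow.randn(2, 4)                                  # an ordinary local tensor
    placement = flow.placement("cpu", ranks=[0])          # illustrative single-rank placement
    sbp = flow.sbp.broadcast

    x_global = x.to_global(placement=placement, sbp=sbp)  # formerly x.to_consistent(...)
    print(x_global.is_global)                             # True; formerly is_consistent
    x_local = x_global.to_local()                         # name unchanged by this PR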
4 changes: 2 additions & 2 deletions oneflow/api/cpp/env.cpp
@@ -18,7 +18,7 @@ limitations under the License.
#include "oneflow/api/cpp/env.h"
#include "oneflow/api/cpp/env_impl.h"
#include "oneflow/core/framework/shut_down_util.h"
#include "oneflow/core/thread/thread_consistent_id.h"
#include "oneflow/core/thread/thread_global_id.h"

namespace oneflow_api {
void initialize() {
@@ -29,7 +29,7 @@ void initialize() {
void release() {
if (of::Singleton<OneFlowEnv>::Get() != nullptr) { of::Singleton<OneFlowEnv>::Delete(); }
of::SetShuttingDown();
of::ResetThisThreadUniqueConsistentId().GetOrThrow();
of::ResetThisThreadUniqueGlobalId().GetOrThrow();
}

} // namespace oneflow_api
2 changes: 1 addition & 1 deletion oneflow/api/cpp/framework/graph.cpp
@@ -306,7 +306,7 @@ of::Maybe<void> Graph::GraphImpl::AddOp(of::OperatorConf op_conf) {
0, batch_size_);
}
auto* ctx = JUST(of::GetCurInferCtx());
JUST(ctx->AddAndInferConsistentOp(op_conf));
JUST(ctx->AddAndInferGlobalOp(op_conf));
return of::Maybe<void>::Ok();
}

2 changes: 1 addition & 1 deletion oneflow/api/python/framework/session_util.cpp
@@ -27,7 +27,7 @@ ONEFLOW_API_PYBIND11_MODULE("", m) {
.def("push_mirrored_strategy_enabled", &Session::PushMirroredStrategyEnabled)
.def("pop_mirrored_strategy_enabled", &Session::PopMirroredStrategyEnabled)
.def("is_mirrored_strategy_enabled", &Session::IsMirroredStrategyEnabled)
.def("is_consistent_strategy_enabled", &Session::IsConsistentStrategyEnabled)
.def("is_global_strategy_enabled", &Session::IsGlobalStrategyEnabled)
.def("is_mirrored_strategy_enabled_stack_size",
[](const Session* sess) { return sess->is_mirrored_strategy_enabled_stack()->size(); });

2 changes: 1 addition & 1 deletion oneflow/api/python/framework/tensor.cpp
@@ -509,7 +509,7 @@ static PyObject* PyTensorObject_is_eager(PyObject* self, void* unused) {
}

static PyObject* PyTensorObject_is_global(PyObject* self, void* unused) {
return functional::CastToPyObject(PyTensor_Unpack(self)->is_consistent());
return functional::CastToPyObject(PyTensor_Unpack(self)->is_global());
}

static PyObject* PyTensorObject_is_local(PyObject* self, void* unused) {
21 changes: 10 additions & 11 deletions oneflow/api/python/framework/tensor_functions.cpp
@@ -662,16 +662,15 @@ static PyObject* PyTensorObject_local_to_global(PyObject* self, PyObject* args,
<< functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(sbp_obj)));
sbp = functional::PyUnpackSbpParallelSequence(sbp_obj);
}
return PyTensor_New(ASSERT_PTR(functional::ToConsistent(
return PyTensor_New(ASSERT_PTR(functional::ToGlobal(
tensor, functional::PyUnpackParallelDesc(placement_obj), sbp, {}, check_meta)));
END_HANDLE_ERRORS
}

static PyObject* PyTensorObject_global_to_global(PyObject* self, PyObject* args, PyObject* kwargs) {
HANDLE_ERRORS
auto tensor = PyTensor_Unpack(self);
CHECK_OR_THROW(tensor->is_consistent())
<< Error::RuntimeError() << "input must be a global tensor";
CHECK_OR_THROW(tensor->is_global()) << Error::RuntimeError() << "input must be a global tensor";
PyObject* placement_obj = Py_None;
PyObject* sbp_obj = Py_None;
PyObject* grad_sbp_obj = Py_None;
@@ -721,15 +720,15 @@ static PyObject* PyTensorObject_global_to_global(PyObject* self, PyObject* args,
grad_sbp = functional::PyUnpackSbpParallelSequence(grad_sbp_obj);
}
return PyTensor_New(
ASSERT_PTR(functional::ToConsistent(tensor, placement, sbp, grad_sbp, check_meta)));
ASSERT_PTR(functional::ToGlobal(tensor, placement, sbp, grad_sbp, check_meta)));
END_HANDLE_ERRORS
}

static PyObject* PyTensorObject_to_global(PyObject* self, PyObject* args, PyObject* kwargs) {
HANDLE_ERRORS
const auto& tensor = PyTensor_Unpack(self);
PyObject* result = NULL;
if (tensor->is_consistent())
if (tensor->is_global())
result = PyTensorObject_global_to_global(self, args, kwargs);
else {
result = PyTensorObject_local_to_global(self, args, kwargs);
@@ -743,9 +742,9 @@
static PyObject* PyTensorObject_to_local(PyObject* self, PyObject* unused) {
HANDLE_ERRORS
auto tensor = PyTensor_Unpack(self);
CHECK_OR_THROW(tensor->is_consistent())
CHECK_OR_THROW(tensor->is_global())
<< Error::RuntimeError() << "Expected global tensor for to_local but got local tensor!";
return PyTensor_New(ASSERT_PTR(functional::ConsistentToLocal(tensor)));
return PyTensor_New(ASSERT_PTR(functional::GlobalToLocal(tensor)));
END_HANDLE_ERRORS
}

@@ -760,21 +759,21 @@ int PyTensorObject_setitem(PyObject* self, PyObject* item, PyObject* value) {
<< Error::TypeError() << "tensor_setitem(): argument 'value' must be tensor or scalar, not "
<< functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(value)));

if (tensor->is_consistent()) {
if (tensor->is_global()) {
Symbol<ParallelDesc> placement = ASSERT(tensor->parallel_desc());
auto ndsbp = ASSERT(tensor->nd_sbp());
std::vector<Symbol<SbpParallel>> sbp(ndsbp->sbp_parallel_size(),
ASSERT(MakeBroadcastSbpParallel()));
if (functional::PyScalarCheck(value)) {
Scalar value_scalar = functional::PyUnpackScalar(value);
value_tensor = ASSERT_PTR(
functional::ConsistentConstant({1}, value_scalar, tensor->dtype(), placement, sbp));
functional::GlobalConstant({1}, value_scalar, tensor->dtype(), placement, sbp));
} else {
value_tensor = PyTensor_Unpack(value);
CHECK_OR_THROW(value_tensor->is_consistent())
CHECK_OR_THROW(value_tensor->is_global())
<< Error::RuntimeError()
<< "tensor_setitem(): value must be a global tensor when self is global";
value_tensor = ASSERT_PTR(functional::ToConsistent(value_tensor, placement, sbp, {}, true));
value_tensor = ASSERT_PTR(functional::ToGlobal(value_tensor, placement, sbp, {}, true));
}
} else {
if (functional::PyScalarCheck(value)) {
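For context, the setitem branch above turns a Python scalar into a broadcast global constant and converts tensor values with ToGlobal before the update. A rough user-level sketch of what exercises that path, assuming slice assignment on global tensors is available in your build (the placement and sbp values are illustrative):

    import oneflow as flow

    placement = flow.placement("cpu", ranks=[0])
    x = flow.zeros(4, 4, placement=placement, sbp=flow.sbp.broadcast)

    # A Python scalar is materialized internally as a broadcast global constant.
    x[0, :] = 1.0

    # A tensor value must itself be global when x is global.
    y = flow.ones(4, placement=placement, sbp=flow.sbp.broadcast)
    x[1, :] = y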
32 changes: 16 additions & 16 deletions oneflow/api/python/functional/tensor_api.cpp
@@ -70,7 +70,7 @@ class TensorWithDataFunctor {
}
};

class ConsistentTensorWithDataFunctor {
class GlobalTensorWithDataFunctor {
public:
Maybe<Tensor> operator()(PyObject* data, const Optional<Symbol<DType>>& dtype,
const Symbol<ParallelDesc>& placement,
Expand All @@ -93,8 +93,8 @@ class ConsistentTensorWithDataFunctor {
const auto& other = PyTensor_Unpack(data);
return MakeTensorFromOtherTensor(other, dtype, placement, sbp_tuple, requires_grad);
}
// Make consistent tensor from python sequence or numpy array.
return MakeConsistentTensorFromData(data, dtype, placement, sbp_tuple, requires_grad);
// Make global tensor from python sequence or numpy array.
return MakeGlobalTensorFromData(data, dtype, placement, sbp_tuple, requires_grad);
}
};

@@ -106,13 +106,13 @@ class TensorEmptyCtorFunctor {
}
};

class ConsistentTensorEmptyCtorFunctor {
class GlobalTensorEmptyCtorFunctor {
public:
Maybe<Tensor> operator()(const Symbol<ParallelDesc>& placement,
const std::vector<Symbol<SbpParallel>>& sbp_tuple) const {
Shape shape(DimVector{0});
JUST(CheckDeviceIdsIsValid(placement));
return ConsistentTensorWithShapeCtor(shape, placement, sbp_tuple);
return GlobalTensorWithShapeCtor(shape, placement, sbp_tuple);
}
};

@@ -155,7 +155,7 @@ class TensorWithDataCtorFunctor {
}
};

class ConsistentTensorWithDataCtorFunctor {
class GlobalTensorWithDataCtorFunctor {
public:
Maybe<Tensor> operator()(PyObject* data, const Symbol<ParallelDesc>& placement,
const std::vector<Symbol<SbpParallel>>& sbp_tuple) const {
@@ -164,10 +164,10 @@ class ConsistentTensorWithDataCtorFunctor {
if (PyLong_Check(data)) {
int64_t size = PyLong_AsLongLong(data);
Shape shape(DimVector{size});
return ConsistentTensorWithShapeCtor(shape, placement, sbp_tuple);
return GlobalTensorWithShapeCtor(shape, placement, sbp_tuple);
}
if (TensorSize_Check(data)) {
return ConsistentTensorWithShapeCtor(TensorSize_AsShape(data), placement, sbp_tuple);
return GlobalTensorWithShapeCtor(TensorSize_AsShape(data), placement, sbp_tuple);
}

// NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now.
@@ -179,8 +179,8 @@ class ConsistentTensorWithDataCtorFunctor {
return MakeTensorFromOtherTensor(other, dtype, placement, sbp_tuple,
/*requires_grad=*/false);
}
// Make consistent tensor from python sequence or numpy array.
return MakeConsistentTensorFromData(data, dtype, placement, sbp_tuple, /*requires_grad=*/false);
// Make global tensor from python sequence or numpy array.
return MakeGlobalTensorFromData(data, dtype, placement, sbp_tuple, /*requires_grad=*/false);
}
};

@@ -199,14 +199,14 @@
}
};

class ConsistentTensorWithShapeCtorFunctor {
class GlobalTensorWithShapeCtorFunctor {
public:
Maybe<Tensor> operator()(const Shape& shape, const Symbol<ParallelDesc>& placement,
const std::vector<Symbol<SbpParallel>>& sbp_tuple) const {
// NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now.
LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false);
JUST(CheckDeviceIdsIsValid(placement));
return functional::ConsistentEmpty(shape, DType::Float(), placement, sbp_tuple);
return functional::GlobalEmpty(shape, DType::Float(), placement, sbp_tuple);
}
};

@@ -305,14 +305,14 @@ class LocalTensorSharedNumpyDataFunctor {

ONEFLOW_FUNCTION_LIBRARY(m) {
m.add_functor<impl::TensorWithDataFunctor>("TensorWithData");
m.add_functor<impl::ConsistentTensorWithDataFunctor>("ConsistentTensorWithData");
m.add_functor<impl::GlobalTensorWithDataFunctor>("GlobalTensorWithData");
m.add_functor<impl::TensorEmptyCtorFunctor>("TensorEmptyCtor");
m.add_functor<impl::ConsistentTensorEmptyCtorFunctor>("ConsistentTensorEmptyCtor");
m.add_functor<impl::GlobalTensorEmptyCtorFunctor>("GlobalTensorEmptyCtor");
m.add_functor<impl::TensorWithOtherCtorFunctor>("TensorWithOtherCtor");
m.add_functor<impl::TensorWithDataCtorFunctor>("TensorWithDataCtor");
m.add_functor<impl::ConsistentTensorWithDataCtorFunctor>("ConsistentTensorWithDataCtor");
m.add_functor<impl::GlobalTensorWithDataCtorFunctor>("GlobalTensorWithDataCtor");
m.add_functor<impl::TensorWithShapeCtorFunctor>("TensorWithShapeCtor");
m.add_functor<impl::ConsistentTensorWithShapeCtorFunctor>("ConsistentTensorWithShapeCtor");
m.add_functor<impl::GlobalTensorWithShapeCtorFunctor>("GlobalTensorWithShapeCtor");
m.add_functor<impl::AssignLocalTensorFunctor>("AssignLocalTensor");
m.add_functor<impl::LocalTensorSharedNumpyDataFunctor>("LocalTensorSharedNumpyData");
}
8 changes: 4 additions & 4 deletions oneflow/api/python/functional/tensor_api.yaml
@@ -17,20 +17,20 @@
"Tensor (PyObject* data, *, DataType dtype=None, Device device=None,
Bool requires_grad=False, Bool pin_memory=False) => TensorWithData",
"Tensor (PyObject* data, *, DataType dtype=None, Placement placement,
SbpList sbp, Bool requires_grad=False) => ConsistentTensorWithData",
SbpList sbp, Bool requires_grad=False) => GlobalTensorWithData",
]
bind_python: True

- name: "_legacy_tensor_ctor"
signature:
[
"Tensor (*, Device device=None) => TensorEmptyCtor",
"Tensor (*, Placement placement, SbpList sbp) => ConsistentTensorEmptyCtor",
"Tensor (*, Placement placement, SbpList sbp) => GlobalTensorEmptyCtor",
"Tensor (Tensor other) => TensorWithOtherCtor",
"Tensor (PyObject* data, *, Device device=None) => TensorWithDataCtor",
"Tensor (PyObject* data, *, Placement placement, SbpList sbp) => ConsistentTensorWithDataCtor",
"Tensor (PyObject* data, *, Placement placement, SbpList sbp) => GlobalTensorWithDataCtor",
"Tensor (Shape size, *, Device device=None) => TensorWithShapeCtor",
"Tensor (Shape size, *, Placement placement, SbpList sbp) => ConsistentTensorWithShapeCtor",
"Tensor (Shape size, *, Placement placement, SbpList sbp) => GlobalTensorWithShapeCtor",
]
bind_python: True

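These signatures are what oneflow.tensor and the legacy oneflow.Tensor constructor dispatch to: passing placement and sbp selects the Global* functors instead of the device-based ones. A minimal sketch of the two data-driven paths, assuming an illustrative single-rank placement:

    import oneflow as flow

    placement = flow.placement("cpu", ranks=[0])
    sbp = flow.sbp.broadcast

    # GlobalTensorWithData: flow.tensor with placement/sbp builds a global tensor from data.
    a = flow.tensor([[1, 2], [3, 4]], dtype=flow.float32, placement=placement, sbp=sbp)

    # GlobalTensorWithDataCtor: the legacy flow.Tensor constructor takes the same keyword-only arguments.
    b = flow.Tensor([1.0, 2.0, 3.0], placement=placement, sbp=sbp)

    print(a.is_global, b.is_global)  # True True

The shape-based and empty variants (GlobalTensorWithShapeCtor, GlobalTensorEmptyCtor) follow the same keyword pattern.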
3 changes: 1 addition & 2 deletions oneflow/api/python/job_build/job_build_and_infer.cpp
@@ -40,8 +40,7 @@ ONEFLOW_API_PYBIND11_MODULE("", m) {
m.def("CurJobBuildAndInferCtx_AddAndInferMirroredOp",
&CurJobBuildAndInferCtx_AddAndInferMirroredOp, py::call_guard<py::gil_scoped_release>());

m.def("CurJobBuildAndInferCtx_AddAndInferConsistentOp",
&CurJobBuildAndInferCtx_AddAndInferConsistentOp);
m.def("CurJobBuildAndInferCtx_AddAndInferGlobalOp", &CurJobBuildAndInferCtx_AddAndInferGlobalOp);
m.def("CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair",
&CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair);

4 changes: 2 additions & 2 deletions oneflow/api/python/job_build/job_build_and_infer.h
@@ -78,12 +78,12 @@ inline Maybe<std::string> CurJobBuildAndInferCtx_AddAndInferMirroredOp(
return PbMessage2TxtString(*op_attribute);
}

inline Maybe<std::string> CurJobBuildAndInferCtx_AddAndInferConsistentOp(
inline Maybe<std::string> CurJobBuildAndInferCtx_AddAndInferGlobalOp(
const std::string& op_conf_str) {
OperatorConf op_conf;
CHECK_OR_RETURN(TxtString2PbMessage(op_conf_str, &op_conf)) << "operator conf parse failed";
auto* ctx = JUST(GetCurInferCtx());
const auto& op_attribute = JUST(ctx->AddAndInferConsistentOp(op_conf));
const auto& op_attribute = JUST(ctx->AddAndInferGlobalOp(op_conf));
return PbMessage2TxtString(*op_attribute);
}

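AddAndInferGlobalOp is the entry point used while a job is being built in lazy mode, for example when an nn.Graph is traced. A minimal sketch of code that drives that path, assuming a single device; the module and shapes are illustrative, and the Python-to-C++ call chain is not shown in this diff:

    import oneflow as flow
    import oneflow.nn as nn

    class LinearGraph(nn.Graph):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(4, 2)

        def build(self, x):
            # Ops recorded here are added to the current JobBuildAndInferCtx.
            return self.linear(x)

    graph = LinearGraph()
    out = graph(flow.randn(1, 4))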
@@ -17,7 +17,7 @@ limitations under the License.
#include <pybind11/stl.h>
#include <pybind11/functional.h>
#include "oneflow/api/python/of_api_registry.h"
#include "oneflow/core/thread/thread_consistent_id.h"
#include "oneflow/core/thread/thread_global_id.h"
#include "oneflow/core/framework/rank_group_rpc_util.h"
#include "oneflow/core/job/rank_group.h"
#include "oneflow/core/job/rank_group_scope.h"
@@ -29,32 +29,30 @@

namespace {

Maybe<void> InitConsistentTransportTokenScope(const std::string& thread_tag,
int64_t thread_consistent_id,
Symbol<RankGroup> rank_group) {
JUST(InitThisThreadUniqueConsistentId(thread_consistent_id, thread_tag));
Maybe<void> InitGlobalTransportTokenScope(const std::string& thread_tag, int64_t thread_global_id,
Symbol<RankGroup> rank_group) {
JUST(InitThisThreadUniqueGlobalId(thread_global_id, thread_tag));
static thread_local const auto& init_rank_group_scope =
JUST(RankGroupScope::MakeInitialRankGroupScope(rank_group));
// no unused warning for `init_rank_group_scope`.
(void)(init_rank_group_scope);
return Maybe<void>::Ok();
}

Maybe<void> InitConsistentTransportTokenScope(const std::string& thread_tag,
int64_t thread_consistent_id) {
Maybe<void> InitGlobalTransportTokenScope(const std::string& thread_tag, int64_t thread_global_id) {
const auto& rank_group = JUST(RankGroup::DefaultRankGroup());
JUST(InitConsistentTransportTokenScope(thread_tag, thread_consistent_id, rank_group));
JUST(InitGlobalTransportTokenScope(thread_tag, thread_global_id, rank_group));
return Maybe<void>::Ok();
}

Maybe<void> ApiInitDefaultConsistentTransportTokenScope() {
return InitConsistentTransportTokenScope("main", kThreadConsistentIdMain);
Maybe<void> ApiInitDefaultGlobalTransportTokenScope() {
return InitGlobalTransportTokenScope("main", kThreadGlobalIdMain);
}

} // namespace

ONEFLOW_API_PYBIND11_MODULE("", m) {
m.def("InitDefaultConsistentTransportTokenScope", &ApiInitDefaultConsistentTransportTokenScope);
m.def("InitDefaultGlobalTransportTokenScope", &ApiInitDefaultGlobalTransportTokenScope);
}

} // namespace oneflow