diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp
index aab299edd0a..142eb4f573c 100644
--- a/oneflow/api/python/framework/tensor.cpp
+++ b/oneflow/api/python/framework/tensor.cpp
@@ -125,26 +125,17 @@ static PyObject* PyTensorObject_subscript(PyObject* self, PyObject* item) {
   END_HANDLE_ERRORS
 }
 
-static int PyTensorObject_ass_subscript(PyObject* self, PyObject* item, PyObject* value) {
-  HANDLE_ERRORS
-  const auto& p = PyTensor_Unpack(self);
-  const auto& v = PyTensor_Unpack(value);
-  functional::PythonArg arg(item);
-  ASSERT(functional::TensorSetItem(p, arg.As<functional::TensorIndex>(), v));
-  return 0;
-  END_HANDLE_ERRORS_RET(-1)
-}
-
 static PySequenceMethods PyTensorObject_as_sequence = {
     (lenfunc)PyTensorObject_length, NULL, /*sq_concat*/
     NULL,                                 /*sq_repeat*/
     (ssizeargfunc)PyTensorObject_getitem, /*sq_item*/
 };
 
+extern int PyTensorObject_setitem(PyObject*, PyObject*, PyObject*);
 static PyMappingMethods PyTensorObject_as_mapping = {
     (lenfunc)PyTensorObject_length,
     (binaryfunc)PyTensorObject_subscript,
-    (objobjargproc)PyTensorObject_ass_subscript,
+    (objobjargproc)PyTensorObject_setitem,
 };
 
 static PyObject* PyTensorObject_storage_offset(PyObject* self, PyObject* unused) {
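Note on the slot rewiring above: `mp_ass_subscript` now points at the C++ `PyTensorObject_setitem` defined in tensor_functions.cpp, so indexed assignment no longer round-trips through a Python-level `__setitem__`. A minimal sketch of what this path handles (shapes and values are illustrative only):

```python
# Minimal sketch: indexed assignment now dispatches directly to the C++
# mp_ass_subscript slot (PyTensorObject_setitem); no Python __setitem__ involved.
import oneflow as flow

x = flow.zeros(4)
x[1] = 2.5              # scalar rhs: wrapped into a constant tensor in C++
x[2:4] = flow.ones(2)   # tensor rhs: moved to x's device in C++
```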
diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index 2dbfd4a3a02..f74050debf7 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -632,6 +632,168 @@ static PyObject* PyTensorObject_transpose(PyObject* self, PyObject* args, PyObje
   END_HANDLE_ERRORS
 }
 
+static PyObject* PyTensorObject_local_to_global(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  auto tensor = PyTensor_Unpack(self);
+  CHECK_OR_THROW(tensor->is_local()) << Error::RuntimeError() << "input must be a local tensor";
+  PyObject* placement_obj = Py_None;
+  PyObject* sbp_obj = Py_None;
+  bool check_meta = true;
+  static const char* keywords[4] = {"placement", "sbp", "check_meta", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO$O!:local_to_global",
+                                   const_cast<char**>(keywords), &placement_obj, &sbp_obj,
+                                   &PyBool_Type, &check_meta)) {
+    return NULL;
+  };
+
+  CHECK_OR_THROW(placement_obj != Py_None && sbp_obj != Py_None) << Error::InvalidValueError(
+      "Converting a local tensor to global tensor must have placement and sbp parameters.");
+  CHECK_OR_THROW(functional::PyParallelDescCheck(placement_obj))
+      << Error::TypeError() << "Invalid parameter placement with type "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(placement_obj)));
+
+  std::vector<Symbol<SbpParallel>> sbp;
+  if (functional::PySbpParallelCheck(sbp_obj)) {
+    sbp.emplace_back(functional::PyUnpackSbpParallel(sbp_obj));
+  } else {
+    CHECK_OR_THROW(functional::PySbpParallelSequenceCheck(sbp_obj))
+        << Error::TypeError() << "Invalid parameter sbp with type "
+        << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(sbp_obj)));
+    sbp = functional::PyUnpackSbpParallelSequence(sbp_obj);
+  }
+  return PyTensor_New(ASSERT_PTR(functional::ToConsistent(
+      tensor, functional::PyUnpackParallelDesc(placement_obj), sbp, {}, check_meta)));
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_global_to_global(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  auto tensor = PyTensor_Unpack(self);
+  CHECK_OR_THROW(tensor->is_consistent())
+      << Error::RuntimeError() << "input must be a global tensor";
+  PyObject* placement_obj = Py_None;
+  PyObject* sbp_obj = Py_None;
+  PyObject* grad_sbp_obj = Py_None;
+  Symbol<ParallelDesc> placement;
+  std::vector<Symbol<SbpParallel>> sbp;
+  std::vector<Symbol<SbpParallel>> grad_sbp;
+  bool check_meta = false;
+  static const char* keywords[5] = {"placement", "sbp", "grad_sbp", "check_meta", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO$OO!:global_to_global",
+                                   const_cast<char**>(keywords), &placement_obj, &sbp_obj,
+                                   &grad_sbp_obj, &PyBool_Type, &check_meta)) {
+    return NULL;
+  };
+
+  // sbp
+  CHECK_OR_THROW(sbp_obj == Py_None || functional::PySbpParallelCheck(sbp_obj)
+                 || functional::PySbpParallelSequenceCheck(sbp_obj))
+      << Error::TypeError()
+      << "sbp parameter must be type of oneflow.sbp.sbp or list/tuple of oneflow.sbp.sbp";
+  if (functional::PySbpParallelCheck(sbp_obj)) {
+    sbp.emplace_back(functional::PyUnpackSbpParallel(sbp_obj));
+  } else if (functional::PySbpParallelSequenceCheck(sbp_obj)) {
+    sbp = functional::PyUnpackSbpParallelSequence(sbp_obj);
+  } else {
+    for (int32_t i = 0; i < ASSERT(tensor->nd_sbp())->sbp_parallel_size(); i++)
+      sbp.emplace_back(ASSERT(tensor->nd_sbp())->sbp_parallel(i));
+  }
+
+  // placement
+  CHECK_OR_THROW(placement_obj == Py_None || functional::PyParallelDescCheck(placement_obj))
+      << Error::TypeError() << "Invalid parameter placement with type "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(placement_obj)));
+  if (placement_obj == Py_None) {
+    placement = ASSERT(tensor->parallel_desc());
+  } else {
+    placement = functional::PyUnpackParallelDesc(placement_obj);
+  }
+
+  // grad_sbp
+  CHECK_OR_THROW(grad_sbp_obj == Py_None || functional::PySbpParallelCheck(grad_sbp_obj)
+                 || functional::PySbpParallelSequenceCheck(grad_sbp_obj))
+      << Error::TypeError()
+      << "grad_sbp parameter must be type of oneflow.sbp.sbp or list/tuple of oneflow.sbp.sbp";
+  if (functional::PySbpParallelCheck(grad_sbp_obj)) {
+    grad_sbp.emplace_back(functional::PyUnpackSbpParallel(grad_sbp_obj));
+  } else if (functional::PySbpParallelSequenceCheck(grad_sbp_obj)) {
+    grad_sbp = functional::PyUnpackSbpParallelSequence(grad_sbp_obj);
+  }
+  return PyTensor_New(
+      ASSERT_PTR(functional::ToConsistent(tensor, placement, sbp, grad_sbp, check_meta)));
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_to_global(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  const auto& tensor = PyTensor_Unpack(self);
+  PyObject* result = NULL;
+  if (tensor->is_consistent())
+    result = PyTensorObject_global_to_global(self, args, kwargs);
+  else {
+    result = PyTensorObject_local_to_global(self, args, kwargs);
+  }
+  if (PyErr_Occurred()) { throw py::error_already_set(); }
+  return result;
+
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_to_local(PyObject* self, PyObject* unused) {
+  HANDLE_ERRORS
+  auto tensor = PyTensor_Unpack(self);
+  CHECK_OR_THROW(tensor->is_consistent())
+      << Error::RuntimeError() << "Expected global tensor for to_local but got local tensor!";
+  return PyTensor_New(ASSERT_PTR(functional::ConsistentToLocal(tensor)));
+  END_HANDLE_ERRORS
+}
+
+int PyTensorObject_setitem(PyObject* self, PyObject* item, PyObject* value) {
+  HANDLE_ERRORS
+  auto tensor = PyTensor_Unpack(self);
+  std::shared_ptr<Tensor> value_tensor;
+  CHECK_OR_THROW(functional::PyTensorIndexCheck(item))
+      << Error::TypeError() << "tensor_setitem(): argument 'index' must be index, not "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(item)));
+  CHECK_OR_THROW(functional::PyScalarCheck(value) || PyTensor_Check(value))
+      << Error::TypeError() << "tensor_setitem(): argument 'value' must be tensor or scalar, not "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(value)));
+
+  if (tensor->is_consistent()) {
+    Symbol<ParallelDesc> placement = ASSERT(tensor->parallel_desc());
+    auto ndsbp = ASSERT(tensor->nd_sbp());
+    std::vector<Symbol<SbpParallel>> sbp(ndsbp->sbp_parallel_size(),
+                                         ASSERT(MakeBroadcastSbpParallel()));
+    if (functional::PyScalarCheck(value)) {
+      Scalar value_scalar = functional::PyUnpackScalar(value);
+      value_tensor = ASSERT_PTR(
+          functional::ConsistentConstant({1}, value_scalar, tensor->dtype(), placement, sbp));
+    } else {
+      value_tensor = PyTensor_Unpack(value);
+      CHECK_OR_THROW(value_tensor->is_consistent())
+          << Error::RuntimeError()
+          << "tensor_setitem(): value must be a global tensor when self is global";
+      value_tensor = ASSERT_PTR(functional::ToConsistent(value_tensor, placement, sbp, {}, true));
+    }
+  } else {
+    if (functional::PyScalarCheck(value)) {
+      Scalar value_scalar = functional::PyUnpackScalar(value);
+      value_tensor = ASSERT_PTR(
+          functional::Constant({1}, value_scalar, tensor->dtype(), ASSERT(tensor->device())));
+    } else {
+      value_tensor = PyTensor_Unpack(value);
+      CHECK_OR_THROW(value_tensor->is_local())
+          << Error::RuntimeError()
+          << "tensor_setitem(): value must be a local tensor when self is local";
+      Optional<Symbol<Device>> device = ASSERT(tensor->device());
+      value_tensor = ASSERT_PTR(functional::To(value_tensor, device, value_tensor->dtype(), false));
+    }
+  }
+  ASSERT(functional::TensorSetItem(tensor, functional::PyUnpackTensorIndex(item), value_tensor));
+  return 0;
+  END_HANDLE_ERRORS_RET(-1)
+}
+
 PyMethodDef PyTensorObject_extra_methods[] = {
     {"byte", PyTensorObject_byte, METH_NOARGS, NULL},
     {"size", (PyCFunction)PyTensorObject_size, METH_VARARGS | METH_KEYWORDS, NULL},
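For reference, a sketch of how the conversion methods added above behave from Python; the placement and sbp values are illustrative and assume a single-node, one-device CPU run:

```python
# Illustrative usage of the conversion methods implemented above (1 CPU device).
import oneflow as flow

placement = flow.placement("cpu", ranks=[0])
x = flow.tensor([1.0, 2.0, 3.0, 4.0])  # local tensor

# local_to_global path: placement and sbp are required (InvalidValueError otherwise).
g = x.to_global(placement=placement, sbp=flow.sbp.broadcast)

# global_to_global path: omitted placement/sbp default to the tensor's current ones.
g2 = g.to_global(sbp=flow.sbp.split(0))

# to_local: only valid on a global tensor (RuntimeError otherwise).
l = g2.to_local()
```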
@@ -655,6 +817,12 @@ PyMethodDef PyTensorObject_extra_methods[] = {
     {"half", PyTensorObject_half, METH_NOARGS, NULL},
     {"float", PyTensorObject_float, METH_NOARGS, NULL},
     {"double", PyTensorObject_double, METH_NOARGS, NULL},
+    {"local_to_global", (PyCFunction)PyTensorObject_local_to_global, METH_VARARGS | METH_KEYWORDS,
+     NULL},
+    {"global_to_global", (PyCFunction)PyTensorObject_global_to_global, METH_VARARGS | METH_KEYWORDS,
+     NULL},
+    {"to_local", PyTensorObject_to_local, METH_NOARGS, NULL},
+    {"to_global", (PyCFunction)PyTensorObject_to_global, METH_VARARGS | METH_KEYWORDS, NULL},
     {"cpu", PyTensorObject_cpu, METH_NOARGS, NULL},
     {"cuda", (PyCFunction)PyTensorObject_cuda, METH_VARARGS | METH_KEYWORDS, NULL},
     {"var", (PyCFunction)PyTensorObject_var, METH_VARARGS | METH_KEYWORDS, NULL},
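The value-coercion rules in `PyTensorObject_setitem` above, restated as a Python sketch (single-device placement; values illustrative):

```python
# Sketch of PyTensorObject_setitem's coercion rules (illustrative values).
import oneflow as flow

placement = flow.placement("cpu", ranks=[0])
g = flow.zeros(4, placement=placement, sbp=flow.sbp.broadcast)
g[0] = 1.0             # scalar -> ConsistentConstant with broadcast sbp
v = flow.ones(2, placement=placement, sbp=flow.sbp.broadcast)
g[1:3] = v             # global rhs -> converted to self's placement, broadcast sbp

x = flow.zeros(4)      # local tensor
x[1:3] = v.to_local()  # local rhs -> moved to self's device, then written
# x[1:3] = v           # RuntimeError: value must be a local tensor when self is local
```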
diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py
index 8c97c973596..b0194bb88b0 100755
--- a/python/oneflow/framework/tensor.py
+++ b/python/oneflow/framework/tensor.py
@@ -71,30 +71,6 @@ def _backward(self, gradient=None, retain_graph=False, create_graph=False):
         flow._oneflow_internal.nn.graph.AddTensorAsGraphLoss(self)
 
 
-def _setitem(self, key, value):
-    if self.is_global:
-        if isinstance(value, (int, float)):
-            value = flow._C.global_constant(
-                [1],
-                value,
-                dtype=self.dtype,
-                placement=self.placement,
-                sbp=[flow.sbp.broadcast,] * len(self.sbp),
-            )
-        else:
-            value = value.to_global(
-                self.placement, sbp=[flow.sbp.broadcast,] * len(self.sbp)
-            )
-    else:
-        if isinstance(value, (int, float)):
-            value = flow._C.constant([1], value, dtype=self.dtype, device=self.device)
-        else:
-            value = value.to(device=self.device)
-
-    flow._C.tensor_setitem(self, key, value)
-    return self
-
-
 def _str(self):
     return self.__repr__()
 
@@ -641,10 +617,6 @@ def _triu(self, diagonal=0):
     return flow.triu(self, diagonal=diagonal)
 
 
-def _to_local(self):
-    return flow.to_local(self)
-
-
 def _relu(self):
     return flow._C.relu(self)
 
@@ -920,24 +892,6 @@ def _to(self, *args, **kwargs):
     return flow._C.to(self, *new_args, **kwargs)
 
 
-def _local_to_global(self, placement=None, sbp=None, *, check_meta=True):
-    return flow.local_to_global(self, placement, sbp, check_meta)
-
-
-def _global_to_global(
-    self, placement=None, sbp=None, *, grad_sbp=None, check_meta=False
-):
-    return flow.global_to_global(self, placement, sbp, grad_sbp, check_meta)
-
-
-def _to_global(self, placement=None, sbp=None, **kwargs):
-    return flow.to_global(self, placement, sbp, **kwargs)
-
-
-def _to_local(self):
-    return flow.to_local(self)
-
-
 def _tolist(self):
     if self.numel() == 1 and self.ndim == 0:
         return self.item()
@@ -1144,7 +1098,6 @@ def RegisterMethods():
     Tensor.sub = _sub
     Tensor.sub_ = _sub_inplace
     Tensor.backward = _backward
-    Tensor.__setitem__ = _setitem
     Tensor.__str__ = _str
     Tensor.__repr__ = _repr
     Tensor.__bool__ = is_nonzero
@@ -1176,9 +1129,6 @@ def RegisterMethods():
     Tensor.new_zeros = _new_zeros
     Tensor.where = _where
     Tensor.norm = _norm
-    Tensor.local_to_global = _local_to_global
-    Tensor.global_to_global = _global_to_global
-    Tensor.to_global = _to_global
     Tensor.repeat = _repeat
     Tensor.repeat_interleave = _repeat_interleave
     Tensor.tile = _tile
@@ -1189,7 +1139,6 @@ def RegisterMethods():
     Tensor.masked_select = _masked_select
     Tensor.eq = _eq
     Tensor.item = _item
-    Tensor.to_local = _to_local
     Tensor.sort = _sort
     Tensor.type_as = _type_as
     Tensor.tolist = _tolist
diff --git a/python/oneflow/test/exceptions/test_local_global_convert_error.py b/python/oneflow/test/exceptions/test_local_global_convert_error.py
index 8ebb5c63e6e..eac0acba4c7 100644
--- a/python/oneflow/test/exceptions/test_local_global_convert_error.py
+++ b/python/oneflow/test/exceptions/test_local_global_convert_error.py
@@ -64,7 +64,7 @@ def test_global_to_global_with_invalid_split_axis(test_case):
     @flow.unittest.skip_unless_1n1d()
     def test_call_to_local_for_local_tensor(test_case):
         x = flow.tensor([1, 2, 3, 4])
-        with test_case.assertRaises(AssertionError) as ctx:
+        with test_case.assertRaises(RuntimeError) as ctx:
             y = x.to_local()
         test_case.assertTrue(
             "Expected global tensor for to_local but got local tensor!"
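The test update reflects that the failure now originates from the C++ `CHECK_OR_THROW`, which surfaces in Python as a `RuntimeError` rather than the old `AssertionError`. A minimal repro of the new behavior (illustrative):

```python
# Minimal repro of the new exception type (illustrative).
import oneflow as flow

x = flow.tensor([1, 2, 3, 4])  # local tensor
try:
    x.to_local()
except RuntimeError as e:      # was AssertionError before this change
    assert "Expected global tensor for to_local but got local tensor!" in str(e)
```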